#qpy:console
import site
import os
from peewee import *
import time
import datetime
import io
import sys
# 20160103
if os.path.exists('/storage/extSdCard'):
db = SqliteDatabase('/storage/extSdCard/mydb/lessonplan2010.db', **{})
#backupdir = '/storage/extSdCard/dbbackup/'
#db = '/storage/extSdCard/mydb/english-notes-exercises.sqlite'
else:
db = SqliteDatabase('lessonplan2010.db', **{})
#db = SqliteDatabase('lessonplan2010.db', **{})
#db = SqliteDatabase('/storage/extSdCard/englishdb/lessonplan2010.db', **{})
class BaseModel(Model):
class Meta:
database = db
class Lessonplan2016(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
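# NOTE: the create() calls below also pass activity3, which is not declared
# here; peewee silently drops undeclared fields, so declare an activity3
# CharField as well if the table actually has that column.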
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2016'
class Lessonplanbank(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
bank = PrimaryKeyField(db_column='bank_id', null=True)
content = CharField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
level = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = IntegerField(null=True)
class Meta:
db_table = 'lessonplanbank'
db.connect()
if len(sys.argv) < 4:
print "Begini boh: %s minggu (WW) bulan (MM) hb (DD)" % sys.argv[0]
sys.exit(1)
week = sys.argv[1]
month = sys.argv[2]
hb = int(sys.argv[3])
tahunini = datetime.datetime.now().year
#tahunini = "2015"
tdatesun = datetime.datetime(int(tahunini), int(month), int(hb)) + datetime.timedelta(days=0)
datesun = tdatesun.strftime('%Y%m%d')
#datesun = (str(tahunini)+str(month)+str(hb))
#-----------------------------------------------------------------------
tdatemon = datetime.datetime(int(tahunini), int(month), int(hb)) + datetime.timedelta(days=1)
tdatetue = datetime.datetime(int(tahunini), int(month), int(hb)) + datetime.timedelta(days=2)
tdatewed = datetime.datetime(int(tahunini), int(month), int(hb)) + datetime.timedelta(days=3)
tdatethu = datetime.datetime(int(tahunini), int(month), int(hb)) + datetime.timedelta(days=4)
datemon = tdatemon.strftime('%Y%m%d')
datetue = tdatetue.strftime('%Y%m%d')
datewed = tdatewed.strftime('%Y%m%d')
datethu = tdatethu.strftime('%Y%m%d')
#-----------------------------------------------------------------------
# Sunday
sun01 = Lessonplan2016.create(tingkatan="6J4-UPM",\
date=int(datesun),\
timestart="1050",\
timeend="1250",\
duration="120",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
#-----------------------------------------------------------------------
# Monday
mon01 = Lessonplan2016.create(tingkatan="6S4-UPM",\
date=int(datemon),\
timestart="0740",\
timeend="0940",\
duration="120",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
#-----------------------------------------------------------------------
# Tuesday
tue01 = Lessonplan2016.create(tingkatan="6S4-UPM",\
date=int(datetue),\
timestart="1210",\
timeend="1410",\
duration="120",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
#-----------------------------------------------------------------------
# Wednesday
wed01 = Lessonplan2016.create(tingkatan="6J4-UPM",\
date=int(datewed),\
timestart="1130",\
timeend="1330",\
duration="120",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
#-----------------------------------------------------------------------
# Thursday
thu01 = Lessonplan2016.create(tingkatan="6J4-UPM",\
date=int(datethu),\
timestart="1050",\
timeend="1210",\
duration="80",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
thu02 = Lessonplan2016.create(tingkatan="6S4-UPM",\
date=int(datethu),\
timestart="1250",\
timeend="1440",\
duration="80",\
theme="-",\
topic="-",\
lo1="-",\
lo2="-",\
lo3="-",\
content="-",\
activity1="-",\
activity2="-",\
activity3="-",\
assimilation="-",\
impact="-",\
note="-",\
week=week,\
handout="-",\
exercise="-"
)
thisweek = Lessonplan2016.select().where(Lessonplan2016.week == week)
for i in thisweek:
print(str(i.date) + " = " + i.timestart)
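# Usage sketch (script name and argument values are hypothetical):
#   python lessonplan.py 23 6 5
# creates one blank entry per period for Sunday through Thursday of week 23,
# counting from Sunday 5 June of the current year, then prints the date and
# start time of every entry tagged with that week.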
# =======================================================================
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six.moves import range
import frappe
from six.moves import html_parser as HTMLParser
import smtplib, quopri, json
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html, add_attachment
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails, cstr, cint
from frappe.utils.file_manager import get_file
from rq.timeouts import JobTimeoutException
from frappe.utils.scheduler import log
from six import text_type, string_types
class EmailLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, text_content=None, reference_doctype=None,
reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, reply_to=None, cc=[], bcc=[], message_id=None, in_reply_to=None, send_after=None,
expose_recipients=None, send_priority=1, communication=None, now=False, read_receipt=None,
queue_separately=False, is_notification=False, add_unsubscribe_link=1, inline_images=None,
header=None):
"""Add email to sending queue (Email Queue)
:param recipients: List of recipients.
:param sender: Email sender.
:param subject: Email subject.
:param message: Email message.
:param text_content: Text version of email message.
:param reference_doctype: Reference DocType of caller document.
:param reference_name: Reference name of caller document.
:param send_priority: Priority for Email Queue, default 1.
:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.queue.unsubscribe`.
:param unsubscribe_params: Additional params for unsubscribe links. Defaults are name, doctype, email.
:param attachments: Attachments to be sent.
:param reply_to: Reply to be captured here (default inbox)
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send this email after the given datetime. If the value is an integer, `send_after` is automatically set to that many days from the current date.
:param communication: Communication link to be set in Email Queue record
:param now: Send immediately (don't send in the background)
:param queue_separately: Queue each email separately
:param is_notification: Marks the email as a notification so it will not trigger notifications from the system.
:param add_unsubscribe_link: Send unsubscribe link in the footer of the Email, default 1.
:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
:param header: Append header in email (boolean)
"""
if not unsubscribe_method:
unsubscribe_method = "/api/method/frappe.email.queue.unsubscribe"
if not recipients and not cc:
return
if isinstance(recipients, string_types):
recipients = split_emails(recipients)
if isinstance(cc, string_types):
cc = split_emails(cc)
if isinstance(bcc, string_types):
bcc = split_emails(bcc)
if isinstance(send_after, int):
send_after = add_days(nowdate(), send_after)
email_account = get_outgoing_email_account(True, append_to=reference_doctype, sender=sender)
if not sender or sender == "Administrator":
sender = email_account.default_sender
check_email_limit(recipients)
if not text_content:
try:
text_content = html2text(message)
except HTMLParser.HTMLParseError:
text_content = "See html attachment"
if reference_doctype and reference_name:
unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"reference_doctype": reference_doctype, "reference_name": reference_name})]
unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"global_unsubscribe": 1})]
else:
unsubscribed = []
recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed]
email_text_context = text_content
should_append_unsubscribe = (add_unsubscribe_link
and reference_doctype
and (unsubscribe_message or reference_doctype=="Newsletter")
and add_unsubscribe_link==1)
unsubscribe_link = None
if should_append_unsubscribe:
unsubscribe_link = get_unsubscribe_message(unsubscribe_message, expose_recipients)
email_text_context += unsubscribe_link.text
email_content = get_formatted_html(subject, message,
email_account=email_account, header=header,
unsubscribe_link=unsubscribe_link)
# add to queue
add(recipients, sender, subject,
formatted=email_content,
text_content=email_text_context,
reference_doctype=reference_doctype,
reference_name=reference_name,
attachments=attachments,
reply_to=reply_to,
cc=cc,
bcc=bcc,
message_id=message_id,
in_reply_to=in_reply_to,
send_after=send_after,
send_priority=send_priority,
email_account=email_account,
communication=communication,
add_unsubscribe_link=add_unsubscribe_link,
unsubscribe_method=unsubscribe_method,
unsubscribe_params=unsubscribe_params,
expose_recipients=expose_recipients,
read_receipt=read_receipt,
queue_separately=queue_separately,
is_notification=is_notification,
inline_images=inline_images,
header=header,
now=now)
def add(recipients, sender, subject, **kwargs):
"""Add to Email Queue"""
if kwargs.get('queue_separately') or len(recipients) > 20:
email_queue = None
for r in recipients:
if not email_queue:
email_queue = get_email_queue([r], sender, subject, **kwargs)
if kwargs.get('now'):
send_one(email_queue.name, now=True)
else:
duplicate = email_queue.get_duplicate([r])
duplicate.insert(ignore_permissions=True)
if kwargs.get('now'):
send_one(duplicate.name, now=True)
frappe.db.commit()
else:
email_queue = get_email_queue(recipients, sender, subject, **kwargs)
if kwargs.get('now'):
send_one(email_queue.name, now=True)
def get_email_queue(recipients, sender, subject, **kwargs):
'''Make Email Queue object'''
e = frappe.new_doc('Email Queue')
e.priority = kwargs.get('send_priority')
attachments = kwargs.get('attachments')
if attachments:
# store attachments with fid or print format details, to be attached on-demand later
_attachments = []
for att in attachments:
if att.get('fid'):
_attachments.append(att)
elif att.get("print_format_attachment") == 1:
att['lang'] = frappe.local.lang
_attachments.append(att)
e.attachments = json.dumps(_attachments)
try:
mail = get_email(recipients,
sender=sender,
subject=subject,
formatted=kwargs.get('formatted'),
text_content=kwargs.get('text_content'),
attachments=kwargs.get('attachments'),
reply_to=kwargs.get('reply_to'),
cc=kwargs.get('cc'),
bcc=kwargs.get('bcc'),
email_account=kwargs.get('email_account'),
expose_recipients=kwargs.get('expose_recipients'),
inline_images=kwargs.get('inline_images'),
header=kwargs.get('header'))
mail.set_message_id(kwargs.get('message_id'),kwargs.get('is_notification'))
if kwargs.get('read_receipt'):
mail.msg_root["Disposition-Notification-To"] = sender
if kwargs.get('in_reply_to'):
mail.set_in_reply_to(kwargs.get('in_reply_to'))
e.message_id = mail.msg_root["Message-Id"].strip(" <>")
e.message = cstr(mail.as_string())
e.sender = mail.sender
except frappe.InvalidEmailAddressError:
# bad Email Address - don't add to queue
frappe.log_error('Invalid Email ID Sender: {0}, Recipients: {1}'.format(mail.sender,
', '.join(mail.recipients)), 'Email Not Sent')
e.set_recipients(recipients + kwargs.get('cc', []) + kwargs.get('bcc', []))
e.reference_doctype = kwargs.get('reference_doctype')
e.reference_name = kwargs.get('reference_name')
e.add_unsubscribe_link = kwargs.get("add_unsubscribe_link")
e.unsubscribe_method = kwargs.get('unsubscribe_method')
e.unsubscribe_params = kwargs.get('unsubscribe_params')
e.expose_recipients = kwargs.get('expose_recipients')
e.communication = kwargs.get('communication')
e.send_after = kwargs.get('send_after')
e.show_as_cc = ",".join(kwargs.get('cc', []))
e.show_as_bcc = ",".join(kwargs.get('bcc', []))
e.insert(ignore_permissions=True)
return e
def check_email_limit(recipients):
# if using settings from site_config.json, check email limit
# No limit for own email settings
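# e.g. an assumed site_config.json shape, inferred from the keys read below:
#   "limits": {"emails": 1000, "daily_emails": 100}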
smtp_server = SMTPServer()
if (smtp_server.email_account
and getattr(smtp_server.email_account, "from_site_config", False)
or frappe.flags.in_test):
monthly_email_limit = frappe.conf.get('limits', {}).get('emails')
daily_email_limit = cint(frappe.conf.get('limits', {}).get('daily_emails'))
if frappe.flags.in_test:
monthly_email_limit = 500
daily_email_limit = 50
if daily_email_limit:
# get count of sent mails in last 24 hours
today = get_emails_sent_today()
if (today + len(recipients)) > daily_email_limit:
throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this day.").format(daily_email_limit),
EmailLimitCrossedError)
if not monthly_email_limit:
return
# get count of mails sent this month
this_month = get_emails_sent_this_month()
if (this_month + len(recipients)) > monthly_email_limit:
throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this month.").format(monthly_email_limit),
EmailLimitCrossedError)
def get_emails_sent_this_month():
return frappe.db.sql("""select count(name) from `tabEmail Queue` where
status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
def get_emails_sent_today():
return frappe.db.sql("""select count(name) from `tabEmail Queue` where
status='Sent' and creation>DATE_SUB(NOW(), INTERVAL 24 HOUR)""")[0][0]
def get_unsubscribe_message(unsubscribe_message, expose_recipients):
if unsubscribe_message:
unsubscribe_html = '''<a href="<!--unsubscribe url-->"
target="_blank">{0}</a>'''.format(unsubscribe_message)
else:
unsubscribe_link = '''<a href="<!--unsubscribe url-->"
target="_blank">{0}</a>'''.format(_('Unsubscribe'))
unsubscribe_html = _("{0} to stop receiving emails of this type").format(unsubscribe_link)
html = """<div class="email-unsubscribe">
<!--cc message-->
<div>
{0}
</div>
</div>""".format(unsubscribe_html)
if expose_recipients == "footer":
text = "\n<!--cc message-->"
else:
text = ""
text += "\n\n{unsubscribe_message}: <!--unsubscribe url-->\n".format(unsubscribe_message=unsubscribe_message or _('Unsubscribe'))
return frappe._dict({
"html": html,
"text": text
})
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
params = {"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8")}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
return get_url(unsubscribe_method + "?" + query_string)
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
# unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc({
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name
}).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(_("Unsubscribed"),
_("{0} has left the conversation in {1} {2}").format(email, _(doctype), name),
indicator_color='green')
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
# additional check
cache = frappe.cache()
check_email_limit([])
auto_commit = not from_test
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
smtpserver = SMTPServer()
make_cache_queue()
for i in range(cache.llen('cache_email_queue')):
email = cache.lpop('cache_email_queue')
if cint(frappe.defaults.get_defaults().get("hold_queue"))==1:
break
if email:
send_one(email, smtpserver, auto_commit, from_test=from_test)
# NOTE: removing commit here because we pass auto_commit
# finally:
# frappe.db.commit()
def make_cache_queue():
'''cache values in queue before sending'''
cache = frappe.cache()
emails = frappe.db.sql('''select
name
from
`tabEmail Queue`
where
(status='Not Sent' or status='Partially Sent') and
(send_after is null or send_after < %(now)s)
order by
priority desc, creation asc
limit 500''', { 'now': now_datetime() })
# reset value
cache.delete_value('cache_email_queue')
for e in emails:
cache.rpush('cache_email_queue', e[0])
def send_one(email, smtpserver=None, auto_commit=True, now=False, from_test=False):
'''Send Email Queue with given smtpserver'''
email = frappe.db.sql('''select
name, status, communication, message, sender, reference_doctype,
reference_name, unsubscribe_params, unsubscribe_method, expose_recipients,
show_as_cc, add_unsubscribe_link, attachments
from
`tabEmail Queue`
where
name=%s
for update''', email, as_dict=True)[0]
recipients_list = frappe.db.sql('''select name, recipient, status from
`tabEmail Queue Recipient` where parent=%s''',email.name,as_dict=1)
if frappe.are_emails_muted():
frappe.msgprint(_("Emails are muted"))
return
if cint(frappe.defaults.get_defaults().get("hold_queue"))==1 :
return
if email.status not in ('Not Sent','Partially Sent') :
# rollback to release lock and return
frappe.db.rollback()
return
frappe.db.sql("""update `tabEmail Queue` set status='Sending', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not frappe.flags.in_test:
if not smtpserver: smtpserver = SMTPServer()
smtpserver.setup_email_account(email.reference_doctype, sender=email.sender)
for recipient in recipients_list:
if recipient.status != "Not Sent":
continue
message = prepare_message(email, recipient.recipient, recipients_list)
if not frappe.flags.in_test:
smtpserver.sess.sendmail(email.sender, recipient.recipient, encode(message))
recipient.status = "Sent"
frappe.db.sql("""update `tabEmail Queue Recipient` set status='Sent', modified=%s where name=%s""",
(now_datetime(), recipient.name), auto_commit=auto_commit)
# if any recipient was sent to, mark the queue item as Sent
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", ("No recipients to send to", email.name), auto_commit=auto_commit)
if frappe.flags.in_test:
frappe.flags.sent_mail = message
return
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
JobTimeoutException):
# bad connection/timeout, retry later
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception as e:
frappe.db.rollback()
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Errored', error=%s where name=%s""",
(text_type(e), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", (text_type(e), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
if now:
print(frappe.get_traceback())
raise e
else:
# log to Error Log
log('frappe.email.queue.flush', text_type(e))
def prepare_message(email, recipient, recipients_list):
message = email.message
if not message:
return ""
if email.add_unsubscribe_link and email.reference_doctype: # the unsubscribe_message check is omitted here; without a message the body simply has no unsubscribe url placeholder to replace
unsubscribe_url = get_unsubcribed_url(email.reference_doctype, email.reference_name, recipient,
email.unsubscribe_method, email.unsubscribe_params)
message = message.replace("<!--unsubscribe url-->", quopri.encodestring(unsubscribe_url.encode()).decode())
if email.expose_recipients == "header":
pass
else:
if email.expose_recipients == "footer":
if isinstance(email.show_as_cc, string_types):
email.show_as_cc = email.show_as_cc.split(",")
email_sent_to = [r.recipient for r in recipients_list]
email_sent_cc = ", ".join([e for e in email_sent_to if e in email.show_as_cc])
email_sent_to = ", ".join([e for e in email_sent_to if e not in email.show_as_cc])
if email_sent_cc:
email_sent_message = _("This email was sent to {0} and copied to {1}").format(email_sent_to,email_sent_cc)
else:
email_sent_message = _("This email was sent to {0}").format(email_sent_to)
message = message.replace("<!--cc message-->", quopri.encodestring(email_sent_message.encode()).decode())
message = message.replace("<!--recipient-->", recipient)
message = (message and message.encode('utf8')) or ''
if not email.attachments:
return message
# On-demand attachments
from email.parser import Parser
msg_obj = Parser().parsestr(message)
attachments = json.loads(email.attachments)
for attachment in attachments:
if attachment.get('fcontent'): continue
fid = attachment.get("fid")
if fid:
fname, fcontent = get_file(fid)
attachment.update({
'fname': fname,
'fcontent': fcontent,
'parent': msg_obj
})
attachment.pop("fid", None)
add_attachment(**attachment)
elif attachment.get("print_format_attachment") == 1:
attachment.pop("print_format_attachment", None)
print_format_file = frappe.attach_print(**attachment)
print_format_file.update({"parent": msg_obj})
add_attachment(**print_format_file)
return msg_obj.as_string()
def clear_outbox():
"""Remove low priority older than 31 days in Outbox and expire mails not sent for 7 days.
Called daily via scheduler.
Note: Used separate query to avoid deadlock
"""
email_queues = frappe.db.sql_list("""select name from `tabEmail Queue`
where priority=0 and datediff(now(), modified) > 31""")
if email_queues:
frappe.db.sql("""delete from `tabEmail Queue` where name in (%s)"""
% ','.join(['%s']*len(email_queues)), tuple(email_queues))
frappe.db.sql("""delete from `tabEmail Queue Recipient` where parent in (%s)"""
% ','.join(['%s']*len(email_queues)), tuple(email_queues))
frappe.db.sql("""
update `tabEmail Queue`
set status='Expired'
where datediff(curdate(), modified) > 7 and status='Not Sent' and (send_after is null or send_after < %(now)s)""", { 'now': now_datetime() })
# =======================================================================
"""
Interactivity functions and classes using matplotlib and IPython widgets
**Gravity forward modeling**
* :class:`~fatiando.gravmag.interactive.Moulder`: a matplotlib GUI for 2D
forward modeling using polygons
----
"""
from __future__ import division
try:
    import cPickle as pickle  # Python 2
except ImportError:  # Python 3
    import pickle
import numpy
from matplotlib import pyplot, widgets, patches
from matplotlib.lines import Line2D
from IPython.core.pylabtools import print_figure
from IPython.display import Image
from .. import utils
from . import talwani
from ..mesher import Polygon
class Moulder(object):
"""
Interactive 2D forward modeling using polygons.
A matplotlib GUI application. Allows drawing and manipulating polygons and
computes their predicted data automatically. Also permits contaminating the
data with gaussian pseudo-random error for producing synthetic data sets.
Uses :mod:`fatiando.gravmag.talwani` for computations.
*Moulder* objects can be persisted to Python pickle files using the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method and later
restored using :meth:`~fatiando.gravmag.interactive.Moulder.load`.
.. warning::
Cannot be used with ``%matplotlib inline`` on IPython notebooks because
the app uses the matplotlib plot window. You can still embed the
generated model and data figure on notebooks using the
:meth:`~fatiando.gravmag.interactive.Moulder.plot` method.
Parameters:
* area : list = (x1, x2, z1, z2)
The limits of the model drawing area, in meters.
* x, z : 1d-arrays
The x- and z-coordinates of the computation points (places where
predicted data will be computed). In meters.
* data : None or 1d-array
Observed data measured at *x* and *z*. Will plot this with black dots
along the predicted data.
* density_range : list = [min, max]
The minimum and maximum values allowed for the density. Determines the
limits of the density slider of the application. In kg.m^-3. Defaults
to [-2000, 2000].
* kwargs : dict
Other keyword arguments used to restore the state of the application.
Used by the :meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Not intended for general use.
Examples:
Make the Moulder object and start the app::
import numpy as np
area = (0, 10e3, 0, 5e3)
# Calculate on 100 points
x = np.linspace(area[0], area[1], 100)
z = np.zeros_like(x)
app = Moulder(area, x, z)
app.run()
# This will pop-up a window with the application (like the screenshot
# below). Start drawing (follow the instruction in the figure title).
# When satisfied, close the window to resume execution.
.. image:: ../_static/images/Moulder-screenshot.png
:alt: Screenshot of the Moulder GUI
After closing the plot window, you can access the model and data from the
*Moulder* object::
app.model # The drawn model as fatiando.mesher.Polygon
app.predicted # 1d-array with the data predicted by the model
# You can save the predicted data to use later
app.save_predicted('data.txt')
# You can also save the application and resume it later
app.save('application.pkl')
# Close this session/IPython notebook/etc.
# To resume drawing later:
app = Moulder.load('application.pkl')
app.run()
"""
# The tolerance range for mouse clicks on vertices. In pixels.
epsilon = 5
# App instructions printed in the figure suptitle
instructions = ' | '.join([
'n: New polygon', 'd: delete', 'click: select/move', 'esc: cancel'])
def __init__(self, area, x, z, data=None, density_range=[-2000, 2000],
**kwargs):
self.area = area
self.x, self.z = numpy.asarray(x), numpy.asarray(z)
self.density_range = density_range
self.data = data
# Used to set the ylims for the data axes.
if data is None:
self.dmin, self.dmax = 0, 0
else:
self.dmin, self.dmax = data.min(), data.max()
self.predicted = kwargs.get('predicted', numpy.zeros_like(x))
self.error = kwargs.get('error', 0)
self.cmap = kwargs.get('cmap', pyplot.cm.RdBu_r)
self.line_args = dict(
linewidth=2, linestyle='-', color='k', marker='o',
markerfacecolor='k', markersize=5, animated=False, alpha=0.6)
self.polygons = []
self.lines = []
self.densities = kwargs.get('densities', [])
vertices = kwargs.get('vertices', [])
for xy, dens in zip(vertices, self.densities):
poly, line = self._make_polygon(xy, dens)
self.polygons.append(poly)
self.lines.append(line)
def save_predicted(self, fname):
"""
Save the predicted data to a text file.
Data will be saved in 3 columns separated by spaces: x z data
Parameters:
* fname : string or file-like object
The name of the output file or an open file-like object.
"""
numpy.savetxt(fname, numpy.transpose([self.x, self.z, self.predicted]))
def save(self, fname):
"""
Save the application state into a pickle file.
Use this to persist the application. You can later reload the entire
object, with the drawn model and data, using the
:meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Parameters:
* fname : string
The name of the file to save the application. The extension doesn't
matter (use ``.pkl`` if in doubt).
"""
with open(fname, 'wb') as f:
vertices = [numpy.asarray(p.xy) for p in self.polygons]
state = dict(area=self.area, x=self.x,
z=self.z, data=self.data,
density_range=self.density_range,
cmap=self.cmap,
predicted=self.predicted,
vertices=vertices,
densities=self.densities,
error=self.error)
pickle.dump(state, f)
@classmethod
def load(cls, fname):
"""
Restore an application from a pickle file.
The pickle file should have been generated by the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method.
Parameters:
* fname : string
The name of the file.
Returns:
* app : Moulder object
The restored application. You can continue using it as if nothing
had happened.
"""
with open(fname, 'rb') as f:
state = pickle.load(f)
app = cls(**state)
return app
@property
def model(self):
"""
The polygon model drawn as :class:`fatiando.mesher.Polygon` objects.
"""
m = [Polygon(p.xy, {'density': d})
for p, d in zip(self.polygons, self.densities)]
return m
def run(self):
"""
Start the application for drawing.
Will pop-up a window with a place for drawing the model (below) and a
place with the predicted (and, optionally, observed) data (top).
Follow the instruction on the figure title.
When done, close the window to resume program execution.
"""
fig = self._figure_setup()
# Sliders to control the density and the error in the data
self.density_slider = widgets.Slider(
fig.add_axes([0.10, 0.01, 0.30, 0.02]), 'Density',
self.density_range[0], self.density_range[1], valinit=0.,
valfmt='%6.0f kg/m3')
self.error_slider = widgets.Slider(
fig.add_axes([0.60, 0.01, 0.30, 0.02]), 'Error',
0, 5, valinit=self.error, valfmt='%1.2f mGal')
# Put instructions on figure title
self.dataax.set_title(self.instructions)
# Markers for mouse click events
self._ivert = None
self._ipoly = None
self._lastevent = None
self._drawing = False
self._xy = []
self._drawing_plot = None
# Used to blit the model plot and make
# rendering faster
self.background = None
# Connect event callbacks
self._connect()
self._update_data()
self._update_data_plot()
self.canvas.draw()
pyplot.show()
def _connect(self):
"""
Connect the matplotlib events to their callback methods.
"""
# Make the proper callback connections
self.canvas.mpl_connect('button_press_event',
self._button_press_callback)
self.canvas.mpl_connect('key_press_event',
self._key_press_callback)
self.canvas.mpl_connect('button_release_event',
self._button_release_callback)
self.canvas.mpl_connect('motion_notify_event',
self._mouse_move_callback)
self.canvas.mpl_connect('draw_event',
self._draw_callback)
# Call the cleanup and extra code for a draw event when resizing as
# well. This is needed so that tight_layout adjusts the figure when
# resized. Otherwise, tight_layout snaps only when the user clicks on
# the figure to do something.
self.canvas.mpl_connect('resize_event',
self._draw_callback)
self.density_slider.on_changed(self._set_density_callback)
self.error_slider.on_changed(self._set_error_callback)
def plot(self, figsize=(10, 8), dpi=70):
"""
Make a plot of the data and model for embedding in IPython notebooks
Doesn't require ``%matplotlib inline`` to embed the plot (as that would
not allow the app to run).
Parameters:
* figsize : list = (width, height)
The figure size in inches.
* dpi : float
The number of dots-per-inch for the figure resolution.
"""
fig = self._figure_setup(figsize=figsize, facecolor='white')
self._update_data_plot()
pyplot.close(fig)
data = print_figure(fig, dpi=dpi)
return Image(data=data)
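# Notebook usage sketch: after run() returns (the plot window is closed),
# calling app.plot() in a cell yields an IPython Image that the notebook
# renders inline, without needing %matplotlib inline.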
def _figure_setup(self, **kwargs):
"""
Set up the plot figure with labels, titles, ticks, etc.
Sets the *canvas*, *dataax*, *modelax*, *polygons* and *lines*
attributes.
Parameters:
* kwargs : dict
Keyword arguments passed to ``pyplot.subplots``.
Returns:
* fig : matplotlib figure object
The created figure
"""
fig, axes = pyplot.subplots(2, 1, **kwargs)
ax1, ax2 = axes
self.predicted_line, = ax1.plot(self.x, self.predicted, '-r')
if self.data is not None:
self.data_line, = ax1.plot(self.x, self.data, '.k')
ax1.set_ylabel('Gravity anomaly (mGal)')
ax1.set_xlabel('x (m)', labelpad=-10)
ax1.set_xlim(self.area[:2])
ax1.set_ylim((-200, 200))
ax1.grid(True)
tmp = ax2.pcolor(numpy.array([self.density_range]), cmap=self.cmap)
tmp.set_visible(False)
pyplot.colorbar(tmp, orientation='horizontal',
pad=0.08, aspect=80).set_label(r'Density (kg/m3)')
# Remake the polygons and lines to make sure they belong to the right
# axis coordinates
vertices = [p.xy for p in self.polygons]
newpolygons, newlines = [], []
for xy, dens in zip(vertices, self.densities):
poly, line = self._make_polygon(xy, dens)
newpolygons.append(poly)
newlines.append(line)
ax2.add_patch(poly)
ax2.add_line(line)
self.polygons = newpolygons
self.lines = newlines
ax2.set_xlim(self.area[:2])
ax2.set_ylim(self.area[2:])
ax2.grid(True)
ax2.invert_yaxis()
ax2.set_ylabel('z (m)')
fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.06,
hspace=0.1)
self.figure = fig
self.canvas = fig.canvas
self.dataax = axes[0]
self.modelax = axes[1]
fig.canvas.draw()
return fig
def _density2color(self, density):
"""
Map density values to colors using the given *cmap* attribute.
Parameters:
* density : 1d-array
The density values of the model polygons
Returns
* colors : 1d-array
The colors mapped to each density value (returned by a matplotlib
colormap object).
"""
dmin, dmax = self.density_range
return self.cmap((density - dmin)/(dmax - dmin))
def _make_polygon(self, vertices, density):
"""
Create a polygon for drawing.
Polygons are matplotlib.patches.Polygon objects for the fill and
matplotlib.lines.Line2D for the contour.
Parameters:
* vertices : list of [x, z]
List of the [x, z] coordinate pairs of each vertex of the polygon
* density : float
The density of the polygon (used to set the color)
Returns:
* polygon, line
The matplotlib Polygon and Line2D objects
"""
poly = patches.Polygon(vertices, animated=False, alpha=0.9,
color=self._density2color(density))
x, y = zip(*poly.xy)
line = Line2D(x, y, **self.line_args)
return poly, line
def _update_data(self):
"""
Recalculate the predicted data (optionally with random error)
"""
self.predicted = talwani.gz(self.x, self.z, self.model)
if self.error > 0:
self.predicted = utils.contaminate(self.predicted, self.error)
def _update_data_plot(self):
"""
Update the predicted data plot in the *dataax*.
Adjusts the ylim of the axes to fit the data.
"""
self.predicted_line.set_ydata(self.predicted)
vmin = 1.2*min(self.predicted.min(), self.dmin)
vmax = 1.2*max(self.predicted.max(), self.dmax)
self.dataax.set_ylim(vmin, vmax)
self.dataax.grid(True)
self.canvas.draw()
def _draw_callback(self, value):
"""
Callback for the canvas.draw() event.
This is called every time the figure is redrawn. Used to do some
clean-up and tuning whenever this is called as well, like calling
``tight_layout``.
"""
self.figure.tight_layout()
def _set_error_callback(self, value):
"""
Callback when error slider is edited
"""
self.error = value
self._update_data()
self._update_data_plot()
def _set_density_callback(self, value):
"""
Callback when density slider is edited
"""
if self._ipoly is not None:
self.densities[self._ipoly] = value
self.polygons[self._ipoly].set_color(self._density2color(value))
self._update_data()
self._update_data_plot()
self.canvas.draw()
def _get_polygon_vertice_id(self, event):
"""
Find out which vertex of which polygon the event happened in.
If the click was inside a polygon (not on a vertex), identify that
polygon.
Returns:
* p, v : int, int
p: the index of the polygon the event happened in or None if
outside all polygons.
v: the index of the polygon vertex that was clicked or None if the
click was not on a vertex.
"""
distances = []
indices = []
for poly in self.polygons:
x, y = poly.get_transform().transform(poly.xy).T
d = numpy.sqrt((x - event.x)**2 + (y - event.y)**2)
distances.append(d.min())
indices.append(numpy.argmin(d))
p = numpy.argmin(distances)
if distances[p] >= self.epsilon:
# Check if the event was inside a polygon
x, y = event.x, event.y
p, v = None, None
for i, poly in enumerate(self.polygons):
if poly.contains_point([x, y]):
p = i
break
else:
v = indices[p]
last = len(self.polygons[p].xy) - 1
if v == 0 or v == last:
v = [0, last]
return p, v
def _button_press_callback(self, event):
"""
What actions to perform when a mouse button is clicked
"""
if event.inaxes != self.modelax:
return
if event.button == 1 and not self._drawing and self.polygons:
self._lastevent = event
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
# Find out if a click happened on a vertex
# and which vertex of which polygon
self._ipoly, self._ivert = self._get_polygon_vertice_id(event)
if self._ipoly is not None:
self.density_slider.set_val(self.densities[self._ipoly])
self.polygons[self._ipoly].set_animated(True)
self.lines[self._ipoly].set_animated(True)
self.lines[self._ipoly].set_color([0, 1, 0, 0])
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
self.modelax.draw_artist(self.polygons[self._ipoly])
self.modelax.draw_artist(self.lines[self._ipoly])
self.canvas.blit(self.modelax.bbox)
elif self._drawing:
if event.button == 1:
self._xy.append([event.xdata, event.ydata])
self._drawing_plot.set_data(zip(*self._xy))
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self._drawing_plot)
self.canvas.blit(self.modelax.bbox)
elif event.button == 3:
if len(self._xy) >= 3:
density = self.density_slider.val
poly, line = self._make_polygon(self._xy, density)
self.polygons.append(poly)
self.lines.append(line)
self.densities.append(density)
self.modelax.add_patch(poly)
self.modelax.add_line(line)
self._drawing_plot.remove()
self._drawing_plot = None
self._xy = None
self._drawing = False
self._ipoly = len(self.polygons) - 1
self.lines[self._ipoly].set_color([0, 1, 0, 0])
self.dataax.set_title(self.instructions)
self.canvas.draw()
self._update_data()
self._update_data_plot()
def _button_release_callback(self, event):
"""
Reset place markers on mouse button release
"""
if event.inaxes != self.modelax:
return
if event.button != 1:
return
if self._ivert is None and self._ipoly is None:
return
self.background = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
self.canvas.draw()
self._ivert = None
# self._ipoly is only released when clicking outside
# the polygons
self._lastevent = None
self._update_data()
self._update_data_plot()
def _key_press_callback(self, event):
"""
What to do when a key is pressed on the keyboard.
"""
if event.inaxes is None:
return
if event.key == 'd':
if self._drawing and self._xy:
self._xy.pop()
if self._xy:
self._drawing_plot.set_data(zip(*self._xy))
else:
self._drawing_plot.set_data([], [])
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self._drawing_plot)
self.canvas.blit(self.modelax.bbox)
elif self._ivert is not None:
poly = self.polygons[self._ipoly]
line = self.lines[self._ipoly]
if len(poly.xy) > 4:
verts = numpy.atleast_1d(self._ivert)
poly.xy = numpy.array([xy for i, xy in enumerate(poly.xy)
if i not in verts])
line.set_data(zip(*poly.xy))
self._update_data()
self._update_data_plot()
self.canvas.restore_region(self.background)
self.modelax.draw_artist(poly)
self.modelax.draw_artist(line)
self.canvas.blit(self.modelax.bbox)
self._ivert = None
elif self._ipoly is not None:
self.polygons[self._ipoly].remove()
self.lines[self._ipoly].remove()
self.polygons.pop(self._ipoly)
self.lines.pop(self._ipoly)
self.densities.pop(self._ipoly)
self._ipoly = None
self.canvas.draw()
self._update_data()
self._update_data_plot()
elif event.key == 'n':
self._ivert = None
self._ipoly = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
self._drawing = True
self._xy = []
self._drawing_plot = Line2D([], [], **self.line_args)
self._drawing_plot.set_animated(True)
self.modelax.add_line(self._drawing_plot)
self.dataax.set_title(' | '.join([
'left click: set vertex', 'right click: finish',
'esc: cancel']))
self.canvas.draw()
elif event.key == 'escape':
self._drawing = False
self._xy = []
if self._drawing_plot is not None:
self._drawing_plot.remove()
self._drawing_plot = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
def _mouse_move_callback(self, event):
"""
Handle things when the mouse moves.
"""
if event.inaxes != self.modelax:
return
if event.button != 1:
return
if self._ivert is None and self._ipoly is None:
return
x, y = event.xdata, event.ydata
p = self._ipoly
v = self._ivert
if self._ivert is not None:
self.polygons[p].xy[v] = x, y
else:
dx = x - self._lastevent.xdata
dy = y - self._lastevent.ydata
self.polygons[p].xy[:, 0] += dx
self.polygons[p].xy[:, 1] += dy
self.lines[p].set_data(zip(*self.polygons[p].xy))
self._lastevent = event
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self.polygons[p])
self.modelax.draw_artist(self.lines[p])
self.canvas.blit(self.modelax.bbox)
# =======================================================================
#!/usr/bin/python
# =======================================
# multimux v0.1b - Copyright 2011
# Written by muttley
# Get last version from muttley.eb2a.com
# =======================================
import os
import sys
import glob
import re
import shlex
import subprocess
from optparse import OptionParser
# GLOBAL VAR
# Script file name
g_script = os.path.basename(sys.argv[0])
# Script file absolute path
g_script_path = os.path.abspath(os.path.dirname(sys.argv[0]))
# Verbosity
g_verbosity = 0
# Default ffmpeg command line
G_FFMPEG_CLI = 'ffmpeg -i "%(video)s" -i "%(audio)s" -acodec copy -vcodec copy "%(dest)s"'
# Default muxed file postfix (e.g. video.avi => videopostfix.avi)
G_DEST_POSTFIX = '.muxed'
# Default regex tuple for video masks
G_VIDEO_MASK = ( r'(0?[0-9]{1,2})x(0?[0-9]{1,2}).*\.(?:avi|mpg)$', r'S(0?[0-9]{1,2}).?e(0?[0-9]{1,2}).*\.(?:avi|mpg)$' )
# Default regex tuple for audio masks
G_AUDIO_MASK = ( r'(0?[0-9]{1,2})x(0?[0-9]{1,2}).*\.(?:mp3|wav)$', r'S(0?[0-9]{1,2}).?e(0?[0-9]{1,2}).*\.(?:mp3|wav)$' )
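# e.g. "Show.01x03.HDTV.avi" matches the first video mask with sub-groups
# ('01', '03'); it pairs with an audio file whose sub-groups convert to the
# same integers, such as "Show.S01E03.mp3" (filenames are hypothetical).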
def main ():
global g_verbosity
# Help message
usage = "usage: %prog [-m VIDEOMASK] [-a AUDIOMASK] [-t AUDIOTARGET] [-d DEST] [-p POSTFIX] [-c EXECCMD] [-sv] <video-files>\n"\
" %prog -i\n\n"\
" es: %prog /path/to/video\n"\
" %prog -t '/path/to/audio/*.mp3' /path/to/video/*.avi\n"\
" %prog -v '([0-9]+)x([0-9]+).*\.(?:avi)' -v 's([0-9]+)e([0-9]+).*\.(?:avi)' /path/to/video"
description = "Mux video with relative audio file. Match correct file with regex sub-match.\n"\
"es. foo.01x03.avi => sub-match: '01' and '03' => match with: pippo.s01e03.mp3"
version =" %prog 0.6b" # --version to print
epilog= " Copyright 2011"
# Program parsed option
parser = OptionParser(usage=usage, description=description, epilog=epilog, version=version)
parser.add_option("-m", "--video-mask", action="append", type="string", dest="videoMask", default=G_VIDEO_MASK,
help="regex to match video files. Submatch for match with audio (e.s episodie number, season). Repeatable option.")
parser.add_option("-a", "--audio-mask", action="append", type="string", dest="audioMask", default=G_AUDIO_MASK,
help="regex to match audio files. Submatch for match with video (e.s episodie number, season). Repeatable option.")
parser.add_option("-t", "--audio-target", action="append", type="string", dest="audioTarget", default=None,
help="directory where are audio to mux")
parser.add_option("-d", "--dest", type="string", dest="dest", default=None,
help="directory dest, where are saved muxed files. Default: same path of video-files")
parser.add_option("-p", "--dest-postfix", type="string", dest="postfix", default=G_DEST_POSTFIX,
help="Muxed files postfix (es. video.avi => videopostfix.avi). Default: " + G_DEST_POSTFIX )
parser.add_option("-c", "--exec-cmd", type="string", dest="ffmpegCli", default=G_FFMPEG_CLI, metavar="EXECCMD",
help="program executed with matched viedo and audio (placeholder: %(video)s, %(audio)s %(dest)s)")
parser.add_option("-s", "--trial", action="store_true", dest="trial", default=False,
help="perform a trial run with no changes made")
parser.add_option("-v", "--verbose", action="count", dest="verbosity", default=g_verbosity,
help="Enable (and increase) verbosity. Repeatable option.")
parser.add_option("-i", "--info", action="store_true", dest="info",
help="view default regex mask and exec-cmd")
(options, args) = parser.parse_args()
# Manage param and error
if options.info is True:
myPrint( 'Default regex masks:\n video: {0}\n audio: {1}\n'.format( G_VIDEO_MASK, G_AUDIO_MASK ), STR_STDOUT )
myPrint( '\nnote: add your own masks with the -m and -a options (tried in addition to the defaults).\n', STR_STDOUT )
myPrint( '\nDefault exec cmd: "{0}"\n'.format( options.ffmpegCli ), STR_STDOUT )
myPrint( '\nnote: to use your own execcmd, pass -c with the correct placeholders.\n\n', STR_STDOUT )
sys.exit(0)
if len(args) == 0:
myPrint( clr( 'ERROR:', 4) + ' Set video-files!\n\n', STR_STDERR )
parser.print_help()
sys.exit(1)
if options.dest is None:
(filepath, filename) = os.path.split( args[0] )
options.dest = filepath
if options.audioTarget is None:
options.audioTarget = args
if options.trial is True:
myPrint( clr( 'Trial mode: ON', 3 ) + ' (perform a trial run with no changes made)\n\n', STR_STDOUT )
g_verbosity = options.verbosity
# the core of app :)
matched, successOnExec = muxVideoAudio( args, options.audioTarget, options.dest, options.postfix, options.videoMask, options.audioMask, execffmpegCli, options.ffmpegCli , options.trial)
myPrint( "tot. matched: %d succes execcmd: %d/%d\n" % (matched, successOnExec, matched) , STR_STDOUT, 1, g_verbosity )
# exit without errors
sys.exit(0)
# Expand wildcards (*,?,[],~) in a list of paths; directories are replaced by the files they contain
# @lstPath: list[string] - list of paths to expand (dirs or files)
# @return: list[string] - expanded list
def expandWildCard( lstPath ):
lstExpandedPath = []
for path in lstPath:
# expand ~ to HOME path
lstPathGlobize = os.path.expanduser( path )
# expand *.?.[] wildcards
lstPathGlobize = glob.glob( lstPathGlobize )
for pathGlobize in lstPathGlobize:
# return file list for dir path
if os.path.isdir(pathGlobize):
for file in os.listdir( pathGlobize ):
filePath = os.path.join( pathGlobize, file )
if os.path.isfile( filePath ):
lstExpandedPath.append( filePath )
else:
lstExpandedPath.append( pathGlobize )
return lstExpandedPath
# Match each video with the correct audio file by comparing the sub-groups found with the regex masks
# @lstTargetVideo: list[string] - list of video target paths
# @lstTargetAudio: list[string] - list of audio target paths
# @lstVideoMask: list[string] - list of regexes with sub-groups to match video files (e.g. (0?[0-9]{1,2})x(0?[0-9]{1,2}).*\.(?:avi|mpg))
# @lstAudioMask: list[string] - list of regexes with sub-groups to match audio files (e.g. (0?[0-9]{1,2})x(0?[0-9]{1,2}).*\.(?:mp3|wav))
# @callBack: function - called for each match with params: videoFilename, audioFilename, dest, postfix, ffmpegCli, trial
# @return: int, int - number of matches and number of successful execcmd runs
def muxVideoAudio( lstTargetVideo, lstTargetAudio, dest, postfix, lstVideoMask = None, lstAudioMask = None, callBack = None, ffmpegCli = None, trial = False ):
# get audio and video files list
lstTargetAudio = expandWildCard(lstTargetAudio)
lstTargetVideo = expandWildCard(lstTargetVideo)
countMatched = 0
countSuccessOnExec = 0
# for each video search the correct audio (match on regex sub-group)
for targetVideo in lstTargetVideo:
# set True when find correct audio
matched = False
(filepathVideo, filenameVideo) = os.path.split(targetVideo)
myPrint( "> target video: %s\n" % filenameVideo , STR_STDOUT, 2, g_verbosity )
# for each videoMask: regex with sub-groups (e.g. season and episode)
for videoMask in lstVideoMask:
if matched: break
# get sub-group: foo.S01E10.*.avi => ('01', '10')
objMatchedVideo = re.search(videoMask, filenameVideo, re.IGNORECASE)
# if not match => next videoMask
if objMatchedVideo is None:
myPrint( " video mask '%s' not usable!\n" % videoMask , STR_STDOUT, 3, g_verbosity )
continue
# convert to list of int
lstMatchedVideo = [int(x) for x in objMatchedVideo.groups()]
myPrint( " video mask '%s' match: %s\n" % ( videoMask, lstMatchedVideo ) , STR_STDOUT, 2, g_verbosity )
# for each audio
for targetAudio in lstTargetAudio:
if matched: break
(filepathAudio, filenameAudio) = os.path.split(targetAudio)
myPrint( " + target audio: %s\n" % filenameAudio , STR_STDOUT, 2, g_verbosity )
# for each audioMask: regex with sub-groups (e.g. season and episode)
for audioMask in lstAudioMask:
# get sub-group: foo.S01E10.*.mp3 => ('01', '10')
objMatchedAudio = re.search(audioMask, filenameAudio, re.IGNORECASE)
# if not match => next audioMask
if objMatchedAudio is None:
myPrint( " audio mask '%s' not usable!\n" % audioMask , STR_STDOUT, 3, g_verbosity )
continue
# convert to list of int
lstMatchedAudio = [int(x) for x in objMatchedAudio.groups()]
myPrint( " audio mask '%s' match: %s\n" % ( audioMask, lstMatchedAudio ) , STR_STDOUT, 2, g_verbosity )
if lstMatchedVideo == lstMatchedAudio:
myPrint( "Matched video (" + clr( os.path.join( filepathVideo, filenameVideo ), 2 ) + ") and audio (" + clr( os.path.join( filepathAudio, filenameAudio ), 2 ) + ")!\n", STR_STDOUT, 1, g_verbosity )
retCode = callBack( os.path.join( filepathVideo, filenameVideo ), os.path.join( filepathAudio, filenameAudio ), dest, postfix, ffmpegCli, trial )
if retCode == 0: countSuccessOnExec += 1
countMatched += 1
matched = True
break
else:
myPrint( " not match: '%s' with '%s'\n" % ( filenameVideo , filenameAudio ), STR_STDOUT, 3, g_verbosity )
return countMatched, countSuccessOnExec
# Execute ffmpegCli
# @videoFile: string - video file full path
# @audioFile: string - audio file full path
# @dest: string - path to dest
# @postfix: string - filename postfix (e.g. video.avi => videoPOSTFIX.avi)
# @ffmpegCli: string - execcmd with placeholders: %(video)s, %(audio)s, %(dest)s
# @trial: bool - don't execute cmd
# @return: int - value returned from execcmd
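# With the default EXECCMD and postfix, a matched pair expands to something
# like (paths hypothetical):
#   ffmpeg -i "/video/foo.01x03.avi" -i "/audio/foo.01x03.mp3" -acodec copy -vcodec copy "/video/foo.01x03.muxed.avi"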
def execffmpegCli( videoFile, audioFile, dest, postfix, ffmpegCli, trial = False ):
# create the full dest file path with postfix (e.g. /path/to/dest/<video-filename><postfix>.ext)
(path, filename) = os.path.split(videoFile)
(shortname, extension) = os.path.splitext(filename)
dest = os.path.join(dest, shortname + postfix + extension)
ffmpegCli = ffmpegCli % {"video": videoFile, "audio": audioFile, "dest": dest}
if( trial ):
myPrint( "fake exec: " + ffmpegCli + '\n' )
retCode = 0
else:
retCode = subprocess.call( ffmpegCli, shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w') )
myPrint( "executed: %s ...with exit code: %d\n" % ( ffmpegCli, retCode), STR_STDOUT, 1, g_verbosity )
# Manage return code
#if retCode != 0:
# if ret < 0:
# print "Killed by signal", -ret
# else:
# print "Command failed with return code", ret
#else:
# print "SUCCESS!! %d" % ret
return retCode
STR_STDOUT = 0
STR_STDERR = 1
# Print to a stream according to verbosity and stream type
# @text: string - text to print
# @streamType: int - STR_STDOUT or STR_STDERR
# @verbosity: int - verbosity level associated with the text
# @requestVerbosity: int - requested verbosity
# @return: bool - whether the text was printed
def myPrint( text, streamType = STR_STDOUT, verbosity = 1 , requestVerbosity = 1 ):
if verbosity <= requestVerbosity:
if streamType == STR_STDOUT:
sys.stdout.write( text )
else:
sys.stderr.write( text )
return True
else:
return False
G_COLOR = ( '\033[95m', '\033[94m', '\033[92m', '\033[93m', '\033[91m' )
G_COLOR_ENDC = '\033[0m'
# Colorize string
# @string: string - text to colorize
# @color: int - color type, see G_COLOR tuple
# @return: string - return colored string
def clr( string, color ):
return G_COLOR[color] + string + G_COLOR_ENDC
if __name__ == "__main__":
main()
# =======================================================================
"""
Author: Bharat Balegere
Date created: 10-Oct-2017
Date last modified: 23-Jan-2018
Python Version: 3.6
"""
import argparse
import datetime
import os
import numpy as np
import pandas as pd
from gurobipy import *
def read_input_csv(filename, typ=None):
sldf = pd.read_csv(filename, header=0, dtype=typ)
sldf.columns = sldf.columns.str.strip()
sldf[sldf.columns[0]] = sldf[sldf.columns[0]].astype(str).str.strip()
sldf.set_index(sldf.columns[0], inplace=True)
return sldf.to_dict('index'), sorted(sldf.columns.values), list(sldf.index.values)
def read_slots_interviews(filename):
sidf = pd.read_csv(filename, dtype=object)
sidf.columns = sidf.columns.str.strip()
sidict = sidf.to_dict('list')
return dict((key, int(v[0])) for key, v in sidict.items())
def read_shortlists(filename):
sldf = pd.read_csv(filename, dtype=object)
sldf.columns = sldf.columns.str.strip()
comps = list(sldf.columns.values)
comtupl = [(c, str(n).strip()) for c in comps for n in list(sldf[c].dropna().values)]
return dict((x, 1) for x in comtupl), sorted(comps), sorted(set([x[1] for x in comtupl]))
def read_lp(filename):
exnames = []
with open(filename) as f:
for csvline in f:
exnames = exnames + [str(x).strip() for x in csvline.strip().split(',') if len(str(x).strip()) > 0]
return sorted(set(exnames))
def generateSchedule(companies, fixedints, allnames, panels, prefs, shortlists, slots, slots_int, out):
print(datetime.datetime.now().time())
# Find out max number of panels
maxpanels = dict((c, max(panels[s][c] for s in slots)) for c in companies)
# Generate cost of slots
costs = dict((slots[s], s + 1) for s in range(len(slots)))
# Calculate the number of shortlists for each student
crit = dict((n, sum(shortlists.get((c, n), 0) for c in companies)) for n in allnames)
# Remove names that don't have any shortlists
names = [key for key, value in crit.items() if value > 0]
# Calculate the number of shortlists per company
compshortlists = dict((c, sum(shortlists.get((c, n), 0) for n in names)) for c in companies)
# Calculate total number of panels per company
comppanels = dict((c, int(sum(panels[s][c] for s in slots) / slots_int.get(c, 1))) for c in companies)
for c in companies:
if compshortlists[c] > comppanels[c]:
print(c + " has shortlists greater than no of panels " + str(compshortlists[c]) + " > " + str(comppanels[c]))
fibonacii = [2, 3]
for i in range(2, 1 + int(max(crit.values()))):
fibonacii.append(fibonacii[i - 1] + fibonacii[i - 2])
# Create Objective Coefficients
prefsnew = dict()
objcoeff = dict()
if len(prefs):
for n in names:
actpref = dict((c, prefs[n][c] * shortlists.get((c, n), 0)) for c in companies if shortlists.get((c, n), 0) > 0)
scaledpref = {key: rank for rank, key in enumerate(sorted(actpref, key=actpref.get), 1)}
for c, rank in scaledpref.items():
prefsnew[n, c] = rank
for s in slots:
if compshortlists[c] > comppanels[c]:
objcoeff[s, c, n] = (rank / (crit[n] + 1)) * (len(slots) + 1 - costs[s])
else:
objcoeff[s, c, n] = (1 - rank / (crit[n] + 1)) * costs[s]
print('Creating IPLP')
model = Model('interviews')
compnames = tuplelist([(c, n) for c, n in shortlists.keys() if n in names and c in companies])
choices = model.addVars(slots, compnames, vtype=GRB.BINARY, name='G')
# Objective - allocate max students to the initial few slots
model.setObjective(quicksum(choices[s, c, n] * objcoeff.get((s, c, n), costs[s]) for s in slots for c, n in compnames), GRB.MINIMIZE)
# Constraint - the maximum number of interviews in a slot for a company is limited by its panels
model.addConstrs((choices.sum(s, c, '*') <= panels[s][c] for s in slots for c in companies))
# Constraint - allocate a student only if they have a shortlist
model.addConstrs((choices.sum('*', c, n) <= shortlists.get((c, n), 0) * slots_int.get(c, 1) for n in names for c in companies))
# Constraint - slots should not conflict for a student
model.addConstrs((choices.sum(s, '*', n) <= 1 for s in slots for n in names))
    # Constraint - allocate all shortlisted students, or as many interviews as the panels allow
model.addConstrs((choices.sum('*', c, '*') == min(compshortlists[c], comppanels[c]) * slots_int.get(c, 1) for c in companies))
# Constraint - for multiple slots per interview, same candidate should be allocated
for c, si in slots_int.items():
start_slot = 0
while panels[slots[start_slot]][c] == 0:
start_slot += 1
if si > 1:
for i in range(si - 1 + start_slot, len(slots), si):
for x, n in compnames.select(c, '*'):
for j in range(i - si + 1, i):
model.addConstr((choices[slots[i], c, n] - choices[slots[j], c, n]), GRB.EQUAL, 0)
# Constraint - Fix manually given schedule
flist = [(s, c, n) for s, vals in fixedints.items() for c, n in vals.items() if (c, n) in compnames]
model.addConstrs((choices[s, c, n] == 1 for s, c, n in flist))
print('Optimising')
model.optimize()
solution = model.getAttr('X', choices)
sche = [['Slot'] + [c + str(j + 1) for c in companies for j in range(int(maxpanels[c]))]]
for s in slots:
temp = [s]
for c in companies:
row = [''] * int(maxpanels[c])
i = 0
for n in [name for com, name in compnames if com == c]:
if solution.get((s, c, n), 0):
row[i] = n
i = i + 1
temp = temp + row
sche.append(temp)
schedf = pd.DataFrame(sche)
    schedf.to_csv(os.path.join(out, 'sche.csv'), index=False, header=False)
namesdf = pd.DataFrame.from_dict(dict((s, {n: c for c in companies for n in names if solution.get((s, c, n), 0)}) for s in slots), orient='index')
    namesdf.sort_index(axis=1).to_csv(os.path.join(out, 'names.csv'))
print(model.status)
print(datetime.datetime.now().time())
if prefsnew:
unordn = set()
for n in names:
init = 1
for s in slots:
stop = False
for c in companies:
if solution.get((s, c, n), 0) == 1:
if prefsnew[n, c] < init:
unordn.add(n)
stop = True
break
else:
init = prefsnew[n, c]
if stop:
break
        print("The following candidates' preference order has been violated:")
print(unordn)
print(len(unordn))
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('shortlists', help='Shortlists File per company as CSV', metavar='Shortlists.csv')
parser.add_argument('slotspanels', help='Slots and Panels per company as CSV', metavar='SlotsPanels.csv')
parser.add_argument('-s', '--slotsint', help='Number of Slots required per Interview for each company', metavar='SlotsInterview.csv')
parser.add_argument('-p', '--prefs', help='CSV with a matrix containing names and companies', metavar='prefs.csv')
parser.add_argument('-l', '--leftprocess', help='CSV with a list of candidates who have left the process', metavar='lp.csv')
parser.add_argument('-f', '--fixed', help='CSV of the schedule with pre fixed candidates. Should satisfy constraints', metavar='fixed.csv')
parser.add_argument('-o', '--output', help='Output directory', default='out')
args = parser.parse_args()
shortlists, shcompanies, names = read_shortlists(args.shortlists)
panels, companies, slots = read_input_csv(args.slotspanels)
print('Number of Companies')
print(len(companies))
print('Number of Candidates')
print(len(names))
print('Number of Slots')
print(len(slots))
print(set(companies) ^ set(shcompanies))
if not set(companies).issubset(set(shcompanies)):
raise ValueError('Shortlists are not present for all companies')
    if len([x for vals in panels.values() for x in vals.values() if not isinstance(x, (int, np.integer)) or x < 0]):
        raise ValueError('The number of panels must be a non-negative integer')
slots_int = dict()
if args.slotsint:
slots_int = read_slots_interviews(args.slotsint)
assert (sorted(slots_int.keys()) == sorted(companies))
lp = list()
if args.leftprocess:
lp = read_lp(args.leftprocess)
names = [n for n in names if n not in lp]
prefs = dict()
if args.prefs:
prefs, comps3, names2 = read_input_csv(args.prefs)
for vals in prefs.values():
for val in vals.values():
if val not in range(1, len(shcompanies) + 1):
raise ValueError('Incorrect preference ' + str(val) + '. It should be between 1 and ' + str(len(shcompanies)))
assert (set(companies).issubset(set(comps3)))
assert (shcompanies == comps3)
missing = set(names) - set(names2)
if len(missing):
            print('Preferences are missing for the names below:')
            print(missing)
            raise ValueError('Some names are missing preferences')
fixedints = dict()
if args.fixed:
fixedints, comps4, slots2 = read_input_csv(args.fixed, typ=object)
if not os.path.exists(args.output):
os.makedirs(args.output)
generateSchedule(companies, fixedints, names, panels, prefs, shortlists, slots, slots_int, args.output)
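# Example invocation (illustrative; the script and file names are assumptions):
#   python schedule.py Shortlists.csv SlotsPanels.csv \
#       -s SlotsInterview.csv -p prefs.csv -l lp.csv -o out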
|
|
import os
import json
import hashlib
import jsonschema
import requests
from flask import (
Flask, request, jsonify, send_from_directory, render_template, abort,
send_file)
from flask_cacheify import init_cacheify
from flask.views import MethodView
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
DEBUG = os.environ.get('DEBUG', False) in ('true', '1', 'y', 'yes')
SCHEMA_URL = 'https://raw.githubusercontent.com/mozilla/contribute.json/master/schema.json'
KNOWN_URLS_URL = 'https://raw.githubusercontent.com/mozilla/contribute.json/master/knownurls.txt'
SAMPLE = """
{
"name": "contribute.json",
"description": "Standard to describe open source projects",
"repository": {
"url": "https://github.com/mozilla/contribute.json",
"license": "MPL2"
},
"keywords": [
"JSON",
"Python",
"Flask"
]
}
""".strip()
app = Flask(__name__)
cache = init_cacheify(app)
def cache_set(key, value, *args, **options):
if isinstance(value, (dict, list, bool)):
value = json.dumps(value)
cache.set(key, value, *args, **options)
def cache_get(key, default=None):
value = cache.get(key)
if value is None:
value = default
if value is not None and not isinstance(value, (dict, list, bool)):
value = json.loads(value)
return value
@app.route('/contribute.json')
def this_contribute_json():
return send_file('root_files/contribute.json')
@app.route('/')
def index_html():
return catch_all('index.html')
@app.route('/static/<path:path>')
def serve_static(path):
# should only be used when run from __main__.py.
    return send_from_directory('static', path)
@app.route('/<path:path>')
def catch_all(path):
context = {
'DEBUG': DEBUG,
'SAMPLE': SAMPLE,
}
if path == 'partials/schema.html':
# only this partial needs this
context['SCHEMA'] = json.dumps(get_schema(), indent=4)
# if path == 'favicon.ico':
# path = 'static/favicon.ico'
_, ext = os.path.splitext(path)
if path and ext in ('.png', '.gif', '.css', '.js'):
# most likely something's gone wrong
default = False
else:
path = path or 'index.html'
default = True
# print "PATH", path, os.path.isfile(path)
if os.path.isfile(os.path.join('templates', path)):
return render_template(path, **context)
elif default:
return render_template('index.html', **context)
else:
abort(404)
def get_schema():
schema_content = cache_get('schema')
if schema_content is None:
schema = requests.get(SCHEMA_URL)
schema_content = schema.json()
cache_set('schema', schema_content, 60 * 60)
return schema_content
class ValidationView(MethodView):
def post(self):
if 'url' in request.args:
url = request.args['url']
            # We need to make an exception: we can't fetch this site's own
            # /contribute.json because, with a single-threaded single-worker
            # server, the request would deadlock in a chicken-and-egg
            # situation.
if url == request.host_url + 'contribute.json':
url = (
'https://raw.githubusercontent.com/mozilla/contribute.json'
'/master/contribute.json'
)
try:
response = requests.get(url)
content = response.json()
except (ValueError, requests.exceptions.RequestException) as exp:
return jsonify({'request_error': str(exp)})
elif request.data:
try:
content = json.loads(request.data)
except ValueError as exp:
return jsonify({
'request_error': str(exp),
'response': request.data,
})
            url = None
        else:
            # Neither a URL parameter nor a request body was supplied; without
            # this guard `content` would be undefined below.
            return jsonify({'request_error': 'no url or data provided'})
schema_content = get_schema()
context = {
'schema': schema_content,
'schema_url': SCHEMA_URL,
'response': content,
}
if url:
context['url'] = url
try:
jsonschema.validate(
content,
schema_content
)
context['errors'] = None
except jsonschema.ValidationError as error:
context['validation_error'] = error.message
except jsonschema.SchemaError as error:
context['schema_error'] = error.message
previous_urls = cache_get('urls_submitted', [])
if url in previous_urls:
previous_urls.remove(url)
previous_urls.insert(0, url)
cache_set('urls_submitted', previous_urls, 60 * 60 * 24 * 10)
return jsonify(context)
app.add_url_rule('/validate', view_func=ValidationView.as_view('validate'))
class ValidateUrlView(MethodView):
def post(self):
url = request.json['url']
result = cache_get('validation-%s' % url)
if result is None:
result = {
'url': url,
}
try:
r = requests.get(url)
result['status_code'] = r.status_code
except requests.ConnectionError:
result['status_code'] = 500
if result['status_code'] >= 200 and result['status_code'] < 500:
cache_set('validation-%s' % url, result, 60)
return jsonify(result)
app.add_url_rule('/validateurl',
view_func=ValidateUrlView.as_view('validateurl'))
class ExamplesView(MethodView):
def get(self):
known_urls = cache_get('known_urls')
if known_urls is None:
response = requests.get(KNOWN_URLS_URL)
assert response.status_code == 200, response.status_code
known_urls = []
for line in response.content.splitlines():
line = line.strip()
if line and not line.startswith('#'):
known_urls.append(line)
cache_set('known_urls', known_urls, 60 * 60)
return jsonify({'urls': known_urls})
app.add_url_rule('/examples.json', view_func=ExamplesView.as_view('examples'))
class LoadView(MethodView):
def get(self):
url = request.args['url']
cache_key = 'project_%s' % hashlib.md5(url).hexdigest()
project = cache_get(cache_key)
if project is None:
response = requests.get(url)
if response.status_code == 200:
project = response.json()
project['_url'] = url
project['links'] = []
if project.get('urls'):
if project.get('urls').get('prod'):
project['links'].append({
'url': project['urls']['prod'],
'label': 'prod'
})
                if project.get('repository', {}).get('url'):
project['links'].append({
'url': project['repository']['url'],
'label': 'repository'
})
cache_set(cache_key, project, 60 * 60)
return jsonify({'project': project})
app.add_url_rule('/load-example', view_func=LoadView.as_view('load_example'))
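# A minimal development entry point (a sketch; the comment on serve_static
# suggests the project is normally run from a separate __main__.py):
if __name__ == '__main__':
    # Debug mode follows the DEBUG environment flag parsed above.
    app.run(debug=DEBUG)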
|
|
import sys
import os
import re
import shutil
import collections
import datetime
import json
from textwrap import fill
import click
from anchore.cli.common import anchore_print, anchore_print_err
from anchore import navigator, controller, anchore_utils, anchore_auth, anchore_feeds, anchore_policy
from anchore.util import contexts, scripting
config = {}
imagelist = []
@click.group(short_help='Useful tools and operations on images and containers')
@click.option('--imageid', help='Process specified image ID', metavar='<imageid>')
@click.option('--image', help='Process specified image tag/ID/digest', metavar='<tag|imageid|digest>')
@click.pass_context
@click.pass_obj
def toolbox(anchore_config, ctx, image, imageid):
"""
A collection of tools for operating on images and containers and building anchore modules.
Subcommands operate on the specified image passed in as --image <imgid>
"""
global config, imagelist, nav
config = anchore_config
ecode = 0
try:
# set up imagelist of imageIds
if image:
imagelist = [image]
try:
result = anchore_utils.discover_imageIds(imagelist)
except ValueError as err:
raise err
else:
imagelist = result
elif imageid:
if len(imageid) != 64 or re.findall("[^0-9a-fA-F]+",imageid):
raise Exception("input is not a valid imageId (64 characters, a-f, A-F, 0-9)")
imagelist = [imageid]
else:
imagelist = []
if ctx.invoked_subcommand not in ['import', 'delete', 'kubesync', 'images', 'show']:
if not imagelist:
raise Exception("for this operation, you must specify an image with '--image' or '--imageid'")
else:
try:
nav = navigator.Navigator(anchore_config=config, imagelist=imagelist, allimages=contexts['anchore_allimages'])
except Exception as err:
nav = None
raise err
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
if ecode:
sys.exit(ecode)
@toolbox.command(name='delete', short_help="Delete input image(s) from the Anchore DB")
@click.option('--dontask', help='Will delete the image from Anchore DB without asking for confirmation', is_flag=True)
def delete(dontask):
ecode = 0
try:
for i in imagelist:
imageId = None
if contexts['anchore_db'].is_image_present(i):
imageId = i
else:
try:
ret = anchore_utils.discover_imageId(i)
#imageId = ret.keys()[0]
imageId = ret
except:
imageId = None
if imageId:
dodelete = False
if dontask:
dodelete = True
else:
try:
answer = raw_input("Really delete image '"+str(i)+"'? (y/N)")
except:
answer = "n"
if 'y' == answer.lower():
dodelete = True
else:
anchore_print("Skipping delete.")
if dodelete:
try:
anchore_print("Deleting image '"+str(i)+"'")
contexts['anchore_db'].delete_image(imageId)
except Exception as err:
raise err
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode)
@toolbox.command(name='unpack', short_help="Unpack the specified image into a temp location")
@click.option('--destdir', help='Destination directory for unpacked container image', metavar='<path>')
def unpack(destdir):
"""Unpack and Squash image to local filesystem"""
if not nav:
sys.exit(1)
ecode = 0
try:
anchore_print("Unpacking images: " + ' '.join(nav.get_images()))
result = nav.unpack(destdir=destdir)
if not result:
anchore_print_err("no images unpacked")
ecode = 1
else:
for imageId in result:
anchore_print("Unpacked image: " + imageId)
anchore_print("Unpack directory: "+ result[imageId])
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='setup-module-dev', short_help='Setup a module development environment')
@click.option('--destdir', help='Destination directory for module development environment', metavar='<path>')
def setup_module_dev(destdir):
"""
Sets up a development environment suitable for working on anchore modules (queries, etc) in the specified directory.
Creates a copied environment in the destination containing the module scripts, unpacked image(s) and helper scripts
such that a module script that works in the environment can be copied into the correct installation environment and
run with anchore explore <modulename> invocation and should work.
"""
if not nav:
sys.exit(1)
ecode = 0
try:
anchore_print("Anchore Module Development Environment\n")
helpstr = "This tool has set up an environment that represents what anchore will normally set up before running an analyzer, gate and/or query module. Each section below includes some information along with a string that you can use to help develop your own anchore modules.\n"
anchore_print(fill(helpstr, 80))
anchore_print("")
anchore_print("Setting up environment...")
anchore_print("")
result = nav.unpack(destdir=destdir)
if not result:
raise Exception("unable to unpack input image")
for imageId in result:
unpackdir = result[imageId]
# copy anchore imageDB dir into unpacked environment
imgdir = '/'.join([config.data['image_data_store'], imageId])
tmpdatastore = '/'.join([unpackdir, 'data'])
dstimgdir = '/'.join([tmpdatastore, imageId])
if not os.path.exists(imgdir):
anchore_print_err("Image must exist and have been analyzed before being used for module development.")
break
if not os.path.exists(tmpdatastore):
os.makedirs(tmpdatastore)
shutil.copytree(imgdir, dstimgdir, symlinks=True)
# copy examples into the unpacked environment
examples = {}
basedir = '/'.join([unpackdir, "anchore-modules"])
if not os.path.exists(basedir):
os.makedirs(basedir)
# copy the shell-utils
os.makedirs('/'.join([basedir, 'shell-utils']))
for s in os.listdir('/'.join([config.data['scripts_dir'], 'shell-utils'])):
shutil.copy('/'.join([config.data['scripts_dir'], 'shell-utils', s]), '/'.join([basedir, 'shell-utils', s]))
# copy any examples that exist in the anchore egg into the unpack dir
for d in os.listdir(config.data['scripts_dir']):
scriptdir = '/'.join([basedir, d])
if os.path.exists(config.data['scripts_dir'] + "/examples/" + d):
if not os.path.exists(scriptdir):
os.makedirs(scriptdir)
for s in os.listdir(config.data['scripts_dir'] + "/examples/" + d):
thefile = '/'.join([config.data['scripts_dir'], "examples", d, s])
thefiledst = '/'.join([scriptdir, s])
                    if re.match(r".*(\.sh)$", thefile):
examples[d] = thefiledst
shutil.copy(thefile, thefiledst)
# all set, show how to use them
anchore_print("\tImage: " + imageId[0:12])
anchore_print("\tUnpack Directory: " +result[imageId])
anchore_print("")
analyzer_string = ' '.join([examples['analyzers'], imageId, tmpdatastore, dstimgdir, result[imageId]])
anchore_print("\tAnalyzer Command:\n\n\t" +analyzer_string)
anchore_print("")
anchore_utils.write_plainfile_fromstr(result[imageId] + "/queryimages", imageId+"\n")
queryoutput = '/'.join([result[imageId], "querytmp/"])
if not os.path.exists(queryoutput):
os.makedirs(queryoutput)
query_string = ' '.join([examples['queries'], result[imageId] + "/queryimages", tmpdatastore, queryoutput, "passwd"])
anchore_print("Query Command:\n\n\t" + query_string)
anchore_print("")
anchore_print("Next Steps: ")
anchore_print("\tFirst: run the above analyzer command and note the RESULT output")
anchore_print("\tSecond: run the above query command and note the RESULT output, checking that the query was able to use the analyzer data to perform its search")
anchore_print("\tThird: modify the analyzer/query modules as you wish, including renaming them and continue running/inspecting output until you are satisfied")
anchore_print("\tFinally: when you're happy with the analyzer/query, copy them to next to existing anchore analyzer/query modules and anchore will start calling them as part of container analysis/query:\n")
anchore_print("\tcp " + examples['analyzers'] + " " + config.data['scripts_dir'] + "/analyzers/99_analyzer-example.sh")
anchore_print("\tcp " + examples['queries'] + " " + config.data['scripts_dir'] + "/queries/")
anchore_print("\tanchore analyze --force --image " + imageId + " --imagetype none")
anchore_print("\tanchore query --image " + imageId + " query-example")
anchore_print("\tanchore query --image " + imageId + " query-example passwd")
anchore_print("\tanchore query --image " + imageId + " query-example pdoesntexist")
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='show-dockerfile')
def show_dockerfile():
"""Generate (or display actual) image Dockerfile"""
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.run_query(['show-dockerfile', 'all'])
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='show-layers')
def show_layers():
"""Show image layer IDs"""
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.run_query(['show-layers', 'all'])
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='show-familytree')
def show_familytree():
"""Show image family tree image IDs"""
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.run_query(['show-familytree', 'all'])
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='show-taghistory')
def show_taghistory():
"""Show history of all known repo/tags for image"""
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.get_taghistory()
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='show-analyzer-status')
def show_analyzer_status():
"""Show analyzer status for specified image"""
ecode = 0
try:
image=contexts['anchore_allimages'][imagelist[0]]
analyzer_status = contexts['anchore_db'].load_analyzer_manifest(image.meta['imageId'])
result = {image.meta['imageId']:{'result':{'header':['Analyzer', 'Status', '*Type', 'LastExec', 'Exitcode', 'Checksum'], 'rows':[]}}}
for script in analyzer_status.keys():
adata = analyzer_status[script]
nicetime = datetime.datetime.fromtimestamp(adata['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
try:
row = [script.split('/')[-1], adata['status'], adata['atype'], nicetime, str(adata['returncode']), adata['csum']]
result[image.meta['imageId']]['result']['rows'].append(row)
except:
pass
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
@toolbox.command(name='export')
@click.option('--outfile', help='output file for exported image', required=True, metavar='<file.json>')
def export(outfile):
"""Export image anchore data to a JSON file."""
if not nav:
sys.exit(1)
ecode = 0
savelist = list()
for imageId in imagelist:
try:
record = {}
record['image'] = {}
record['image']['imageId'] = imageId
record['image']['imagedata'] = contexts['anchore_db'].load_image_new(imageId)
savelist.append(record)
except Exception as err:
anchore_print_err("could not find record for image ("+str(imageId)+")")
ecode = 1
if ecode == 0:
try:
if outfile == '-':
print json.dumps(savelist, indent=4)
else:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(savelist))
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode)
@toolbox.command(name='kubesync')
def kubesync():
"""Communicate with kubernetes deployment via kubectl and save image names/IDs to local files"""
ecode = 0
try:
images = anchore_utils.get_images_from_kubectl()
if images:
anchore_print("Writing image IDs to ./anchore_imageIds.kube")
with open("anchore_imageIds.kube", 'w') as OFH:
for imageId in images:
OFH.write(imageId + "\n")
anchore_print("Writing image names to ./anchore_imageNames.kube")
with open("anchore_imageNames.kube", 'w') as OFH:
for imageId in images:
OFH.write(images[imageId] + "\n")
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode)
@toolbox.command(name='import')
@click.option('--infile', help='input file that contains anchore image data from a previous export', type=click.Path(exists=True), metavar='<file.json>', required=True)
@click.option('--force', help='force import even if an image record is already in place', is_flag=True)
def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode)
@toolbox.command(name='images')
@click.option('--no-trunc', help='Do not truncate imageIds', is_flag=True)
def images(no_trunc):
ecode = 0
try:
anchoreDB = contexts['anchore_db']
header = ["Repository", "Tag", "Image ID", "Distro", "Last Analyzed", "Size"]
result = {"multi":{'result':{'header':header, 'rows':[]}}}
hasData = False
for image in anchoreDB.load_all_images_iter():
try:
imageId = image[0]
imagedata = image[1]
meta = imagedata['meta']
name = meta['humanname']
shortId = meta['shortId']
size = meta['sizebytes']
if no_trunc:
printId = imageId
else:
printId = shortId
patt = re.match("(.*):(.*)", name)
if patt:
repo = patt.group(1)
tag = patt.group(2)
else:
repo = "<none>"
tag = "<none>"
oldtags = ','.join(imagedata['anchore_all_tags'])
if meta['usertype']:
atype = meta['usertype']
else:
atype = "<none>"
distrometa = anchore_utils.get_distro_from_imageId(imageId)
distro = distrometa['DISTRO'] + "/" + distrometa['DISTROVERS']
amanifest = anchoreDB.load_analyzer_manifest(imageId)
                latest = 0
if amanifest:
for a in amanifest.keys():
ts = amanifest[a]['timestamp']
if ts > latest:
latest = ts
if latest:
timestr = datetime.datetime.fromtimestamp(int(latest)).strftime('%m-%d-%Y %H:%M:%S')
else:
timestr = "Not Analyzed"
row = [repo, tag, printId, distro, timestr, str(round(float(size) / 1024.0 / 1024.0, 2)) + "M"]
result['multi']['result']['rows'].append(row)
#t.add_row(row)
hasData = True
except Exception as err:
raise err
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
sys.exit(ecode)
@toolbox.command(name='show')
def show():
"""Show image summary information"""
ecode = 0
try:
o = collections.OrderedDict()
inimage = imagelist[0]
anchoreDB = contexts['anchore_db']
image = anchoreDB.load_image(inimage)
if image:
mymeta = image['meta']
alltags_current = image['anchore_current_tags']
distrodict = anchore_utils.get_distro_from_imageId(inimage)
distro = distrodict['DISTRO']
distrovers = distrodict['DISTROVERS']
base = image['familytree'][0]
o['IMAGEID'] = mymeta.pop('imageId', "N/A")
o['REPOTAGS'] = alltags_current
o['DISTRO'] = distro
o['DISTROVERS'] = distrovers
o['HUMANNAME'] = mymeta.pop('humanname', "N/A")
o['SHORTID'] = mymeta.pop('shortId', "N/A")
o['PARENTID'] = mymeta.pop('parentId', "N/A")
o['BASEID'] = base
o['IMAGETYPE'] = mymeta.pop('usertype', "N/A")
for k in o.keys():
if type(o[k]) is list:
s = ' '.join(o[k])
else:
s = str(o[k])
print k+"='"+s+"'"
else:
raise Exception("cannot locate input image in anchore DB")
except Exception as err:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
def show_orig():
"""Show image summary information"""
if not nav:
sys.exit(1)
ecode = 0
try:
image=contexts['anchore_allimages'][imagelist[0]]
o = collections.OrderedDict()
mymeta = {}
mymeta.update(image.meta)
o['IMAGEID'] = mymeta.pop('imageId', "N/A")
o['REPOTAGS'] = image.get_alltags_current()
o['DISTRO'] = image.get_distro()
o['DISTROVERS'] = image.get_distro_vers()
o['HUMANNAME'] = mymeta.pop('humanname', "N/A")
o['SHORTID'] = mymeta.pop('shortId', "N/A")
o['PARENTID'] = mymeta.pop('parentId', "N/A")
o['BASEID'] = image.get_earliest_base()
o['IMAGETYPE'] = mymeta.pop('usertype', "N/A")
for k in o.keys():
if type(o[k]) is list:
s = ' '.join(o[k])
else:
s = str(o[k])
print k+"='"+s+"'"
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode)
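# Example usage of the subcommands defined above (illustrative):
#   anchore toolbox --image <tag|imageid|digest> show
#   anchore toolbox --image <tag|imageid|digest> unpack --destdir /tmp/unpack
#   anchore toolbox --image <tag|imageid|digest> export --outfile image.json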
|
|
"""The tests for Nest device triggers."""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.nest import DOMAIN
from homeassistant.components.nest.events import NEST_EVENT
from homeassistant.setup import async_setup_component
from .common import async_setup_sdm_platform
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
DEVICE_ID = "some-device-id"
DEVICE_NAME = "My Camera"
DATA_MESSAGE = {"message": "service-called"}
def make_camera(device_id, name=DEVICE_NAME, traits={}):
"""Create a nest camera."""
traits = traits.copy()
traits.update(
{
"sdm.devices.traits.Info": {
"customName": name,
},
"sdm.devices.traits.CameraLiveStream": {
"maxVideoResolution": {
"width": 640,
"height": 480,
},
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
},
}
)
return Device.MakeDevice(
{
"name": device_id,
"type": "sdm.devices.types.CAMERA",
"traits": traits,
},
auth=None,
)
async def async_setup_camera(hass, devices=None):
"""Set up the platform and prerequisites for testing available triggers."""
if not devices:
devices = {DEVICE_ID: make_camera(device_id=DEVICE_ID)}
return await async_setup_sdm_platform(hass, "camera", devices)
async def setup_automation(hass, device_id, trigger_type):
"""Set up an automation trigger for testing triggering."""
return await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_id,
"type": trigger_type,
},
"action": {
"service": "test.automation",
"data": DATA_MESSAGE,
},
},
]
},
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass):
"""Test we get the expected triggers from a nest."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
"sdm.devices.traits.CameraPerson": {},
},
)
await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = await hass.helpers.device_registry.async_get_registry()
device_entry = device_registry.async_get_device(
{("nest", DEVICE_ID)}, connections={}
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "camera_motion",
"device_id": device_entry.id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "camera_person",
"device_id": device_entry.id,
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_multiple_devices(hass):
"""Test we get the expected triggers from a nest."""
camera1 = make_camera(
device_id="device-id-1",
name="Camera 1",
traits={
"sdm.devices.traits.CameraSound": {},
},
)
camera2 = make_camera(
device_id="device-id-2",
name="Camera 2",
traits={
"sdm.devices.traits.DoorbellChime": {},
},
)
await async_setup_camera(hass, {"device-id-1": camera1, "device-id-2": camera2})
registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = registry.async_get("camera.camera_1")
assert entry1.unique_id == "device-id-1-camera"
entry2 = registry.async_get("camera.camera_2")
assert entry2.unique_id == "device-id-2-camera"
triggers = await async_get_device_automations(hass, "trigger", entry1.device_id)
assert len(triggers) == 1
assert triggers[0] == {
"platform": "device",
"domain": DOMAIN,
"type": "camera_sound",
"device_id": entry1.device_id,
}
triggers = await async_get_device_automations(hass, "trigger", entry2.device_id)
assert len(triggers) == 1
assert triggers[0] == {
"platform": "device",
"domain": DOMAIN,
"type": "doorbell_chime",
"device_id": entry2.device_id,
}
async def test_triggers_for_invalid_device_id(hass):
"""Get triggers for a device not found in the API."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
"sdm.devices.traits.CameraPerson": {},
},
)
await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = await hass.helpers.device_registry.async_get_registry()
device_entry = device_registry.async_get_device(
{("nest", DEVICE_ID)}, connections={}
)
assert device_entry is not None
# Create an additional device that does not exist. Fetching supported
# triggers for an unknown device will fail.
assert len(device_entry.config_entries) == 1
config_entry_id = next(iter(device_entry.config_entries))
device_entry_2 = device_registry.async_get_or_create(
config_entry_id=config_entry_id, identifiers={(DOMAIN, "some-unknown-nest-id")}
)
assert device_entry_2 is not None
with pytest.raises(InvalidDeviceAutomationConfig):
await async_get_device_automations(hass, "trigger", device_entry_2.id)
async def test_no_triggers(hass):
"""Test we get the expected triggers from a nest."""
camera = make_camera(device_id=DEVICE_ID, traits={})
await async_setup_camera(hass, {DEVICE_ID: camera})
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.my_camera")
assert entry.unique_id == "some-device-id-camera"
triggers = await async_get_device_automations(hass, "trigger", entry.device_id)
assert triggers == []
async def test_fires_on_camera_motion(hass, calls):
"""Test camera_motion triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {"device_id": DEVICE_ID, "type": "camera_motion"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_camera_person(hass, calls):
"""Test camera_person triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_person")
message = {"device_id": DEVICE_ID, "type": "camera_person"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_camera_sound(hass, calls):
"""Test camera_person triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_sound")
message = {"device_id": DEVICE_ID, "type": "camera_sound"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_doorbell_chime(hass, calls):
"""Test doorbell_chime triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "doorbell_chime")
message = {"device_id": DEVICE_ID, "type": "doorbell_chime"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_trigger_for_wrong_device_id(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {"device_id": "wrong-device-id", "type": "camera_motion"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_trigger_for_wrong_event_type(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {"device_id": DEVICE_ID, "type": "wrong-event-type"}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscriber_automation(hass, calls):
"""Test end to end subscriber triggers automation."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
},
)
subscriber = await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = await hass.helpers.device_registry.async_get_registry()
device_entry = device_registry.async_get_device(
{("nest", DEVICE_ID)}, connections={}
)
assert await setup_automation(hass, device_entry.id, "camera_motion")
# Simulate a pubsub message received by the subscriber with a motion event
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": DEVICE_ID,
"events": {
"sdm.devices.events.CameraMotion.Motion": {
"eventSessionId": "CjY5Y3VKaTZwR3o4Y19YbTVfMF...",
"eventId": "FWWVQVUdGNUlTU2V4MGV2aTNXV...",
},
},
},
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
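# These tests target Home Assistant's pytest harness; a typical run (the path
# is an assumption) would be:
#   pytest tests/components/nest/test_device_trigger.py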
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Django settings for askkit project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if 'SECRET_KEY' in os.environ:
SECRET_KEY = os.environ['SECRET_KEY']
else:
SECRET_KEY = 'mysecretkey'
# SECURITY WARNING: don't run with debug turned on in production!
if 'DJANGO_DEBUG' in os.environ:
DEBUG = True
TEMPLATE_DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = False
# SSL settings
if not DEBUG:
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
ADMINS = (('Sergio', 's.romerobarra@gmail.com'),)
ALLOWED_HOSTS = ['localhost', 'askkit-dev-env.elasticbeanstalk.com', 'askkit-prod-env.elasticbeanstalk.com', 'askkit.net', 'www.askkit.net',]
# Application definition
INSTALLED_APPS = (
#'admin_tools',
#'admin_tools.theming',
#'admin_tools.menu',
#'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# The Django sites framework is required
'django.contrib.sites',
'core',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.facebook',
'users',
'questions',
#'debug_toolbar',
'crispy_forms',
#'rest_framework',
#'betterforms',
'datetimewidget',
'redactor',
'imagekit',
'captcha',
'django_ses',
'storages',
'admin_honeypot',
'compressor',
'djangosecure',
'sanitizer',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'core.middleware.LocaleMiddleware',
'htmlmin.middleware.HtmlMinifyMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
)
ROOT_URLCONF = 'askkit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# `allauth` needs this from django
'django.core.context_processors.request',
# `allauth` specific context processors
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'core.context_processors.common_timezones',
'core.context_processors.debug',
'core.context_processors.get_adsense_user',
'core.context_processors.get_adsense_main',
'core.context_processors.get_adsense_yes',
'core.context_processors.get_analytics_id',
#'core.context_processors.current_timezone',
'django.template.context_processors.i18n',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'askkit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
#LANGUAGE_COOKIE_NAME = 'askkit_language'
LANGUAGES = (
('en', _('English')),
#('es', _('Spanish')),
#('it', _('Italian')),
#('fr', _('French')),
#('de', _('German')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "files", "static"),
)
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "files", "media")
############################################################################################
### COMPRESS CONFIG ########################################################################
############################################################################################
COMPRESS_STORAGE = 'custom_storages.StaticStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = os.path.join(BASE_DIR, "files")
############################################################################################
### AMAZON S3 STORAGES CONFIG ##############################################################
############################################################################################
### AWS4-HMAC-SHA256 ERROR WORKAROUND ####################################
os.environ['S3_USE_SIGV4'] = 'True'
if 'AWS_ACCESS_KEY_ID' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_HOST = 's3.eu-central-1.amazonaws.com'
S3_URL = 'https://%s.%s' % (AWS_STORAGE_BUCKET_NAME, AWS_S3_HOST)
MEDIA_URL = S3_URL + '/media/'
STATIC_URL = S3_URL + '/static/'
### django compress setting
COMPRESS_URL = S3_URL + '/'
############################################################################################
### django-allauth config ##################################################################
############################################################################################
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', ],
'METHOD': 'oauth2' # instead of 'js_sdk'
}
}
if 'AWS_SES_ACCESS_KEY_ID' in os.environ:
EMAIL_BACKEND = 'django_ses.SESBackend'
DEFAULT_FROM_EMAIL = os.environ['DEFAULT_FROM_EMAIL']
AWS_SES_ACCESS_KEY_ID = os.environ['AWS_SES_ACCESS_KEY_ID']
AWS_SES_SECRET_ACCESS_KEY = os.environ['AWS_SES_SECRET_ACCESS_KEY']
AWS_SES_REGION_NAME = 'eu-west-1'
AWS_SES_REGION_ENDPOINT = 'email.eu-west-1.amazonaws.com'
else:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
    EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'faked-emails')
############################################################################################
### CRISPY FORMS CONFIG ####################################################################
############################################################################################
CRISPY_TEMPLATE_PACK = 'bootstrap3'
############################################################################################
### REST FRAMEWORK #########################################################################
############################################################################################
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
############################################################################################
### REDACTOR ##############################################################################
############################################################################################
#REDACTOR_UPLOAD_HANDLER = 'redactor.handlers.DateDirectoryUploader'
#REDACTOR_AUTH_DECORATOR = 'django.contrib.auth.decorators.login_required'
#REDACTOR_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REDACTOR_OPTIONS = {'lang': 'en', 'django_lang': True,}
############################################################################################
### REPLIES ################################################################################
############################################################################################
MAX_REPLIES_REGISTERED = 4
############################################################################################
### HTML MINIFY ############################################################################
############################################################################################
if DEBUG:
HTML_MINIFY = False
else:
HTML_MINIFY = True
############################################################################################
### RECAPTCHA ##############################################################################
############################################################################################
if 'RECAPTCHA_PUBLIC_KEY' in os.environ:
RECAPTCHA_PUBLIC_KEY = os.environ['RECAPTCHA_PUBLIC_KEY']
RECAPTCHA_PRIVATE_KEY = os.environ['RECAPTCHA_PRIVATE_KEY']
else:
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_PRIVATE_KEY = ''
NOCAPTCHA = True
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
############################################################################################
### ADSENSE SETTINGS #######################################################################
############################################################################################
if 'ADSENSE_YES' in os.environ:
ADSENSE_YES = True
ADSENSE_USER = os.environ['ADSENSE_USER']
ADSENSE_MAIN = os.environ['ADSENSE_MAIN']
else:
ADSENSE_YES = False
ADSENSE_USER = ''
ADSENSE_MAIN = ''
############################################################################################
### ANALYTICS SETTINGS #####################################################################
############################################################################################
G_ANALYTICS_ID = None
if 'G_ANALYTICS_ID' in os.environ:
G_ANALYTICS_ID = os.environ['G_ANALYTICS_ID']
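# Environment variables consumed by this settings module (all optional, with
# fallbacks as defined in the blocks above): SECRET_KEY, DJANGO_DEBUG, the
# RDS_* database credentials, the AWS_* storage and SES credentials,
# DEFAULT_FROM_EMAIL, the RECAPTCHA_* keys, ADSENSE_YES/ADSENSE_USER/
# ADSENSE_MAIN and G_ANALYTICS_ID.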
|
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=invalid-name
"""Facilities to construct SDP Verification problem instances."""
import collections
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import utils
import numpy as np
################## SDP Verification Instances ####################
# Dual variables correspond to:
# lam: ReLU quadratic constraint: z^2 = z*(Wx)
# nu: IBP quadratic constraint: x^2 <= (l+u)*x - l*u
# nu_quad: IBP quadratic matrix constraint: (x_i - l_i)(x_j - u_j) <= 0
# muminus: x'>=0
# muminus2: Triangle linear Relu relaxation - u(Wx+b) - ul - (u-l)x' >= 0
# where l = min(l, 0), u = max(u, 0)
# muplus: x'>=Wx+b
DualVar = collections.namedtuple(
'DualVar', ['lam', 'nu', 'nu_quad', 'muminus', 'muplus', 'muminus2'])
DualVarFin = collections.namedtuple('DualVarFin', ['nu', 'nu_quad'])
DEFAULT_DISABLED_DUAL_VARS = ('nu_quad', 'muminus2')
NECESSARY_DUAL_VARS = ('lam', 'muplus', 'muminus')
def make_relu_network_lagrangian(dual_vars, params, bounds, obj):
"""Returns a function that computes the Lagrangian for a ReLU network.
  This function assumes `params` represents a feedforward ReLU network, i.e.
  x_{i+1} = relu(W_i x_i + b_i). It defines the Lagrangian by applying the
  objective `obj` to the final layer activations and encoding the Lagrangian
  terms for each of the constraints defining the ReLU network.
Args:
dual_vars: A length L+1 list of dual variables at each layer
params: A length L list of (W, b) pairs, elided network weights
bounds: A length L+1 list of `IntBound`s, elided bounds at each layer
obj: function, taking final layer activations as input
Returns:
Function that computes Lagrangian L(x) with fixed `dual_vars`.
"""
layer_sizes = utils.layer_sizes_from_bounds(bounds)
def lagrangian(xs_list):
"""Computes Lagrangian L(x) with fixed `dual_vars`."""
assert all([x.shape[0] == 1 for x in xs_list]), 'no batch mode support'
lag = obj(xs_list[-1])
for i in range(len(layer_sizes)):
if i < len(params):
y = utils.fwd(xs_list[i], params[i])
# Lagrangian for constraint x' * x' = x' * (Wx+b) where x'=ReLU(Wx+b)
lag += (jnp.sum(dual_vars[i].lam * xs_list[i + 1] *
(y - xs_list[i + 1])))
# Lagrangian for the constraint x'>=Wx+b
lag += jnp.sum(dual_vars[i].muplus * (xs_list[i + 1] - y))
if dual_vars[i].muminus2.shape:
# Lagrangian for u(Wx+b) - ul - (u-l)x' >= 0, where
# l = min(l, 0) and u = max(u, 0)
raise NotImplementedError('dropped support for muminus2')
# Lagrangian for the constraint x'>=0
lag += jnp.sum(dual_vars[i].muminus * xs_list[i + 1])
# Lagrangian for IBP constraint (x-l)(x-u) <= 0
if dual_vars[i].nu.shape:
        lag += -jnp.sum(dual_vars[i].nu *
                        (xs_list[i] - bounds[i].lb) * (xs_list[i] - bounds[i].ub))
if dual_vars[i].nu_quad.shape:
# IBP quadratic matrix constraint: (x_i - l_i)(x_j - u_j) <= 0
lag += -jnp.sum(dual_vars[i].nu_quad *
jnp.matmul((xs_list[i]-bounds[i].lb).T,
xs_list[i]-bounds[i].ub))
return lag
return lagrangian
def relu_robustness_verif_instance_to_sdp(verif_instance):
"""Convert solver-agnostic VerifInstance to SdpDualVerifInstance."""
assert verif_instance.type in [
utils.VerifInstanceTypes.MLP_ELIDED, utils.VerifInstanceTypes.CNN_ELIDED]
elided_bounds = verif_instance.bounds[:-1]
dual_shapes, dual_types = get_dual_shapes_and_types(elided_bounds)
def obj(x_final):
out = jnp.sum(x_final * jnp.reshape(verif_instance.obj, x_final.shape))
return out + verif_instance.const
def make_inner_lagrangian(dual_vars):
return make_relu_network_lagrangian(
dual_vars, verif_instance.params, elided_bounds, obj)
return utils.SdpDualVerifInstance(
make_inner_lagrangian=make_inner_lagrangian,
bounds=elided_bounds,
dual_shapes=dual_shapes,
dual_types=dual_types)
def make_sdp_verif_instance(verif_instance):
if isinstance(verif_instance, utils._AdvRobustnessVerifInstance): # pylint: disable=protected-access
return relu_robustness_verif_instance_to_sdp(verif_instance)
else:
raise NotImplementedError('unrecognized verif_instance type')
def make_vae_sdp_verif_instance(params, data_x, bounds):
"""Make SdpDualVerifInstance for VAE reconstruction error spec."""
elided_params = params[:-1]
elided_bounds = bounds[:-1]
dual_shapes, dual_types = get_dual_shapes_and_types(elided_bounds)
def recon_loss(x_final):
x_hat = utils.predict_cnn(params[-1:], x_final).reshape(1, -1)
return jnp.sum(jnp.square(data_x.reshape(x_hat.shape) - x_hat))
def make_inner_lagrangian(dual_vars):
return make_relu_network_lagrangian(
dual_vars, elided_params, elided_bounds, recon_loss)
return utils.SdpDualVerifInstance(
make_inner_lagrangian=make_inner_lagrangian,
bounds=elided_bounds,
dual_shapes=dual_shapes,
dual_types=dual_types)
def make_vae_semantic_spec_params(x, vae_params, classifier_params):
"""Defines network f(z_noise) = classifier(reconstruct(x, z_noise))."""
# Setup - encoder fwd pass
encoder_params, decoder_params = vae_params
encoder_mu_params = encoder_params[:-1]
encoder_sigmasq_params = encoder_params[:-2] + [encoder_params[-1]]
mu_z = utils.predict_cnn(encoder_mu_params, x)
log_sigmasq_z = utils.predict_cnn(encoder_sigmasq_params, x)
sigmasq_z = jnp.exp(log_sigmasq_z)
# Combine the reparameterization with the first decoder layer
# z0 = mu + sigma * z
# z1 = jnp.dot(z0, W) + b
# = jnp.dot(mu + sigma * z, W) + b
# = jnp.dot(z, sigma * W) + [b + jnp.dot(mu, W)]
assert isinstance(decoder_params[0], tuple)
W0_orig, b0_orig = decoder_params[0]
W0 = W0_orig * jnp.reshape(jnp.sqrt(sigmasq_z), (-1, 1))
b0 = b0_orig + jnp.dot(mu_z, W0_orig)
# Now the network is just concatenation of modified decoder + classifier
# This is also applying a Relu to decoder output, but that's fine
combined_params = [(W0, b0)] + decoder_params[1:] + classifier_params
return combined_params
def get_dual_shapes_and_types(bounds_elided):
"""Get shapes and types of dual vars."""
dual_shapes = []
dual_types = []
layer_sizes = utils.layer_sizes_from_bounds(bounds_elided)
for it in range(len(layer_sizes)):
m = layer_sizes[it]
m = [m] if isinstance(m, int) else list(m)
if it < len(layer_sizes)-1:
n = layer_sizes[it + 1]
n = [n] if isinstance(n, int) else list(n)
shapes = {
'lam': [1] + n,
'nu': [1] + m,
'muminus': [1] + n,
'muplus': [1] + n,
'nu_quad': [], 'muminus2': [],
}
types = {
'lam': utils.DualVarTypes.EQUALITY,
'nu': utils.DualVarTypes.INEQUALITY,
'muminus': utils.DualVarTypes.INEQUALITY,
'muplus': utils.DualVarTypes.INEQUALITY,
'nu_quad': utils.DualVarTypes.INEQUALITY,
'muminus2': utils.DualVarTypes.INEQUALITY,
}
dual_shapes.append(DualVar(**{
k: np.array(s) for k, s in shapes.items()}))
dual_types.append(DualVar(**types))
else:
shapes = {'nu': [1] + m, 'nu_quad': []}
types = {'nu': utils.DualVarTypes.INEQUALITY,
'nu_quad': utils.DualVarTypes.INEQUALITY}
dual_shapes.append(DualVarFin(**{
k: np.array(s) for k, s in shapes.items()}))
dual_types.append(DualVarFin(**types))
# Add kappa
N = sum([np.prod(np.array(i)) for i in layer_sizes])
dual_shapes.append(np.array([1, N+1]))
dual_types.append(utils.DualVarTypes.INEQUALITY)
return dual_shapes, dual_types
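# A minimal usage sketch (illustrative; constructing `verif_instance` itself
# relies on jax_verify helpers not shown here):
#   sdp_instance = make_sdp_verif_instance(verif_instance)
#   lagrangian = sdp_instance.make_inner_lagrangian(dual_vars)
#   value = lagrangian(xs_list)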
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume v2 transfer action implementations"""
import logging
from cinderclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class AcceptTransferRequest(command.ShowOne):
_description = _("Accept volume transfer request.")
def get_parser(self, prog_name):
parser = super(AcceptTransferRequest, self).get_parser(prog_name)
parser.add_argument(
'transfer_request',
metavar="<transfer-request-id>",
help=_('Volume transfer request to accept (ID only)'),
)
parser.add_argument(
'--auth-key',
metavar="<key>",
required=True,
help=_('Volume transfer request authentication key'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
try:
transfer_request_id = utils.find_resource(
volume_client.transfers,
parsed_args.transfer_request
).id
except exceptions.CommandError:
# Non-admin users will fail to lookup name -> ID so we just
# move on and attempt with the user-supplied information
transfer_request_id = parsed_args.transfer_request
transfer_accept = volume_client.transfers.accept(
transfer_request_id,
parsed_args.auth_key,
)
transfer_accept._info.pop("links", None)
return zip(*sorted(transfer_accept._info.items()))
class CreateTransferRequest(command.ShowOne):
_description = _("Create volume transfer request.")
def get_parser(self, prog_name):
parser = super(CreateTransferRequest, self).get_parser(prog_name)
parser.add_argument(
'--name',
metavar="<name>",
help=_('New transfer request name (default to None)'),
)
parser.add_argument(
'--snapshots',
action='store_true',
dest='snapshots',
help=_(
                'Allow transferring volumes without snapshots (default) '
'(supported by --os-volume-api-version 3.55 or later)'
),
default=None,
)
parser.add_argument(
'--no-snapshots',
action='store_false',
dest='snapshots',
help=_(
                'Disallow transferring volumes without snapshots '
'(supported by --os-volume-api-version 3.55 or later)'
),
)
parser.add_argument(
'volume',
metavar="<volume>",
help=_('Volume to transfer (name or ID)'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
kwargs = {}
if parsed_args.snapshots is not None:
if volume_client.api_version < api_versions.APIVersion('3.55'):
msg = _(
"--os-volume-api-version 3.55 or greater is required to "
"support the '--(no-)snapshots' option"
)
raise exceptions.CommandError(msg)
# unfortunately this option is negative so we have to reverse
# things
kwargs['no_snapshots'] = not parsed_args.snapshots
volume_id = utils.find_resource(
volume_client.volumes,
parsed_args.volume,
).id
volume_transfer_request = volume_client.transfers.create(
volume_id,
parsed_args.name,
**kwargs,
)
volume_transfer_request._info.pop("links", None)
return zip(*sorted(volume_transfer_request._info.items()))
class DeleteTransferRequest(command.Command):
_description = _("Delete volume transfer request(s).")
def get_parser(self, prog_name):
parser = super(DeleteTransferRequest, self).get_parser(prog_name)
parser.add_argument(
'transfer_request',
metavar="<transfer-request>",
nargs="+",
help=_('Volume transfer request(s) to delete (name or ID)'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
result = 0
for t in parsed_args.transfer_request:
try:
transfer_request_id = utils.find_resource(
volume_client.transfers,
t,
).id
volume_client.transfers.delete(transfer_request_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete volume transfer request "
"with name or ID '%(transfer)s': %(e)s")
% {'transfer': t, 'e': e})
if result > 0:
total = len(parsed_args.transfer_request)
msg = (_("%(result)s of %(total)s volume transfer requests failed"
" to delete") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListTransferRequest(command.Lister):
    _description = _("List all volume transfer requests.")
def get_parser(self, prog_name):
parser = super(ListTransferRequest, self).get_parser(prog_name)
parser.add_argument(
'--all-projects',
dest='all_projects',
action="store_true",
default=False,
help=_('Include all projects (admin only)'),
)
return parser
def take_action(self, parsed_args):
columns = ['ID', 'Name', 'Volume ID']
column_headers = ['ID', 'Name', 'Volume']
volume_client = self.app.client_manager.volume
volume_transfer_result = volume_client.transfers.list(
detailed=True,
search_opts={'all_tenants': parsed_args.all_projects},
)
return (column_headers, (
utils.get_item_properties(s, columns)
for s in volume_transfer_result))
class ShowTransferRequest(command.ShowOne):
_description = _("Show volume transfer request details.")
def get_parser(self, prog_name):
parser = super(ShowTransferRequest, self).get_parser(prog_name)
parser.add_argument(
'transfer_request',
metavar="<transfer-request>",
help=_('Volume transfer request to display (name or ID)'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
volume_transfer_request = utils.find_resource(
volume_client.transfers,
parsed_args.transfer_request,
)
volume_transfer_request._info.pop("links", None)
return zip(*sorted(volume_transfer_request._info.items()))
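# Illustrative CLI invocations for the command classes above (a hedged
# sketch; assumes the standard python-openstackclient entry points):
#
#   openstack volume transfer request create --name backup vol1
#   openstack volume transfer request accept --auth-key <key> <request-id>
#   openstack volume transfer request list --all-projects
#   openstack volume transfer request show <request>
#   openstack volume transfer request delete <request>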
|
|
# ------------------------------------------------------------------------
#
# Copyright 2005-2015 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# ------------------------------------------------------------------------
import subprocess
import socket
import os
from plugins.contracts import ICartridgeAgentPlugin
from modules.util.log import LogFactory
from entity import *
from config import Config
class WSO2StartupHandler(ICartridgeAgentPlugin):
"""
    Configures and starts the configurator and the Carbon server.
"""
log = LogFactory().get_log(__name__)
# class constants
CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
CONST_APPLICATION_ID = "APPLICATION_ID"
CONST_MB_IP = "MB_IP"
CONST_SERVICE_NAME = "SERVICE_NAME"
CONST_CLUSTER_ID = "CLUSTER_ID"
CONST_WORKER = "worker"
CONST_MANAGER = "manager"
CONST_MGT = "mgt"
CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
CONST_PROTOCOL_HTTP = "http"
CONST_PROTOCOL_HTTPS = "https"
CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
CONST_PRODUCT = "AS"
SERVICES = ["wso2as-530-manager", "wso2as-530-worker"]
# list of environment variables exported by the plugin
ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'
# clustering related environment variables read from payload_parameters
ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
def run_plugin(self, values):
# read from 'values'
port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
app_id = values[self.CONST_APPLICATION_ID]
mb_ip = values[self.CONST_MB_IP]
service_type = values[self.CONST_SERVICE_NAME]
my_cluster_id = values[self.CONST_CLUSTER_ID]
clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
# read topology from PCA TopologyContext
topology = TopologyContext.topology
# log above values
WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
WSO2StartupHandler.log.info("Application ID: %s" % app_id)
WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
WSO2StartupHandler.log.info("Service Name: %s" % service_type)
WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
WSO2StartupHandler.log.info("Clustering: %s" % clustering)
WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
# export Proxy Ports as Env. variables - used in catalina-server.xml
mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
self.CONST_PROTOCOL_HTTP)
mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
self.CONST_PROTOCOL_HTTPS)
self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)
# set sub-domain
sub_domain = None
if service_type.endswith(self.CONST_MANAGER):
sub_domain = self.CONST_MGT
elif service_type.endswith(self.CONST_WORKER):
sub_domain = self.CONST_WORKER
self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)
        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, default to the private-paas membership scheme
if clustering == 'true' and membership_scheme is None:
membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)
# check if clustering is enabled
if clustering == 'true':
# set hostnames
self.export_host_names(topology, app_id)
# check if membership scheme is set to 'private-paas'
if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
# export Cluster_Ids as Env. variables - used in axis2.xml
self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
# export mb_ip as Env.variable - used in jndi.properties
self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)
# set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
private_ip = self.get_member_private_ip(topology, Config.service_name, Config.cluster_id, Config.member_id)
self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, private_ip)
# start configurator
WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
env_var = os.environ.copy()
p = subprocess.Popen(config_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)
# start server
WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
if service_type.endswith(self.CONST_WORKER):
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
else:
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
env_var = os.environ.copy()
p = subprocess.Popen(start_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)
def get_member_private_ip(self, topology, service_name, cluster_id, member_id):
service = topology.get_service(service_name)
if service is None:
raise Exception("Service not found in topology [service] %s" % service_name)
cluster = service.get_cluster(cluster_id)
if cluster is None:
raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
member = cluster.get_member(member_id)
if member is None:
raise Exception("Member id not found in topology [member] %s" % member_id)
if member.member_default_private_ip and not member.member_default_private_ip.isspace():
WSO2StartupHandler.log.info(
"Member private ip read from the topology: %s" % member.member_default_private_ip)
return member.member_default_private_ip
else:
local_ip = socket.gethostbyname(socket.gethostname())
WSO2StartupHandler.log.info(
"Member private ip not found in the topology. Reading from the socket interface: %s" % local_ip)
return local_ip
def export_host_names(self, topology, app_id):
"""
        Sets the hostnames of the services read from the topology for
        worker/manager instances; exports MgtHostName and HostName.
:return: void
"""
mgt_host_name = None
host_name = None
for service_name in self.SERVICES:
if service_name.endswith(self.CONST_MANAGER):
mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if mgr_cluster is not None:
mgt_host_name = mgr_cluster.hostnames[0]
elif service_name.endswith(self.CONST_WORKER):
worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if worker_cluster is not None:
host_name = worker_cluster.hostnames[0]
self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
"""
        Sets the clusterIds of the services read from the topology for
        worker/manager instances; otherwise uses this instance's own clusterId.
:return: void
"""
cluster_ids = []
cluster_id_of_service = None
if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
for service_name in self.SERVICES:
cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
if cluster_of_service is not None:
cluster_id_of_service = cluster_of_service.cluster_id
if cluster_id_of_service is not None:
cluster_ids.append(cluster_id_of_service)
else:
cluster_ids.append(my_cluster_id)
# If clusterIds are available, export them as environment variables
if cluster_ids:
cluster_ids_string = ",".join(cluster_ids)
self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)
@staticmethod
def get_cluster_of_service(topology, service_name, app_id):
cluster_obj = None
clusters = None
if topology is not None:
if topology.service_exists(service_name):
service = topology.get_service(service_name)
if service is not None:
clusters = service.get_clusters()
else:
WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
else:
WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
else:
WSO2StartupHandler.log.warn("Topology is empty.")
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
cluster_obj = cluster
return cluster_obj
@staticmethod
def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
"""
        Returns the proxy port of the requested port mapping.
        :return: proxy port string, or None if no match is found
"""
# port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
# NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
# NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
# NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort
if port_mappings_str is not None:
port_mappings_array = port_mappings_str.split(";")
if port_mappings_array:
for port_mapping in port_mappings_array:
# WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
name_value_array = port_mapping.split("|")
name = name_value_array[0].split(":")[1]
protocol = name_value_array[1].split(":")[1]
proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set, use PORT as the proxy port (e.g. Kubernetes)
if proxy_port == '0':
proxy_port = name_value_array[2].split(":")[1]
if name == port_mapping_name and protocol == port_mapping_protocol:
return proxy_port
@staticmethod
def export_env_var(variable, value):
"""
        Exports a key/value pair as an environment variable.
        :return: void
"""
if value is not None:
os.environ[variable] = value
WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
else:
WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
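# --- Hedged usage sketch (not part of the original plugin) ------------------
# Exercises read_proxy_port against the documented port-mapping format: the
# first lookup falls back from a PROXY_PORT of 0 to PORT, the second returns
# the configured proxy port directly.
def _demo_read_proxy_port():
    mappings = ("NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;"
                "NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP")
    assert WSO2StartupHandler.read_proxy_port(mappings, "mgt-http", "http") == "30001"
    assert WSO2StartupHandler.read_proxy_port(mappings, "pt-http", "http") == "7280"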
|
|
#
# (c) Christian Sommerfeldt OEien
# All rights reserved
from math import floor, pi, cos, sin
from rmg.math_ import rnd, matrix_multiply
class Color:
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
def __mul__(self, o):
if isinstance(o, Color):
return Color(self.r * o.r, self.g * o.g, self.b * o.b)
else:
return Color(self.r * o, self.g * o, self.b * o)
def __pow__(self, e):
return Color(self.r ** e, self.g ** e, self.b ** e)
def __add__(self, o):
return Color(self.r + o.r, self.g + o.g, self.b + o.b)
def __sub__(self, o):
return self + o*-1
def mix(self, o, f = 0.5):
return self * (1 - f) + o * f
def cap(self):
if self.r > 1: self.r = 1
elif self.r < 0: self.r = 0
if self.g > 1: self.g = 1
elif self.g < 0: self.g = 0
if self.b > 1: self.b = 1
elif self.b < 0: self.b = 0
@staticmethod
def gray(v):
return Color(v, v, v)
@staticmethod
def random(saturation = None, value = None):
if saturation is None and value is None:
saturation = value = 1
elif saturation is None:
saturation = rnd(1 - value, 1)
elif value is None:
value = rnd(1 - saturation, 1)
return Color.from_hsv(rnd(0, 2 * pi), saturation, value)
@staticmethod
def random_gray():
return Color.gray(rnd(0, 1))
def rgb(self):
return (self.r, self.g, self.b)
@staticmethod
def from_binary_rgb(data):
return Color(
data[0] / 255.0,
data[1] / 255.0,
data[2] / 255.0) ** 2.2 # note: gamma
def binary_rgb(self):
y = self ** (1 / 2.2) # note: gamma
return bytes([
int(round(y.r * 255)),
int(round(y.g * 255)),
int(round(y.b * 255))])
@staticmethod
def from_int_rgb(i):
return Color(
((i ) & 1023) / 1023.0,
((i >> 10) & 1023) / 1023.0,
((i >> 20) & 1023) / 1023.0)
def int_rgb(self):
return (int(round(self.r * 1023)) ) \
+ (int(round(self.g * 1023)) << 10) \
+ (int(round(self.b * 1023)) << 20)
def hsv(self):
r, g, b = self.rgb()
min_ = min(r, g, b)
max_ = max(r, g, b)
v = max_
delta = max_ - min_
if delta == 0: return (None, 0, v)
s = delta / max_
if r == max_:
h = (g - b) / delta
if h < 0: h += 6
elif g == max_: h = 2 + (b - r) / delta
else: h = 4 + (r - g) / delta
assert h >= 0
assert h <= 6
if h == 6:
h = 0
h *= pi / 3
return (h, s, v)
@staticmethod
def from_hsv(h, s = 1, v = 1):
if s == 0:
return Color.gray(v)
h %= 2 * pi
h *= 3 / pi
i = floor(h)
f = h - i
p = v * (1 - s)
q = v * (1 - s * f)
t = v * (1 - s * (1 - f))
if i == 0: rgb = (v, t, p)
elif i == 1: rgb = (q, v, p)
elif i == 2: rgb = (p, v, t)
elif i == 3: rgb = (p, q, v)
elif i == 4: rgb = (t, p, v)
elif i == 5: rgb = (v, p, q)
return Color(*rgb)
def intensity(self):
return .299 * self.r + .587 * self.g + .114 * self.b
def __str__(self):
return "%LG %LG %LG" % (self.r, self.g, self.b)
black = opaque = Color(0, 0, 0)
white = transparent = Color(1, 1, 1)
red = Color(1, 0, 0)
green = Color(0, 1, 0)
blue = Color(0, 0, 1)
class InkColor:
def __init__(self, c, m, y):
self.c = c
self.m = m
self.y = y
def cmy(self):
return (self.c, self.m, self.y)
def cap(self):
if self.c > 1: self.c = 1
elif self.c < 0: self.c = 0
if self.m > 1: self.m = 1
elif self.m < 0: self.m = 0
if self.y > 1: self.y = 1
elif self.y < 0: self.y = 0
@staticmethod
def gray(v):
k = 1 - v
return InkColor(k, k, k)
def hsv(self):
c, m, y = self.cmy()
k = min(c, m, y)
w = max(c, m, y)
v = 1 - k
if w == k:
return (None, 0, v)
if k > 0:
c = (c - k) / v
m = (m - k) / v
y = (y - k) / v
s = max(c, m, y)
if y == s: h = 1 + (c - m) / s
elif c == s: h = 3 + (m - y) / s
else: h = 5 + (y - c) / s
assert h >= 0
assert h <= 6
if h == 6:
h = 0
h *= pi / 3
return (h, s, v)
@staticmethod
def from_hsv(h, s = 1, v = 1):
if s == 0:
return InkColor.gray(v)
h %= 2 * pi
h *= 3 / pi
i = floor(h)
f = h - i
t = s * f
u = s * (1 - f)
k = 1 - v
u += (1 - u) * k
s += (1 - s) * k
t += (1 - t) * k
if i == 0: c, m, y = k, u, s
elif i == 1: c, m, y = t, k, s
elif i == 2: c, m, y = s, k, u
elif i == 3: c, m, y = s, t, k
elif i == 4: c, m, y = u, s, k
elif i == 5: c, m, y = k, s, t
return InkColor(c, m, y)
def __mul__(a, b):
if isinstance(b, InkColor):
return InkColor(a.c*b.c, a.m*b.m, a.y*b.y)
else:
return InkColor(a.c*b, a.m*b, a.y*b)
def __add__(a, b):
return InkColor(a.c+b.c, a.m+b.m, a.y+b.y)
def __sub__(a, b):
return a + b * -1
white_ink = InkColor(0, 0, 0)
black_ink = InkColor(1, 1, 1)
cyan_ink = InkColor(1, 0, 0)
magenta_ink = InkColor(0, 1, 0)
yellow_ink = InkColor(0, 0, 1)
class TvColor: # YUV for HDTV
def __init__(self, y, u, v):
self.y = y # [ 0,1]
self.u = u # [-1,1]
self.v = v # [-1,1]
def yuv(self):
return (self.y, self.u, self.v)
@staticmethod
def from_rgb(r, g, b):
return TvColor(*matrix_multiply([
[0.2126, 0.7152, 0.0722],
[-0.09991, -0.33609, 0.436],
[0.615, -0.55861, -0.05639]], [r, g, b]))
def __add__(self, other):
        return TvColor(self.y + other.y, self.u + other.u, self.v + other.v)
def rgb(self):
return matrix_multiply([
[1, 0, 1.28033],
[1, -0.21482, -0.38059],
[1, 2.12798, 0]], self.yuv())
class Optics:
def __init__(self, reflection, absorption, index = -1,
refraction = black, passthrough = black):
self.reflection_filter = reflection
self.absorption_filter = absorption
self.refraction_index = index
self.refraction_filter = refraction
self.passthrough_filter = passthrough
def __str__(self):
return "optics %s %s\n%LG %s %s" % (
self.reflection_filter,
self.absorption_filter,
self.refraction_index,
self.refraction_filter,
self.passthrough_filter)
|
|
# Important stuff
import os
import time
# bug note
# Global variables
empate = 0
Novo_Jogo = 0
x = 0
end_game = 0
l1 = ' | | '
l2 = ' t | i | c '
l3 = '_____|_____|_____'
l4 = ' | | '
l5 = ' t | a | c '
l6 = '_____|_____|_____'
l7 = ' | | '
l8 = ' t | o | e '
l9 = ' | | '
L1 = [x for x in l1]
L2 = [x for x in l2]
L3 = [x for x in l3]
L4 = [x for x in l4]
L5 = [x for x in l5]
L6 = [x for x in l6]
L7 = [x for x in l7]
L8 = [x for x in l8]
L9 = [x for x in l9]
T=[L1,L2,L3,L4,L5,L6,L7,L8,L9]
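# The playable squares of the 3x3 board sit at text rows 1, 4, 7 and text
# columns 2, 8, 14 of T (e.g. T[4][8] is the centre square); the loops
# below all walk those two index lists.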
# Functions:
# function to convert each row to a string before printing
def tabuleiro (T):
os.system('clear')
for linha in T:
lt = ''
for esp in linha:
lt = lt + str(esp)
print lt
# function to test for victory/draw
def verifica (T):
global empate
global end_game
empate += 1
    # horizontal: keep the row, check the columns
vic = [True,True,True]
a = ['']*3
count = 0
for l in [1,4,7]:
for c in [2,8,14]:
if count <= 3:
a[count] = T[l][c]
count += 1
if map(lambda z: z== 'O', a) == vic:
                print 'Player O wins!'
count = 0
a = ['']*3
empate = 0
end_game = 1
elif map(lambda z: z== 'X', a) == vic:
                print 'Player X wins!'
count = 0
a = ['']*3
empate = 0
end_game = 1
elif count == 3:
count = 0
a = ['']*3
    # vertical: keep the column, check the rows
b = ['']*3
count2 = 0
for c in [2,8,14]:
for l in [1,4,7]:
if count2 <= 3:
b[count2] = T[l][c]
count2 += 1
if map(lambda z: z== 'O', b) == vic:
                print 'Player O wins!'
count2 = 0
b = ['']*3
empate = 0
end_game = 1
elif map(lambda z: z== 'X', b) == vic:
                print 'Player X wins!'
count2 = 0
b = ['']*3
empate = 0
end_game = 1
elif count2 == 3:
count2 = 0
b = ['']*3
    # main diagonal: row and column have the same value in the game, but not in the program
e = ['']*3
count3 = 0
for lc in [[1,2],[4,8],[7,14]]:
if count3 <= 3:
e[count3] = T[lc[0]][lc[1]]
count3 += 1
if map(lambda z: z== 'O', e) == vic:
            print 'Player O wins!'
count3 = 0
e = ['']*3
empate = 0
end_game = 1
elif map(lambda z: z== 'X', e) == vic:
            print 'Player X wins!'
count3 = 0
e = ['']*3
empate = 0
end_game = 1
elif count3 == 3:
count3 = 0
e = ['']*3
    # secondary diagonal: row and column do not have the same value in the game, nor in the program
f = ['']*3
count4 = 0
for lc in [[1,14],[4,8],[7,2]]:
if count4 <= 3:
f[count4] = T[lc[0]][lc[1]]
count4 += 1
if map(lambda z: z== 'O', f) == vic:
            print 'Player O wins!'
count4 = 0
f = ['']*3
empate = 0
end_game = 1
elif map(lambda z: z== 'X', f) == vic:
            print 'Player X wins!'
count4 = 0
f = ['']*3
empate = 0
end_game = 1
elif count4 == 3:
count4 = 0
f = ['']*3
    # DRAW: every square filled with no winner
if empate == 9:
        print 'GAME DRAWN'
end_game = 1
def jogadas(xis):
global x
    jogador = ['O','X']
M=[[2,8,14],[1,4,7]]
    s = ['Row','Column']
ajuste = []
free_position = 0
    print "Player %s's turn\n" %(jogador[x])
while free_position == 0:
for a in range(0,2):
w = 0
while w == 0:
while True:
try:
                        input_lc = int(raw_input('Enter the %s number (1, 2 or 3)\n' %(s[a].lower())))
break
except ValueError:
                        print 'Invalid choice, enter a number from 1 to 3'
time.sleep(1)
tabuleiro(T)
if input_lc == 1:
teste = 1
                    if teste == 1: # full row/column check
d=[' ']*3
d1 = 0
for teste2 in M[a]:
if a == 0:
d[d1] = T[1][teste2]
d1 += 1
elif a==1:
d[d1] = T[teste2][2]
d1 += 1
if map(lambda a: a != ' ', d) == [True, True, True]:
                            print '%s full, enter again\n' %(s[a])
time.sleep(2)
tabuleiro(T)
teste = 0
else:
if a == 0:
l = 1
w = 1
elif a == 1:
c = 2
w = 1
elif input_lc == 2:
teste = 1
                    if teste == 1: # full row/column check
d=[' ']*3
d1 = 0
for teste2 in M[a]:
if a == 0:
d[d1] = T[4][teste2]
d1 += 1
elif a==1:
d[d1] = T[teste2][8]
d1 += 1
if map(lambda a: a != ' ', d) == [True, True, True]:
                            print '%s full, enter again\n' %(s[a])
time.sleep(2)
tabuleiro(T)
teste = 0
else:
if a == 0:
l = 4
w = 1
elif a == 1:
c = 8
w = 1
elif input_lc == 3:
teste = 1
                    if teste == 1: # full row/column check
d=[' ']*3
d1 = 0
for teste2 in M[a]:
if a == 0:
d[d1] = T[7][teste2]
d1 += 1
elif a==1:
d[d1] = T[teste2][14]
d1 += 1
if map(lambda a: a != ' ', d) == [True, True, True]:
                            print '%s full, enter again\n' %(s[a])
time.sleep(2)
tabuleiro(T)
teste = 0
else:
if a == 0:
l = 7
w = 1
elif a == 1:
c = 14
w = 1
else:
                    print 'Invalid move'
w = 0
if T[l][c] == ' ':
if x == 0:
T[l][c] = 'O'
tabuleiro(T)
free_position = 1
x = 1
elif x == 1:
T[l][c] = 'X'
tabuleiro(T)
free_position = 1
x = 0
else:
            print 'Square taken'
time.sleep(2)
tabuleiro(T)
free_position = 0
# Game start:
# Intro banner
inicio = 0
sp = ['',' ']*5
while inicio <= 9:
    print '%sWelcome to tic tac toe' %(sp[inicio])
inicio += 1
time.sleep(.15)
os.system('clear')
tabuleiro(T)
sacanagem = 1
while sacanagem == 1:
    print 'Start a new game?\n'
    NG = str(raw_input('Y/N\n')).lower()
    if NG == 'y':
for limpa in [1,4,7]:
for limpa2 in [2,8,14]:
T[limpa][limpa2] = ' '
os.system('clear')
tabuleiro(T)
sacanagem2 = 1
while sacanagem2 == 1:
            player = str(raw_input('Which player starts? X or O\n')).lower()
if player == 'x':
x = 1
sacanagem2 = 0
end_game = 0
elif player == 'o':
x = 0
sacanagem2 = 0
end_game = 0
else:
sacanagem2=1
                print 'Invalid choice. X or O\n'
tabuleiro(T)
elif NG == 'n':
print 'Good Bye!!'
end_game = 1
sacanagem = 0
else:
        print 'Invalid choice.\n'
        sacanagem = 1
tabuleiro(T)
while end_game == 0:
jogadas(x)
verifica(T)
time.sleep(1)
sacanagem = 1
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import threading
import boto
from boto import handler
from boto.connection import AWSQueryConnection
from boto.sdb.domain import Domain, DomainMetaData
from boto.sdb.item import Item
from boto.sdb.regioninfo import SDBRegionInfo
from boto.exception import SDBResponseError
class ItemThread(threading.Thread):
"""
A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
``items`` instance variable after :py:meth:`run() <run>` is called.
.. tip:: The item retrieval will not start until
the :func:`run() <boto.sdb.connection.ItemThread.run>` method is called.
"""
def __init__(self, name, domain_name, item_names):
"""
:param str name: A thread name. Used for identification.
:param str domain_name: The name of a SimpleDB
:class:`Domain <boto.sdb.domain.Domain>`
:type item_names: string or list of strings
:param item_names: The name(s) of the items to retrieve from the specified
:class:`Domain <boto.sdb.domain.Domain>`.
:ivar list items: A list of items retrieved. Starts as empty list.
"""
super(ItemThread, self).__init__(name=name)
#print 'starting %s with %d items' % (name, len(item_names))
self.domain_name = domain_name
self.conn = SDBConnection()
self.item_names = item_names
self.items = []
def run(self):
"""
Start the threaded retrieval of items. Populates the
``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
"""
for item_name in self.item_names:
item = self.conn.get_attributes(self.domain_name, item_name)
self.items.append(item)
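    # Hedged usage sketch (assumes a domain 'mydomain' with items 'i1', 'i2'):
    #
    #   t = ItemThread('batch-1', 'mydomain', ['i1', 'i2'])
    #   t.start()
    #   t.join()
    #   print t.items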
#boto.set_stream_logger('sdb')
class SDBConnection(AWSQueryConnection):
"""
This class serves as a gateway to your SimpleDB region (defaults to
us-east-1). Methods within allow access to SimpleDB
:class:`Domain <boto.sdb.domain.Domain>` objects and their associated
:class:`Item <boto.sdb.item.Item>` objects.
.. tip::
While you may instantiate this class directly, it may be easier to
go through :py:func:`boto.connect_sdb`.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com'
APIVersion = '2009-04-15'
ResponseError = SDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None, security_token=None, validate_certs=True):
"""
For any keywords that aren't documented, refer to the parent class,
:py:class:`boto.connection.AWSAuthConnection`. You can avoid having
to worry about these keyword arguments by instantiating these objects
via :py:func:`boto.connect_sdb`.
:type region: :class:`boto.sdb.regioninfo.SDBRegionInfo`
:keyword region: Explicitly specify a region. Defaults to ``us-east-1``
if not specified. You may also specify the region in your ``boto.cfg``:
.. code-block:: cfg
[SDB]
region = eu-west-1
"""
if not region:
region_name = boto.config.get('SDB', 'region', self.DefaultRegionName)
for reg in boto.sdb.regions():
if reg.name == region_name:
region = reg
break
self.region = region
super(SDBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs)
self.box_usage = 0.0
self.converter = converter
self.item_cls = Item
def _required_auth_capability(self):
return ['sdb']
def set_item_cls(self, cls):
"""
While the default item class is :py:class:`boto.sdb.item.Item`, this
default may be overridden. Use this method to change a connection's
item class.
:param object cls: The new class to set as this connection's item
class. See the default item class for inspiration as to what your
replacement should/could look like.
"""
self.item_cls = cls
def _build_name_value_list(self, params, attributes, replace=False,
label='Attribute'):
keys = sorted(attributes.keys())
i = 1
for key in keys:
value = attributes[key]
if isinstance(value, list):
for v in value:
params['%s.%d.Name' % (label, i)] = key
if self.converter:
v = self.converter.encode(v)
params['%s.%d.Value' % (label, i)] = v
if replace:
params['%s.%d.Replace' % (label, i)] = 'true'
i += 1
else:
params['%s.%d.Name' % (label, i)] = key
if self.converter:
value = self.converter.encode(value)
params['%s.%d.Value' % (label, i)] = value
if replace:
params['%s.%d.Replace' % (label, i)] = 'true'
i += 1
def _build_expected_value(self, params, expected_value):
params['Expected.1.Name'] = expected_value[0]
if expected_value[1] is True:
params['Expected.1.Exists'] = 'true'
elif expected_value[1] is False:
params['Expected.1.Exists'] = 'false'
else:
params['Expected.1.Value'] = expected_value[1]
def _build_batch_list(self, params, items, replace=False):
item_names = items.keys()
i = 0
for item_name in item_names:
params['Item.%d.ItemName' % i] = item_name
j = 0
item = items[item_name]
if item is not None:
attr_names = item.keys()
for attr_name in attr_names:
value = item[attr_name]
if isinstance(value, list):
for v in value:
if self.converter:
v = self.converter.encode(v)
params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
params['Item.%d.Attribute.%d.Value' % (i, j)] = v
if replace:
params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
j += 1
else:
params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
if self.converter:
value = self.converter.encode(value)
params['Item.%d.Attribute.%d.Value' % (i, j)] = value
if replace:
params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
j += 1
i += 1
def _build_name_list(self, params, attribute_names):
i = 1
attribute_names.sort()
for name in attribute_names:
params['Attribute.%d.Name' % i] = name
i += 1
def get_usage(self):
"""
Returns the BoxUsage (in USD) accumulated on this specific SDBConnection
instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
:rtype: float
:return: The accumulated BoxUsage of all requests made on the connection.
"""
return self.box_usage
def print_usage(self):
"""
Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
"""
print 'Total Usage: %f compute seconds' % self.box_usage
cost = self.box_usage * 0.14
print 'Approximate Cost: $%f' % cost
def get_domain(self, domain_name, validate=True):
"""
Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
matches ``domain_name``.
:param str domain_name: The name of the domain to retrieve
:keyword bool validate: When ``True``, check to see if the domain
actually exists. If ``False``, blindly return a
:py:class:`Domain <boto.sdb.domain.Domain>` object with the
specified name set.
:raises:
:py:class:`boto.exception.SDBResponseError` if ``validate`` is
``True`` and no match could be found.
:rtype: :py:class:`boto.sdb.domain.Domain`
:return: The requested domain
"""
domain = Domain(self, domain_name)
if validate:
self.select(domain, """select * from `%s` limit 1""" % domain_name)
return domain
def lookup(self, domain_name, validate=True):
"""
Lookup an existing SimpleDB domain. This differs from
:py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
``True`` and no match was found (instead of raising an exception).
:param str domain_name: The name of the domain to retrieve
:param bool validate: If ``True``, a ``None`` value will be returned
if the specified domain can't be found. If ``False``, a
:py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
returned, regardless of whether it actually exists.
:rtype: :class:`boto.sdb.domain.Domain` object or ``None``
:return: The Domain object or ``None`` if the domain does not exist.
"""
try:
domain = self.get_domain(domain_name, validate)
except:
domain = None
return domain
def get_all_domains(self, max_domains=None, next_token=None):
"""
Returns a :py:class:`boto.resultset.ResultSet` containing
all :py:class:`boto.sdb.domain.Domain` objects associated with
this connection's Access Key ID.
:keyword int max_domains: Limit the returned
:py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
number of members.
:keyword str next_token: A token string that was returned in an
earlier call to this method as the ``next_token`` attribute
on the returned :py:class:`ResultSet <boto.resultset.ResultSet>`
            object. This attribute is set if there are more domains than
the value specified in the ``max_domains`` keyword. Pass the
            ``next_token`` value from your earlier query in this keyword to
get the next 'page' of domains.
"""
params = {}
if max_domains:
params['MaxNumberOfDomains'] = max_domains
if next_token:
params['NextToken'] = next_token
return self.get_list('ListDomains', params, [('DomainName', Domain)])
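    # Hedged pagination sketch using the ``next_token`` attribute described
    # above:
    #
    #   rs = sdb.get_all_domains(max_domains=10)
    #   while True:
    #       for domain in rs:
    #           print domain.name
    #       if not rs.next_token:
    #           break
    #       rs = sdb.get_all_domains(max_domains=10,
    #                                next_token=rs.next_token)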
def create_domain(self, domain_name):
"""
Create a SimpleDB domain.
:type domain_name: string
:param domain_name: The name of the new domain
:rtype: :class:`boto.sdb.domain.Domain` object
:return: The newly created domain
"""
params = {'DomainName':domain_name}
d = self.get_object('CreateDomain', params, Domain)
d.name = domain_name
return d
def get_domain_and_name(self, domain_or_name):
"""
Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
``tuple`` with the following members (in order):
        * An instance of :class:`boto.sdb.domain.Domain` for the requested
domain
* The domain's name as a ``str``
:type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
:param domain_or_name: The domain or domain name to get the domain
and name for.
:raises: :class:`boto.exception.SDBResponseError` when an invalid
domain name is specified.
:rtype: tuple
:return: A ``tuple`` with contents outlined as per above.
"""
if (isinstance(domain_or_name, Domain)):
return (domain_or_name, domain_or_name.name)
else:
return (self.get_domain(domain_or_name), domain_or_name)
def delete_domain(self, domain_or_name):
"""
Delete a SimpleDB domain.
.. caution:: This will delete the domain and all items within the domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
return self.get_status('DeleteDomain', params)
def domain_metadata(self, domain_or_name):
"""
Get the Metadata for a SimpleDB domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: :class:`boto.sdb.domain.DomainMetaData` object
:return: The newly created domain metadata object
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
d = self.get_object('DomainMetadata', params, DomainMetaData)
d.domain = domain
return d
def put_attributes(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
"""
Store attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being
stored.
        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
return self.get_status('PutAttributes', params)
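    # Hedged sketch of the conditional write described above (assumes a
    # domain named 'mydomain' and configured credentials):
    #
    #   sdb = boto.connect_sdb()
    #   # only succeeds while 'item1' has no 'color' attribute yet
    #   sdb.put_attributes('mydomain', 'item1', {'color': 'red'},
    #                      expected_value=['color', False])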
def batch_put_attributes(self, domain_or_name, items, replace=True):
"""
Store attributes for multiple items in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are themselves dictionaries
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name}
self._build_batch_list(params, items, replace)
return self.get_status('BatchPutAttributes', params, verb='POST')
def get_attributes(self, domain_or_name, item_name, attribute_names=None,
consistent_read=False, item=None):
"""
Retrieve attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are
being retrieved.
:type attribute_names: string or list of strings
:param attribute_names: An attribute name or list of attribute names.
This parameter is optional. If not supplied, all attributes will
be retrieved for the item.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:type item: :class:`boto.sdb.item.Item`
:keyword item: Instead of instantiating a new Item object, you may
specify one to update.
:rtype: :class:`boto.sdb.item.Item`
:return: An Item with the requested attribute name/values set on it
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
if consistent_read:
params['ConsistentRead'] = 'true'
if attribute_names:
if not isinstance(attribute_names, list):
attribute_names = [attribute_names]
self.build_list_params(params, attribute_names, 'AttributeName')
response = self.make_request('GetAttributes', params)
body = response.read()
if response.status == 200:
if item is None:
item = self.item_cls(domain, item_name)
h = handler.XmlHandler(item, self)
xml.sax.parseString(body, h)
return item
else:
raise SDBResponseError(response.status, response.reason, body)
def delete_attributes(self, domain_or_name, item_name, attr_names=None,
expected_value=None):
"""
Delete attributes from a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being
deleted.
        :type attr_names: dict, list or :class:`boto.sdb.item.Item`
        :param attr_names: Either a list containing attribute names which
will cause all values associated with that attribute
name to be deleted or a dict or Item containing the
attribute names and keys and list of values to
delete as the value. If no value is supplied,
all attribute name/values for the item will be
deleted.
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'ItemName' : item_name}
if attr_names:
if isinstance(attr_names, list):
self._build_name_list(params, attr_names)
elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls):
self._build_name_value_list(params, attr_names)
if expected_value:
self._build_expected_value(params, expected_value)
return self.get_status('DeleteAttributes', params)
def batch_delete_attributes(self, domain_or_name, items):
"""
Delete multiple items in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
* dictionaries of attribute names/values, exactly the
same as the attribute_names parameter of the scalar
put_attributes call. The attribute name/value pairs
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
with the item should be deleted.
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name}
self._build_batch_list(params, items, False)
return self.get_status('BatchDeleteAttributes', params, verb='POST')
def select(self, domain_or_name, query='', next_token=None,
consistent_read=False):
"""
Returns a set of Attributes for item names within domain_name that
        match the query. The query must be expressed using the SELECT
style syntax rather than the original SimpleDB query language.
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression' : query}
if consistent_read:
params['ConsistentRead'] = 'true'
if next_token:
params['NextToken'] = next_token
try:
return self.get_list('Select', params, [('Item', self.item_cls)],
parent=domain)
except SDBResponseError, e:
e.body = "Query: %s\n%s" % (query, e.body)
raise e
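# Hedged usage sketch for SDBConnection.select (assumes a populated domain):
#
#   sdb = boto.connect_sdb()
#   domain = sdb.get_domain('mydomain')
#   rs = sdb.select(domain, "select * from `mydomain` where color = 'red'")
#   for item in rs:
#       print item.name, item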
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""The code in this module is mostly copy/pasted out of the distutils2 source
code, as recommended by Tarek Ziade. As such, it may be subject to some change
as distutils2 development continues, and will have to be kept up to date.
I didn't want to use it directly from distutils2 itself, since I do not want it
to be an installation dependency for our packages yet--it is still too unstable
(the latest version on PyPI doesn't even install).
"""
# These first two imports are not used, but are needed to get around an
# irritating Python bug that can crop up when using ./setup.py test.
# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html
try:
import multiprocessing # noqa
except ImportError:
pass
import logging # noqa
from collections import defaultdict
import io
import os
import re
import shlex
import sys
import traceback
import distutils.ccompiler
from distutils import errors
from distutils import log
import pkg_resources
from setuptools import dist as st_dist
from setuptools import extension
try:
import ConfigParser as configparser
except ImportError:
import configparser
from pbr import extra_files
import pbr.hooks
# A simplified RE for this; just checks that the line ends with version
# predicates in ()
_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$')
# Mappings from setup() keyword arguments to setup.cfg options;
# The values are (section, option) tuples, or simply (section,) tuples if
# the option has the same name as the setup() argument
CFG_TO_PY_SETUP_ARGS = (
(('metadata', 'name'), 'name'),
(('metadata', 'version'), 'version'),
(('metadata', 'author'), 'author'),
(('metadata', 'author_email'), 'author_email'),
(('metadata', 'maintainer'), 'maintainer'),
(('metadata', 'maintainer_email'), 'maintainer_email'),
(('metadata', 'home_page'), 'url'),
(('metadata', 'project_urls'), 'project_urls'),
(('metadata', 'summary'), 'description'),
(('metadata', 'keywords'), 'keywords'),
(('metadata', 'description'), 'long_description'),
(
('metadata', 'description_content_type'),
'long_description_content_type',
),
(('metadata', 'download_url'), 'download_url'),
(('metadata', 'classifier'), 'classifiers'),
(('metadata', 'platform'), 'platforms'), # **
(('metadata', 'license'), 'license'),
# Use setuptools install_requires, not
# broken distutils requires
(('metadata', 'requires_dist'), 'install_requires'),
(('metadata', 'setup_requires_dist'), 'setup_requires'),
(('metadata', 'python_requires'), 'python_requires'),
(('metadata', 'requires_python'), 'python_requires'),
(('metadata', 'provides_dist'), 'provides'), # **
(('metadata', 'provides_extras'), 'provides_extras'),
(('metadata', 'obsoletes_dist'), 'obsoletes'), # **
(('files', 'packages_root'), 'package_dir'),
(('files', 'packages'), 'packages'),
(('files', 'package_data'), 'package_data'),
(('files', 'namespace_packages'), 'namespace_packages'),
(('files', 'data_files'), 'data_files'),
(('files', 'scripts'), 'scripts'),
(('files', 'modules'), 'py_modules'), # **
(('global', 'commands'), 'cmdclass'),
# Not supported in distutils2, but provided for
# backwards compatibility with setuptools
(('backwards_compat', 'zip_safe'), 'zip_safe'),
(('backwards_compat', 'tests_require'), 'tests_require'),
(('backwards_compat', 'dependency_links'), 'dependency_links'),
(('backwards_compat', 'include_package_data'), 'include_package_data'),
)
# setup() arguments that can have multiple values in setup.cfg
MULTI_FIELDS = ("classifiers",
"platforms",
"install_requires",
"provides",
"obsoletes",
"namespace_packages",
"packages",
"package_data",
"data_files",
"scripts",
"py_modules",
"dependency_links",
"setup_requires",
"tests_require",
"keywords",
"cmdclass",
"provides_extras")
# setup() arguments that can have mapping values in setup.cfg
MAP_FIELDS = ("project_urls",)
# setup() arguments that contain boolean values
BOOL_FIELDS = ("zip_safe", "include_package_data")
CSV_FIELDS = ()
def shlex_split(path):
if os.name == 'nt':
# shlex cannot handle paths that contain backslashes, treating those
# as escape characters.
path = path.replace("\\", "/")
return [x.replace("/", "\\") for x in shlex.split(path)]
return shlex.split(path)
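# Hedged example: shlex_split('a "b c" d') == ['a', 'b c', 'd'] everywhere,
# while on Windows the replace/split/replace round-trip above keeps a path
# like r'C:\pkg' intact instead of treating the backslash as an escape.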
def resolve_name(name):
"""Resolve a name like ``module.object`` to an object and return it.
Raise ImportError if the module or name is not found.
"""
parts = name.split('.')
cursor = len(parts) - 1
module_name = parts[:cursor]
attr_name = parts[-1]
while cursor > 0:
try:
ret = __import__('.'.join(module_name), fromlist=[attr_name])
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
attr_name = parts[cursor]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
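# Hedged example: resolve_name('os.path.join') imports os.path and returns
# its join function, while resolve_name('os.path.no_such_attr') raises
# ImportError('os.path.no_such_attr').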
def cfg_to_args(path='setup.cfg', script_args=()):
"""Distutils2 to distutils1 compatibility util.
This method uses an existing setup.cfg to generate a dictionary of
keywords that can be used by distutils.core.setup(kwargs**).
:param path:
The setup.cfg path.
:param script_args:
List of commands setup.py was called with.
:raises DistutilsFileError:
When the setup.cfg file is not found.
"""
# The method source code really starts here.
if sys.version_info >= (3, 0):
parser = configparser.ConfigParser()
else:
parser = configparser.SafeConfigParser()
if not os.path.exists(path):
raise errors.DistutilsFileError("file '%s' does not exist" %
os.path.abspath(path))
try:
parser.read(path, encoding='utf-8')
except TypeError:
# Python 2 doesn't accept the encoding kwarg
parser.read(path)
config = {}
for section in parser.sections():
config[section] = dict()
for k, value in parser.items(section):
config[section][k.replace('-', '_')] = value
# Run setup_hooks, if configured
setup_hooks = has_get_option(config, 'global', 'setup_hooks')
package_dir = has_get_option(config, 'files', 'packages_root')
# Add the source package directory to sys.path in case it contains
# additional hooks, and to make sure it's on the path before any existing
# installations of the package
if package_dir:
package_dir = os.path.abspath(package_dir)
sys.path.insert(0, package_dir)
try:
if setup_hooks:
setup_hooks = [
hook for hook in split_multiline(setup_hooks)
if hook != 'pbr.hooks.setup_hook']
for hook in setup_hooks:
hook_fn = resolve_name(hook)
try:
hook_fn(config)
except SystemExit:
                    log.error('setup hook %s terminated the installation'
                              % hook)
except Exception:
e = sys.exc_info()[1]
log.error('setup hook %s raised exception: %s\n' %
(hook, e))
log.error(traceback.format_exc())
sys.exit(1)
# Run the pbr hook
pbr.hooks.setup_hook(config)
kwargs = setup_cfg_to_setup_kwargs(config, script_args)
# Set default config overrides
kwargs['include_package_data'] = True
kwargs['zip_safe'] = False
register_custom_compilers(config)
ext_modules = get_extension_modules(config)
if ext_modules:
kwargs['ext_modules'] = ext_modules
entry_points = get_entry_points(config)
if entry_points:
kwargs['entry_points'] = entry_points
# Handle the [files]/extra_files option
files_extra_files = has_get_option(config, 'files', 'extra_files')
if files_extra_files:
extra_files.set_extra_files(split_multiline(files_extra_files))
finally:
# Perform cleanup if any paths were added to sys.path
if package_dir:
sys.path.pop(0)
return kwargs
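# Hedged usage sketch matching the docstring above (assumes this module is
# importable as pbr.util): a d2to1-style setup.py reduces to
#
#   from distutils.core import setup
#   from pbr.util import cfg_to_args
#   setup(**cfg_to_args())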
def setup_cfg_to_setup_kwargs(config, script_args=()):
"""Convert config options to kwargs.
Processes the setup.cfg options and converts them to arguments accepted
by setuptools' setup() function.
"""
kwargs = {}
# Temporarily holds install_requires and extra_requires while we
# parse env_markers.
all_requirements = {}
for alias, arg in CFG_TO_PY_SETUP_ARGS:
section, option = alias
in_cfg_value = has_get_option(config, section, option)
if not in_cfg_value and arg == "long_description":
in_cfg_value = has_get_option(config, section, "description_file")
if in_cfg_value:
in_cfg_value = split_multiline(in_cfg_value)
value = ''
for filename in in_cfg_value:
description_file = io.open(filename, encoding='utf-8')
try:
value += description_file.read().strip() + '\n\n'
finally:
description_file.close()
in_cfg_value = value
if not in_cfg_value:
continue
if arg in CSV_FIELDS:
in_cfg_value = split_csv(in_cfg_value)
if arg in MULTI_FIELDS:
in_cfg_value = split_multiline(in_cfg_value)
elif arg in MAP_FIELDS:
in_cfg_map = {}
for i in split_multiline(in_cfg_value):
k, v = i.split('=', 1)
in_cfg_map[k.strip()] = v.strip()
in_cfg_value = in_cfg_map
elif arg in BOOL_FIELDS:
# Provide some flexibility here...
if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):
in_cfg_value = True
else:
in_cfg_value = False
if in_cfg_value:
if arg in ('install_requires', 'tests_require'):
# Replaces PEP345-style version specs with the sort expected by
# setuptools
in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred)
for pred in in_cfg_value]
if arg == 'install_requires':
# Split install_requires into package,env_marker tuples
# These will be re-assembled later
install_requires = []
requirement_pattern = (
r'(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\s*#.*)?$')
for requirement in in_cfg_value:
m = re.match(requirement_pattern, requirement)
requirement_package = m.group('package').strip()
env_marker = m.group('env_marker').strip()
install_requires.append((requirement_package, env_marker))
all_requirements[''] = install_requires
elif arg == 'package_dir':
in_cfg_value = {'': in_cfg_value}
elif arg in ('package_data', 'data_files'):
data_files = {}
firstline = True
prev = None
for line in in_cfg_value:
if '=' in line:
key, value = line.split('=', 1)
key_unquoted = shlex_split(key.strip())[0]
key, value = (key_unquoted, value.strip())
if key in data_files:
# Multiple duplicates of the same package name;
# this is for backwards compatibility of the old
# format prior to d2to1 0.2.6.
prev = data_files[key]
prev.extend(shlex_split(value))
else:
prev = data_files[key.strip()] = shlex_split(value)
elif firstline:
raise errors.DistutilsOptionError(
                            'malformed package_data first line %r (missing '
'"=")' % line)
else:
prev.extend(shlex_split(line.strip()))
firstline = False
if arg == 'data_files':
# the data_files value is a pointlessly different structure
# from the package_data value
data_files = sorted(data_files.items())
in_cfg_value = data_files
elif arg == 'cmdclass':
cmdclass = {}
dist = st_dist.Distribution()
for cls_name in in_cfg_value:
cls = resolve_name(cls_name)
cmd = cls(dist)
cmdclass[cmd.get_command_name()] = cls
in_cfg_value = cmdclass
kwargs[arg] = in_cfg_value
# Transform requirements with embedded environment markers to
# setuptools' supported marker-per-requirement format.
#
# install_requires are treated as a special case of extras, before
# being put back in the expected place
#
# fred =
# foo:marker
# bar
# -> {'fred': ['bar'], 'fred:marker':['foo']}
if 'extras' in config:
requirement_pattern = (
r'(?P<package>[^:]*):?(?P<env_marker>[^#]*?)(?:\s*#.*)?$')
extras = config['extras']
# Add contents of test-requirements, if any, into an extra named
# 'test' if one does not already exist.
if 'test' not in extras:
from pbr import packaging
extras['test'] = "\n".join(packaging.parse_requirements(
packaging.TEST_REQUIREMENTS_FILES)).replace(';', ':')
for extra in extras:
extra_requirements = []
requirements = split_multiline(extras[extra])
for requirement in requirements:
m = re.match(requirement_pattern, requirement)
extras_value = m.group('package').strip()
env_marker = m.group('env_marker')
extra_requirements.append((extras_value, env_marker))
all_requirements[extra] = extra_requirements
# Transform the full list of requirements into:
# - install_requires, for those that have no extra and no
# env_marker
# - named extras, for those with an extra name (which may include
# an env_marker)
# - and as a special case, install_requires with an env_marker are
# treated as named extras where the name is the empty string
extras_require = {}
for req_group in all_requirements:
for requirement, env_marker in all_requirements[req_group]:
if env_marker:
extras_key = '%s:(%s)' % (req_group, env_marker)
# We do not want to poison wheel creation with locally
# evaluated markers. sdists always re-create the egg_info
# and as such do not need guarded, and pip will never call
# multiple setup.py commands at once.
if 'bdist_wheel' not in script_args:
try:
if pkg_resources.evaluate_marker('(%s)' % env_marker):
extras_key = req_group
except SyntaxError:
log.error(
"Marker evaluation failed, see the following "
"error. For more information see: "
"http://docs.openstack.org/"
"pbr/latest/user/using.html#environment-markers"
)
raise
else:
extras_key = req_group
extras_require.setdefault(extras_key, []).append(requirement)
kwargs['install_requires'] = extras_require.pop('', [])
kwargs['extras_require'] = extras_require
return kwargs
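# ---------------------------------------------------------------------------
# Hedged illustration (added; not part of the original module). A minimal,
# self-contained sketch of the package/env_marker split performed above for
# requirements written in the "pkg; marker" form.
def _demo_split_env_markers():
    import re
    pattern = r'(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\s*#.*)?$'
    for req in ["six; python_version<'3'", 'pbr']:
        m = re.match(pattern, req)
        print('%s | %s' % (m.group('package').strip(),
                           m.group('env_marker').strip()))
# _demo_split_env_markers() prints:
#   six | python_version<'3'
#   pbr |          (env_marker is empty when no ';' marker is present)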
def register_custom_compilers(config):
"""Handle custom compilers.
This has no real equivalent in distutils, where additional compilers could
only be added programmatically, so we have to hack it in somehow.
"""
compilers = has_get_option(config, 'global', 'compilers')
if compilers:
compilers = split_multiline(compilers)
for compiler in compilers:
compiler = resolve_name(compiler)
# In distutils2 compilers these class attributes exist; for
# distutils1 we just have to make something up
if hasattr(compiler, 'name'):
name = compiler.name
else:
name = compiler.__name__
if hasattr(compiler, 'description'):
desc = compiler.description
else:
desc = 'custom compiler %s' % name
module_name = compiler.__module__
        # Note: this *will* override built-in compilers with the same name
# TODO(embray): Maybe display a warning about this?
cc = distutils.ccompiler.compiler_class
cc[name] = (module_name, compiler.__name__, desc)
# HACK!!!! Distutils assumes all compiler modules are in the
# distutils package
sys.modules['distutils.' + module_name] = sys.modules[module_name]
def get_extension_modules(config):
"""Handle extension modules"""
EXTENSION_FIELDS = ("sources",
"include_dirs",
"define_macros",
"undef_macros",
"library_dirs",
"libraries",
"runtime_library_dirs",
"extra_objects",
"extra_compile_args",
"extra_link_args",
"export_symbols",
"swig_opts",
"depends")
ext_modules = []
for section in config:
if ':' in section:
labels = section.split(':', 1)
else:
# Backwards compatibility for old syntax; don't use this though
labels = section.split('=', 1)
labels = [label.strip() for label in labels]
if (len(labels) == 2) and (labels[0] == 'extension'):
ext_args = {}
for field in EXTENSION_FIELDS:
value = has_get_option(config, section, field)
# All extension module options besides name can have multiple
# values
if not value:
continue
value = split_multiline(value)
if field == 'define_macros':
macros = []
for macro in value:
macro = macro.split('=', 1)
if len(macro) == 1:
macro = (macro[0].strip(), None)
else:
macro = (macro[0].strip(), macro[1].strip())
macros.append(macro)
value = macros
ext_args[field] = value
if ext_args:
if 'name' not in ext_args:
ext_args['name'] = labels[1]
ext_modules.append(extension.Extension(ext_args.pop('name'),
**ext_args))
return ext_modules
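# Hedged illustration (added): a setup.cfg section such as
#
#   [extension: mypkg._speedups]
#   sources = src/speedups.c
#   define_macros = NDEBUG
#                   DEBUG_LEVEL=0
#
# would be turned by get_extension_modules() into roughly
#   Extension('mypkg._speedups', sources=['src/speedups.c'],
#             define_macros=[('NDEBUG', None), ('DEBUG_LEVEL', '0')])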
def get_entry_points(config):
"""Process the [entry_points] section of setup.cfg.
Processes setup.cfg to handle setuptools entry points. This is, of course,
not a standard feature of distutils2/packaging, but as there is not
currently a standard alternative in packaging, we provide support for them.
"""
if 'entry_points' not in config:
return {}
return dict((option, split_multiline(value))
for option, value in config['entry_points'].items())
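# Hedged illustration (added): an [entry_points] section like
#   console_scripts = mycli = mypkg.cli:main
# becomes {'console_scripts': ['mycli = mypkg.cli:main']}, which is the
# structure setuptools expects for its entry_points kwarg.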
def has_get_option(config, section, option):
if section in config and option in config[section]:
return config[section][option]
else:
return False
def split_multiline(value):
"""Special behaviour when we have a multi line options"""
value = [element for element in
(line.strip() for line in value.split('\n'))
if element and not element.startswith('#')]
return value
def split_csv(value):
"""Special behaviour when we have a comma separated options"""
value = [element for element in
(chunk.strip() for chunk in value.split(','))
if element]
return value
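# Hedged illustration (added) of the two helpers above:
#   split_multiline('foo\n# a comment\nbar')  ->  ['foo', 'bar']
#   split_csv('a, b,, c')                     ->  ['a', 'b', 'c']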
# The following classes are used to hack Distribution.command_options a bit
class DefaultGetDict(defaultdict):
"""Like defaultdict, but get() also sets and returns the default value."""
def get(self, key, default=None):
if default is None:
default = self.default_factory()
return super(DefaultGetDict, self).setdefault(key, default)
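# Hedged illustration (added): unlike dict.get(), DefaultGetDict.get() also
# stores the default it hands out:
#   d = DefaultGetDict(list)
#   d.get('opts').append(('value', 'source'))
#   d['opts']  ->  [('value', 'source')]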
'''
TODO (29.05.2012):
1) show 1x, 2x, 3x threshold (as line)
2) auto scale in y axis? (calc and save min & max values of buffer)
3) draw y axis?
4) 'max_nbr_buffers_transmitted' must be 1 and 'framesize' must be 512 otherwise we get in trouble in RT mode.
5) set 'SHIFT_VIEW' in update() and dequeue in 'do_draw'? does this get rid of the shift / lag? --> IT DOES NOT!
6) how do I connect points across VBOs? currently only points inside a VBO are connected.
7) make code modular so that I don't have to keep two versions up-to-date.
0) WINDOWS only:
A) if you are planning to run this program on Windows (32bit and 64 bit), make
sure to install 32-bit Python; 64-bit Python on Windows won't work with pyglet!
B) install 32bit installer of 'setuptools' http://pypi.python.org/pypi/setuptools
C) $ cd c:\python27\Scripts
$ easy_install numpy
D) set the nvidia driver 3D settings to 'performance' if you want highest FPS
1) you need to install a recent version of pyglet to run this program:
$ hg clone https://pyglet.googlecode.com/hg/ pyglet
$ sudo python setup.py install
# on windows do:
# d:
# cd d:\code\pyglet
# c:\Python27\python.exe setup.py install
2) you also need numpy to be installed; on ubuntu do:
$ sudo apt-get install python-numpy
3) Ubuntu / Linux only: in case this applications freezes make sure the following
points are met:
- Nvidia driver 280.13; I had lots of problems with version 290 & 295
- latest pyglet dev version is installed (see point 1). I tried both pyglet-1.1.2 and
pyglet-1.1.4 that come with ubuntu but I get very poor performance.
4) check remaining 'TODO' sections
Profiling)
A) per function
$ python -m cProfile pyglet_vbo_test7.py
B) per line
$ sudo /usr/bin/easy_install line_profiler
# add decorator '@profile' in front of each function
$ kernprof.py -l pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/line_profiler-1.0b3-py2.7-linux-x86_64.egg/line_profiler.py pyglet_vbo_test7.py.lprof > prof.txt
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py prof.txt
C) with runsnakerun GUI - not compatible with method B)
$ sudo /usr/bin/easy_install RunSnakeRun
$ python -m cProfile -o pyglet_vbo_test7.profile pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py pyglet_vbo_test7.profile
'''
''' turn on debugger if necessary
import pdb
pdb.set_trace()
'''
import pyglet
from pyglet.gl import *
from ctypes import pointer, sizeof
import numpy as np
import random
from time import time
from math import ceil, floor
''' mmap stuff '''
import os, sys
import mmap
from datetime import datetime
from struct import unpack, pack
############
#
from pOpenGLbasic import calc_VOB_numbers
# switch between drawing modes. all modes render ~ the same amount of data points.
# mode = 0; few segments -> high FPS since not many gl* calls
# mode = 1; many segments -> low FPS since gl* calls are executed many more times.
MODE = 1
# default window dimensions
WIN_HEIGHT_DEFAULT = 800
WIN_WIDTH_DEFAULT = 800
# 512 is neuralynx specific.
NBR_DATA_POINTS_PER_BUFFER = 1.0
NBR_DATA_POINTS_PER_BUFFER_INT = int(NBR_DATA_POINTS_PER_BUFFER)
SCANRATE = 1
SECONDS_TO_VISUALIZE_PER_PANEL = 1.0
# approximate number of data points per VBO. will change and be adjusted so that
# this number is a multiple of NBR_DATA_POINTS_PER_BUFFER
NBR_DATA_POINTS_PER_VBO = 200
# how many times per second should we call the update function?
#CALL_UPDATE_X_TIMES_PER_SECOND = 67.0
# TODO: check what a reasonable value for 'CALL_UPDATE_X_TIMES_PER_SECOND' is.
# going from 67.0 to 60.0 gives me a huge performance improvement.
CALL_UPDATE_X_TIMES_PER_SECOND = 60.0
# into how many data panels should we split up the window?
NBR_PANELS = 1
# use same color for all segments?
USE_UNIFORM_COLOR = True
# default color to be used by 'USE_UNIFORM_COLOR'
DEFAULT_COLOR = [1, 0, 0]
# y scaling factors for spike and noise values.
SPIKE_SIZE = 200
NOISE_SIZE = 100
# numpy's randint is exclusive, therefore we need to add one.
NOISE_SIZE_NP = NOISE_SIZE + 1
# generate spike every N points
if MODE == 0:
GENERATE_SPIKE_EVERY_N_POINTS = 10000
elif MODE == 1:
GENERATE_SPIKE_EVERY_N_POINTS = 128
# where to put the 0/0 point of the data points.
X_OFFSET_PANEL = 20
Y_OFFSET_PANEL = 200
# update counter used to determine when to generate a new segment of data.
update_counter = 1
SHIFT_VIEW = False
# enable debug 'print' statements?
DEBUG = 0
# number of independent data streams?
# e.g., 'StimOMatic' feeds in one spike and one LFP channel
NBR_INDEPENDENT_CHANNELS = 2
# should we use multiprocessing if possible? this might speed things up.
USE_MULTIPROCESSING = False
MULTIPROCESSING_NBR_PROCESSES = 12
DO_PROFILE = False
PLUGIN_NAME = 'pCtrlLFP'
# should we use mmap to receive data from matlab?
USE_MMAP = 0
MMAP_BYTES_PER_FLOAT = 8
# null string used to initialize memory
MMAP_NULL_HEX = '\x00'
# Definitions for 'glColorPointer' and 'glVertexPointer'
n_COORDINATES_PER_VERTEX = 2
BYTES_PER_POINT = 8
# indicator values used to confirm that data is received.
DATA_RECEIVED_ACK_NUM = 3.14159265
DATA_RECEIVED_ACK_STR = pack('d', DATA_RECEIVED_ACK_NUM)
NBR_BUFFERS_ZERO_STR = pack('d', 0)
# where's your temporary directory? mmap will write into it.
TMP_DIR = '/tmp'
if os.name == 'nt': # windows systems
# make sure you use double '\\' to separate directories
TMP_DIR = 'c:\\temp'
else: # unix systems
TMP_DIR = '/tmp'
TMP_DIR = TMP_DIR + os.sep + PLUGIN_NAME
MMAP_stats_file = TMP_DIR + os.sep + 'bla_stats'
# location of shared file(s)
MMAP_FILENAME = []
for j in range(NBR_INDEPENDENT_CHANNELS):
MMAP_FILENAME.append(TMP_DIR + os.sep + 'bla' + str(j+1))
# number of elements to store in memory
MMAP_STORE_LENGTH = MMAP_BYTES_PER_FLOAT * int(NBR_DATA_POINTS_PER_BUFFER)
################## dependent parameters / settings
output = calc_VOB_numbers(NBR_DATA_POINTS_PER_VBO, NBR_DATA_POINTS_PER_BUFFER, SECONDS_TO_VISUALIZE_PER_PANEL, SCANRATE)
NBR_DATA_POINTS_PER_VBO, NBR_VBOS_PER_PANEL, SECONDS_TO_VISUALIZE_PER_PANEL = output
# default X values
X_MIN = 0
X_MAX = float(WIN_WIDTH_DEFAULT) - X_OFFSET_PANEL
# shift each VBO by how much in X & Y direction, relative to the previous VBO?
SHIFT_Y_BY = 0
SHIFT_X_BY = abs(X_MIN) + abs(X_MAX)
# while generating the fake data, what is the stepsize between individual x data
# points?
STEPSIZE_X = float(SHIFT_X_BY) / NBR_DATA_POINTS_PER_VBO
# how much distance do 'NBR_DATA_POINTS_PER_BUFFER' points cover in x direction?
SHIFT_X_SINGLE_BUFFER = STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
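# (added) Worked example, assuming calc_VOB_numbers() leaves
# NBR_DATA_POINTS_PER_VBO at 200: X_MAX = 800 - 20 = 780, SHIFT_X_BY = 780,
# STEPSIZE_X = 780 / 200 = 3.9, and with NBR_DATA_POINTS_PER_BUFFER = 1.0,
# SHIFT_X_SINGLE_BUFFER = 3.9.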
##################
# default window dimensions
WIN_HEIGHT_current = WIN_HEIGHT_DEFAULT
WIN_WIDTH_current = WIN_WIDTH_DEFAULT
''' decorator to quickly switch between profiling and no profiling '''
def do_profile(cond):
def resdec(f):
if not cond:
return f
return profile(f)
return resdec
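# (added) Note on 'profile': when this script runs under line_profiler's
# kernprof ('kernprof.py -l ...', see the profiling notes at the top of this
# file), 'profile' is injected as a builtin; with DO_PROFILE = True outside
# kernprof, do_profile() would raise a NameError. E.g.:
#   @do_profile(True)
#   def f(): pass        # equivalent to f = profile(f) under kernprof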
@do_profile(DO_PROFILE)
def generate_line_segment_zeros(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
''' same as 'generate_line_segment' but will generate zero y-values '''
zeros = True
x, y = generate_points(min_x, max_x, x_shift, step_size, zeros)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_line_segment(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
# ~ 1ms
x, y = generate_points(min_x, max_x, x_shift, step_size)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_numbers_for_x_vector(x, zeros = False):
nbr_elements = len(x)
if zeros: # generate zeros
# TODO: check whether we need to add offset (Y_OFFSET_PANEL + 1)
y = np.zeros(nbr_elements)# + Y_OFFSET_PANEL + 1
else: # generate random values.
# generate a vector of random numbers in range [0, 1]
# y = [random.random() for i in range(nbr_elements)]
y = np.random.random(nbr_elements)
# generate a scaling vector of random numbers in range [1, NOISE_SIZE]
# this vector will scale each data point
# y_scale = [random.randint(1, NOISE_SIZE) for i in range(nbr_elements)]
y_scale = np.random.randint(1, NOISE_SIZE_NP, nbr_elements)
# generate a spike every 'GENERATE_SPIKE_EVERY_N_POINTS' data points
        # generate an initial offset so that spikes don't occur at same position.
y_scale_offset = np.random.randint(1, GENERATE_SPIKE_EVERY_N_POINTS)
y_scale[GENERATE_SPIKE_EVERY_N_POINTS - 1 + y_scale_offset::GENERATE_SPIKE_EVERY_N_POINTS] = SPIKE_SIZE
# rescale each data point accordingly
y = (y * y_scale) + SHIFT_Y_BY + Y_OFFSET_PANEL
return y
@do_profile(DO_PROFILE)
def generate_points(min_x=X_MIN, max_x=X_MAX, x_shift=SHIFT_X_BY, step_size = STEPSIZE_X, zeros = False):
# < 0.1ms
# 'range' can only generate integer arrays
# x = np.array(range(min_x, max_x), int)
    # use 'arange' from numpy to generate a float array
x = np.arange(min_x, max_x, step_size)
x = x + x_shift
y = generate_numbers_for_x_vector(x, zeros)
return x, y
@do_profile(DO_PROFILE)
def create_2dim_list_from_arrays(x, y):
data = []
for i, j in zip(x, y):
data.extend([i, j])
return data
@do_profile(DO_PROFILE)
def transform_line_points_to_data_format_for_GPU(line_points):
# ~ 0.2ms
#print "nbr data points generated: " + str(len(line_points) / 2)
return (GLfloat*len(line_points))(*line_points)
@do_profile(DO_PROFILE)
def generate_color_for_segment():
# < 0.1ms
# generate well visible (not too dark) colors
if not USE_UNIFORM_COLOR:
while True:
color = [random.random() for j in xrange(0, 3)]
if sum(color) > 0.5:
break
else:
color = [1, 0, 0]
return color
@do_profile(DO_PROFILE)
def create_VBO():
# < 0.1ms
vbo_id = GLuint()
    # generates 1 buffer object name, which is stored in pointer(vbo_id)
glGenBuffers(1, pointer(vbo_id))
return vbo_id
@do_profile(DO_PROFILE)
def create_VBO_send_data_to_VBO(data):
# < 0.1ms
vbo_id = create_VBO()
send_data_to_VBO(vbo_id, data)
return vbo_id
@do_profile(DO_PROFILE)
def send_data_to_VBO(vbo_id, data):
# < 0.1ms
# binds the named buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
# creates and initializes a buffer object's data store -> transfers data
# from the CPU to the GPU.
# TODO: check whether GL_DYNAMIC_DRAW or GL_STREAM_DRAW is faster.
# GL_STREAM_DRAW should be faster when updating the buffer @ every frame?
# see redbook page 95 & 96.
glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_DYNAMIC_DRAW)
@do_profile(DO_PROFILE)
def overwrite_line_segment_on_GPU(x_shift=SHIFT_X_BY, line_points=False, vbo_to_update=False):
# ~ 0.3ms
if not vbo_to_update:
print "!! no vbo pointer found - aborting !!"
print "update_counter: %d " % update_counter
return
if not line_points:
if DEBUG:
print "overwrite_line_segment_on_GPU: need to generate points"
line_points = generate_line_segment(x_shift)
data = transform_line_points_to_data_format_for_GPU(line_points)
color = generate_color_for_segment()
nbr_points = len(line_points)/2
# update data on VBO
send_data_to_VBO(vbo_to_update, data)
return nbr_points, color
@do_profile(DO_PROFILE)
def create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL):
vbos = [ [None] * int(NBR_VBOS_PER_PANEL) for i in xrange(NBR_PANELS) ]
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
vbos[panel][vbo] = create_VBO()
return vbos
@do_profile(DO_PROFILE)
def create_initial_data(nPanels, nVbosPerPanel, nDataPointsPerVbo):
data = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
curr_x_offset = (vbo * SHIFT_X_BY) + X_OFFSET_PANEL
#print "vbo %d, offset %d " % (vbo, curr_x_offset)
if (vbo + 1) == nVbosPerPanel:
tmp = generate_line_segment_zeros(x_shift=curr_x_offset)
else:
tmp = generate_line_segment(x_shift=curr_x_offset)
data[panel][vbo] = transform_line_points_to_data_format_for_GPU(tmp)
return data, curr_x_offset
@do_profile(DO_PROFILE)
def create_initial_colors(nPanels, nVbosPerPanel):
colors = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
colors[panel][vbo] = generate_color_for_segment()
return colors
@do_profile(DO_PROFILE)
def initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data):
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
send_data_to_VBO(vbos[panel][vbo], data[panel][vbo])
@do_profile(DO_PROFILE)
def setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO):
t0 = time()
vbos = create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL)
data, curr_x_offset = create_initial_data(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data)
colors = create_initial_colors(NBR_PANELS, NBR_VBOS_PER_PANEL)
print 'initial setup time was %f seconds.' %(time() - t0)
return vbos, colors, curr_x_offset
def setup_plotting_queue():
# setup plotting queue
import collections
max_nbr_buffers = 20000
plot_queue = collections.deque([], max_nbr_buffers)
return plot_queue
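# (added) collections.deque with a maxlen is a ring buffer: once
# 'max_nbr_buffers' elements are queued, each append() on the right silently
# drops the oldest element from the left, so a stalled consumer loses the
# oldest buffers rather than growing without bound.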
@do_profile(DO_PROFILE)
def update_line_segment_on_GPU(vbo_id, pointer_offset, data):
# bind buffer and overwrite position with offset 'pos_to_overwrite*BYTES_PER_POINT'
#try:
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
glBufferSubData(GL_ARRAY_BUFFER, pointer_offset, sizeof(data), data)
#except:
#print "pointer_offset: ", pointer_offset
#print "sizeof(data): ", sizeof(data)
#pass
@do_profile(DO_PROFILE)
def calc_x_values_single_buffer():
x_values = np.arange(0, SHIFT_X_SINGLE_BUFFER, STEPSIZE_X)
return x_values
@do_profile(DO_PROFILE)
def append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file):
# reformat data so that the buffers from 'j' mmap files
# are paired together.
for j in range(int(min(nbr_buffers_per_mmap_file))):
data_to_add = []
for k in range(len(new_data)):
data_to_add.append(new_data[k][j])
# append 'data_to_add' to end (right side) of queue
plot_queue.append(data_to_add)
@do_profile(DO_PROFILE)
def get_data_from_plot_queue():
# remove & return left most element from queue
data = []
if len(plot_queue) > 0:
data = plot_queue.popleft()
return data
@do_profile(DO_PROFILE)
def request_new_data():
''' generates new raw data or grabs new data from MMAP '''
if USE_MMAP == 1:
new_data = get_data_from_mmap()
#update_data_stream_status(new_data)
#print new_data
else:
new_data = []
# get the x-spacing right
x_values = calc_x_values_single_buffer()
for j in xrange(NBR_INDEPENDENT_CHANNELS):
# put data into zero-th buffer
new_data.append([generate_numbers_for_x_vector(x_values)])
nbr_mmap_files = len(new_data)
nbr_buffers_per_mmap_file = np.zeros(nbr_mmap_files)
empty_data = np.zeros(nbr_mmap_files)
for j in range(nbr_mmap_files):
# update number of buffers in this 'file'. Will fail
# if len(new_data) != NBR_INDEPENDENT_CHANNELS
try:
nbr_buffers_per_mmap_file[j] = len(new_data[j])
except:
continue
# check whether the first buffer of the current mmap file is empty
sum_data = sum(new_data[j][0])
if sum_data == 0 or sum_data == DATA_RECEIVED_ACK_NUM:
empty_data[j] = 1
# print empty_data
return new_data, empty_data, nbr_buffers_per_mmap_file
def transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current):
# calc correct x_value
x_values = calc_x_values_single_buffer() + x_shift_single_buffer_current
nbr_mmap_files = len(raw_data)
data = []
for j in range(nbr_mmap_files):
line_points = create_2dim_list_from_arrays(x_values, raw_data[j])
data.append(transform_line_points_to_data_format_for_GPU(line_points))
return data
def mmap_stats_go_to_nbr_received_buffers_pos():
# go to 2nd position relative to 0.
mmap_stats.seek(MMAP_BYTES_PER_FLOAT * 2, 0)
@do_profile(DO_PROFILE)
def get_nbr_received_buffers_from_mmap():
# go to position where 'number of new buffers' is stored
mmap_stats_go_to_nbr_received_buffers_pos()
# read-in the string value
nbr_buffers_received = mmap_stats.read(MMAP_BYTES_PER_FLOAT)
# convert into decimal value
nbr_buffers_received = unpack('d', nbr_buffers_received)[0]
# debugging:
#print str(nbr_buffers_received) + ' number buffers received'
return nbr_buffers_received
def create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers = 1):
    data = []
    for mmap_file_index in xrange(nbr_mmap_files):
        # pre-allocate each buffer; build a fresh list per mmap file so the
        # data streams don't alias each other.
        buffers = []
        for buffer_index in xrange(nbr_buffers):
            # create a deep copy of zeros, otherwise we create multiple
            # references to the same np.array object.
            zeros_copy = zeros.copy()
            buffers.append(zeros_copy)
        data.append(buffers)
    return data
@do_profile(DO_PROFILE)
def splitIterator(text, size):
# assert size > 0, "size should be > 0"
for start in range(0, len(text), size):
yield text[start:start + size]
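# (added) e.g. list(splitIterator('abcdef', 4)) -> ['abcd', 'ef']; the last
# chunk may be shorter than 'size'.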
prev_sum = 0
MMAP_NO_DATA_INDICATE_ZERO = False
MMAP_NO_DATA_INDICATE_NON_ZERO = True
@do_profile(DO_PROFILE)
def get_data_from_mmap():
#
#t0 = time()
nbr_buffers_received = get_nbr_received_buffers_from_mmap()
nbr_mmap_files = len(mmap_data)
zeros = np.zeros(NBR_DATA_POINTS_PER_BUFFER_INT)
''' no new buffers - generate one empty dummy buffer and return '''
if nbr_buffers_received == 0 or nbr_buffers_received == -1:
return create_empty_data_buffer(nbr_mmap_files, zeros)
nbr_buffers_received = int(nbr_buffers_received)
nbr_elements = nbr_buffers_received * NBR_DATA_POINTS_PER_BUFFER_INT
range_nbr_mmap_files = range(nbr_mmap_files)
# check if there's any data that's ready for pickup.
new_data_found = np.zeros(nbr_mmap_files)
for mmap_file_index in range_nbr_mmap_files:
# go to beginning of memory mapped area
mmap_data[mmap_file_index].seek(0)
# quit right away if no new data has been written yet.
this_element = mmap_data[mmap_file_index].read(MMAP_BYTES_PER_FLOAT)
this_element = unpack('d', this_element)[0]
if round(this_element, 8) != DATA_RECEIVED_ACK_NUM:
new_data_found[mmap_file_index] = 1
# none of the files contain new data
if sum(new_data_found) == 0:
return create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers_received)
''' read out transferred data '''
data = []
# this is ~ 10ms slower.
#data = np.zeros((nbr_mmap_files, nbr_buffers_received, NBR_DATA_POINTS_PER_BUFFER_INT))
# at least one new buffer has arrived.
for mmap_file_index in range_nbr_mmap_files:
#'''
# pre-allocate each buffer
buffers = []
for buffer_index in xrange(nbr_buffers_received):
# DONE: find out what the problem here is:
# there seems to be a bug in python on windows, or I don't understand the way things work:
# if I create 'zeros' outside this loop, the second time that 'zeros' gets called,
# it will contain all values found in data[mmap_file_index][buffer][j]. Therefore I have to re-generate
# the 'zeros' for each mmap_file_index'th loop.
# SOLUTION:
# We need to make a 'deep-copy' of zeros, otherwise we are just
# passing a reference to the same object (which is a np.array object).
zero_copy = zeros.copy()
buffers.append(zero_copy)
# add all buffers to mmap_file_index'th data stream.
data.append(buffers)
#'''
# go to beginning of memory mapped area & read out all elements
mmap_data[mmap_file_index].seek(0)
all_values_string = mmap_data[mmap_file_index].read(nbr_elements * MMAP_BYTES_PER_FLOAT)
# 0.1632 per call in debugger
# grab sub-list so we avoid having to call this list by its index.
this_data = data[mmap_file_index]
# unpack all values at once
unpacked_values = unpack("d" * nbr_elements, all_values_string)
# using list comprehension is better than a regular loop with random array access
this_data = [unpacked_values[i:i+NBR_DATA_POINTS_PER_BUFFER_INT] for i in xrange(0, nbr_elements, NBR_DATA_POINTS_PER_BUFFER_INT)]
# slower version of above line.
#for abs_idx in range(nbr_elements):
# this_data[abs_idx / NBR_DATA_POINTS_PER_BUFFER_INT][abs_idx % NBR_DATA_POINTS_PER_BUFFER_INT] = unpacked_values[abs_idx]
# write-back sub-list
data[mmap_file_index] = this_data
''' original version.
# these next few lines are responsible for 90% of the time spent in this function.
# 0.4974s per call in debugger
element_values_list = list(splitIterator(all_values_string, MMAP_BYTES_PER_FLOAT))
for abs_element_index in range(nbr_elements):
this_element = element_values_list[abs_element_index]
this_element = unpack('d', this_element)[0]
buffer_nbr = abs_element_index / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer = abs_element_index % NBR_DATA_POINTS_PER_BUFFER_INT
data[mmap_file_index][buffer_nbr][index_in_buffer] = this_element
'''
''' useless alternatives
# even worse: -> ~ 0.0063 secs per call
unpacked_values = [unpack('d', element_values_list[j])[0] for j in range(nbr_elements)]
# worst: ~0.0160 secs per call
buffer_ids = np.arange(nbr_elements) / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer_id = np.arange(nbr_elements) % NBR_DATA_POINTS_PER_BUFFER_INT
for abs_element_index in range(nbr_elements):
data[mmap_file_index][buffer_ids[abs_element_index]][index_in_buffer_id[abs_element_index]] = unpacked_values[abs_element_index]
'''
#t1 = time()
#print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
# go to beginning of memory mapped area and overwrite first value with
# ACK string so that the sender knows that it is safe to overwrite the
# previous data (== send new data).
for mmap_file_index in range_nbr_mmap_files:
mmap_data[mmap_file_index].seek(0)
mmap_data[mmap_file_index].write(DATA_RECEIVED_ACK_STR)
# overwrite the 'number of buffers received' field with zero, so that we don't
# keep reading in this very same data.
mmap_stats_go_to_nbr_received_buffers_pos()
mmap_stats.write(NBR_BUFFERS_ZERO_STR)
return data
@do_profile(DO_PROFILE)
def update_vbo_with_data_from_plot_queue():
global x_shift_current, x_shift_single_buffer_current
global pointer_shift
global vbos, colors
global c_vbo # counter needed for VBO positioning
global pointer_offset, nbr_points_rendered_in_last_vbo
for j in xrange(NBR_BUFFERS_TO_UPDATE):
# grab 'raw_data' from beginning of plot queue.
raw_data = get_data_from_plot_queue()
data = transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current)
### VBO POSITIONING
pos_to_overwrite = c_vbo % (NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER)
nbr_points_rendered_in_last_vbo = int(NBR_DATA_POINTS_PER_BUFFER * pos_to_overwrite)
# at which location in the memory (in bytes) of the VBO should we replace the data?
# also needed for plotting.
pointer_offset = nbr_points_rendered_in_last_vbo * BYTES_PER_POINT
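        # (added) Worked example, assuming the neuralynx-style values
        # NBR_DATA_POINTS_PER_BUFFER = 512 and NBR_DATA_POINTS_PER_VBO = 2048:
        # pos_to_overwrite cycles 0..3; for c_vbo == 2 we overwrite starting
        # at point 1024, i.e. byte offset 1024 * BYTES_PER_POINT == 8192.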
nbr_data_streams = len(data)
for panel in range(NBR_PANELS):
update_line_segment_on_GPU(vbos[panel][-1], pointer_offset, data[panel % nbr_data_streams])
c_vbo += 1
x_shift_single_buffer_current += SHIFT_X_SINGLE_BUFFER
pointer_shift += NBR_DATA_POINTS_PER_BUFFER
# check whether we reached the end of the VBO and thus need to rotate it.
if pointer_shift == NBR_DATA_POINTS_PER_VBO:
pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo = rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo)
@do_profile(DO_PROFILE)
def rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo):
# reset pointer offsets / shifts
# TODO: clean up and clarify 'pointer_shift' vs 'pointer_offset'!
pointer_shift = 0
pointer_offset = 0
c_vbo = 0
x_shift_current += SHIFT_X_BY
''' this is not fast enough and will lead to jitter effects
# generate new data set for each panel
tmp_points = [ [None] for j in range(NBR_PANELS)]
for panel in range(NBR_PANELS):
tmp_points_panel = generate_line_segment_zeros(x_shift=x_shift_current)
tmp_points[panel] = transform_line_points_to_data_format_for_GPU(tmp_points_panel)
'''
for panel in range(NBR_PANELS):
this_vbo = vbos[panel][0]
this_color = colors[panel][0]
# Delete current vbo and replace with new one.
# We could just re-use the current vbo, however this might lead to 'blinking' artifacts
# with the first VBO (probably because of incorrect referencing).
# By deleting the VBO, we make sure that this VBO is not being used for plotting.
glDeleteBuffers(1, pointer(this_vbo))
this_vbo = create_VBO()
# bind VBO and allocate memory.
glBindBuffer(GL_ARRAY_BUFFER, this_vbo)
glBufferData(GL_ARRAY_BUFFER, n_COORDINATES_PER_VERTEX * NBR_DATA_POINTS_PER_VBO * BYTES_PER_POINT, None, GL_DYNAMIC_DRAW)
# vbo pointer & color from arrays
vbos[panel] = vbos[panel][1:]
colors[panel] = colors[panel][1:]
# add color and pointer to VBO
vbos[panel].append(this_vbo)
colors[panel].append(this_color)
return pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo
@do_profile(DO_PROFILE)
def update_data_stream_status(data):
global prev_sum, MMAP_NO_DATA_INDICATE_ZERO, MMAP_NO_DATA_INDICATE_NON_ZERO
# check if new data has arrived and tell user
    # we only check the first data stream - I'm assuming here that either
    # all channels fail or none do.
nbr_mmap_files = 0
buffer_to_check = 0
current_sum = sum(data[nbr_mmap_files][buffer_to_check])
if current_sum == prev_sum:
if prev_sum == 0:
# indicate zero state only once
if not MMAP_NO_DATA_INDICATE_ZERO:
print datetime.now(), ' - No new data received (sum(data) == zero)'
MMAP_NO_DATA_INDICATE_ZERO = True
else:
if not MMAP_NO_DATA_INDICATE_NON_ZERO:
print datetime.now(), ' - No new data received (sum(data) != zero)'
MMAP_NO_DATA_INDICATE_NON_ZERO = True
else:
if MMAP_NO_DATA_INDICATE_ZERO:
MMAP_NO_DATA_INDICATE_ZERO = False
print datetime.now(), ' - New data received!'
if MMAP_NO_DATA_INDICATE_NON_ZERO:
MMAP_NO_DATA_INDICATE_NON_ZERO = False
print datetime.now(), ' - New data received!'
prev_sum = current_sum
# t1 = time()
# print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
@do_profile(DO_PROFILE)
def create_mmap_file_on_disk(fname):
# (over-) write file
fd = os.open(fname, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
assert os.write(fd, MMAP_NULL_HEX * MMAP_STORE_LENGTH)
os.close(fd)
@do_profile(DO_PROFILE)
def setup_mmap(filenames):
# matlab:
# m = memmapfile('/tmp/bla', 'Format', 'double', 'Writable', true)
# m.Data = sin(linspace(200, 203, 512))*100
# m.Data = linspace(200, 300, 512);
# t = timer('TimerFcn', 'm.Data=sin(linspace(200, 203, 512)) * rand(1)*512;', 'Period', 0.015, 'ExecutionMode', 'fixedRate');
# start(t)
mmap_false = False
mmap_data = []
for i in range(len(filenames)):
fname = filenames[i]
# check if file exists
if not os.path.isfile(fname):
# check if directory exists
path_to_file = os.path.dirname(fname)
if not os.path.isdir(path_to_file):
print "Directory '" + path_to_file + "' not found - creating it."
os.makedirs(path_to_file)
create_mmap_file_on_disk(fname)
# initialize the memory map
f = open(fname, "r+b")
mmap_data.append(mmap.mmap(f.fileno(), 0))
        # initialize the mapped memory with the default null value
        for j in range(MMAP_STORE_LENGTH):
            mmap_data[i][j] = MMAP_NULL_HEX
return mmap_data
##################### MAIN #####################################################
# animation is enabled by default. you can pause / resume it by pressing 'a'
DO_ANIMATE = True
DO_NEXT_STEP = False
''' BEGIN setup part 1 '''
if USE_MMAP:
# initialize MMAP
mmap_data = setup_mmap(MMAP_FILENAME)
if not mmap_data:
print "Could not read mmap-file. Aborting."
sys.exit(1)
if not os.path.isfile(MMAP_stats_file):
create_mmap_file_on_disk(MMAP_stats_file)
f = open(MMAP_stats_file, "r+b")
mmap_stats = mmap.mmap(f.fileno(), 0)
vbos, colors, x_shift_current = setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
# TODO: clarify difference between 'x_shift_single_buffer_current' and 'x_shift_current'
x_shift_single_buffer_current = x_shift_current
plot_queue = setup_plotting_queue()
info_str = "%d panels; %d segments per panel; %d number of points per segment." % ( NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO )
print info_str
# setup window
window = pyglet.window.Window(width=WIN_WIDTH_DEFAULT, height=WIN_HEIGHT_DEFAULT, resizable=True)
window.set_caption(info_str)
# initialize FPS display
fps_display = pyglet.clock.ClockDisplay(interval=0.125, format='FPS %(fps).2f')
''' END setup part 1 '''
''' BEGIN periodic event function - check whether we need to replace a VBO '''
# variables needed while updating the VBOs
pointer_shift = 0
pointer_offset = 0
nbr_points_rendered_in_last_vbo = 0
c_vbo = 0
# definitions needed for dequeueing of plot buffers.
NBR_BUFFERS_TO_UPDATE = 1
MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE = NBR_BUFFERS_TO_UPDATE
@do_profile(DO_PROFILE)
def update(dt):
# ~ 24 ms, generating new data set for each panel
# ~ 6 ms, generating only one new data set and re-using it.
# ~ 0.4 ms, without 'generate_line_segment' and 'overwrite_line_segment_on_GPU'
if not DO_ANIMATE:
# quit right away if animation is disabled. Ideally we would want to still
# compute at least the next set of 'tmp_points', however we need to make sure that
# 'x_shift_current' doesn't get updated more than once (or 'SHIFT_X_BY' is updated
# accordingly).
return
if DO_NEXT_STEP:
raw_input('please press key to continue ')
if DEBUG:
print "update_counter in 'update()' %d " % update_counter
t0 = time()
''' START 'DATA MANAGEMENT' '''
# pick up new data from mmap or other system (i.e. generated)
new_data, new_data_is_empty, nbr_buffers_per_mmap_file = request_new_data()
# don't add empty data to the queue
# don't use 'NBR_INDEPENDENT_CHANNELS' here, because we might be skipping this channel
if sum(new_data_is_empty) != len(new_data):
append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file)
''' END 'DATA MANAGEMENT' '''
''' START 'dequeue enough buffers and prepare them for plotting' '''
# don't purge entire queue - keep at least one element in queue.
if len(plot_queue) < MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE:
return
# dequeue buffers and update VBOs
update_vbo_with_data_from_plot_queue()
''' END 'dequeue enough buffers and prepare them for plotting' '''
# indicate that view needs to be shifted
global SHIFT_VIEW
SHIFT_VIEW = True
if DEBUG:
t1 = time()
print 'update() takes %f seconds' %(t1-t0)
pyglet.clock.schedule_interval(update, 1.0/CALL_UPDATE_X_TIMES_PER_SECOND)
''' END periodic event function '''
from pyglet.window import key
KEYPRESS_STEPSIZE = 10
zoom = 0
currentScale = 1
@window.event
@do_profile(DO_PROFILE)
def on_key_press(symbol, modifiers):
    global DO_ANIMATE, DO_NEXT_STEP, KEYPRESS_STEPSIZE, zoom, currentScale
    global x_shift_single_buffer_current, x_shift_current
    global plot_queue
# turn animation on / off.
if symbol == key.A:
DO_ANIMATE = not DO_ANIMATE
if DO_ANIMATE:
print 'animation on'
else:
print 'animation off'
elif symbol == key.C:
plot_queue = setup_plotting_queue()
print "Cleared Plot-Queue"
elif symbol == key.Q:
print "Plot-Queue size: %d" % (len(plot_queue))
    # zero the plot along the x axis. in case of drifting, this should get the
    # plot back onto the screen.
elif symbol == key.Z:
glTranslatef(+x_shift_single_buffer_current, 0.0, 0.0)
fps_display.label.x = fps_display.label.x - x_shift_single_buffer_current
x_shift_single_buffer_current = 0
x_shift_current = 0
elif symbol == key.S:
DO_NEXT_STEP = not DO_NEXT_STEP
elif symbol == key.LEFT:
glTranslatef(-KEYPRESS_STEPSIZE, 0.0, 0.0)
elif symbol == key.RIGHT:
glTranslatef(KEYPRESS_STEPSIZE, 0.0, 0.0)
elif (symbol == key.PLUS or symbol == key.NUM_ADD):
KEYPRESS_STEPSIZE += 10
print 'step size is now %d ' % KEYPRESS_STEPSIZE
elif (symbol == key.MINUS or symbol == key.NUM_SUBTRACT):
KEYPRESS_STEPSIZE -= 10
KEYPRESS_STEPSIZE = max(10, KEYPRESS_STEPSIZE)
print 'step size is now %d ' % KEYPRESS_STEPSIZE
else:
print '%s key, %s modifier was pressed' % (symbol, modifiers)
''' zooming
elif symbol == key.Z:
if modifiers == key.MOD_ALT + 16:
#zoom -= 0.5;
#glOrtho(+1.5 + zoom, 1.0 + zoom, +2.0 + zoom, 0.5 + zoom, +1.0, -3.5)
#currentScale -= 0.1
#glScaled(currentScale, currentScale, 1);
elif modifiers == key.MOD_SHIFT + 16:
#zoom += 0.5;
#glOrtho(-1.5 + zoom, 1.0 - zoom, -2.0 + zoom, 0.5 - zoom, -1.0, 3.5)
#currentScale += 0.1
#glScaled(currentScale, currentScale, 1);
'''
''' rotations
elif symbol == key.PAGEDOWN:
# we need to move objects into center, before rotating
#glRotatef(0.5, 1, 0, 0)
# need to move object back to original position
elif symbol == key.PAGEUP:
# we need to move objects into center, before rotating
#glRotatef(-0.5, 1, 0, 0)
# need to move object back to original position
'''
'''
BEGIN 'on_resize' function - can only be defined once 'window' exists
'''
@window.event
@do_profile(DO_PROFILE)
def on_resize(width, height):
global WIN_HEIGHT_current, WIN_WIDTH_current
WIN_HEIGHT_current = height
WIN_WIDTH_current = width
# TODO: currently we only rescale the Y dimension. Add X-Scaling!
if DEBUG:
print "new height %d " %(height)
print "new width %d " %(width)
''' END 'on_resize' function - can only be defined once 'window' exists '''
'''
BEGIN 'draw' function - can only be defined once 'window' exists
The EventLoop will dispatch this event when the window should be redrawn.
This will happen during idle time after any window events and after any
scheduled functions were called.
'''
@window.event
@do_profile(DO_PROFILE)
def on_draw():
# ~ 21ms (test6 was ~260ms)
global SHIFT_VIEW
# clear buffers to preset values
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# TODO:
# maybe we should move back to the origin and translate from there?
# glLoadIdentity()
# glTranslatef(-x_shift_single_buffer_current/2, 0.0, 0.0)
if SHIFT_VIEW:
#local_shift = (SHIFT_X_BY/CALL_UPDATE_X_TIMES_PER_SECOND)
# TODO: fix 'local_shift', right now we override it to '1'
# 'SHIFT_X_BY' needs to be an integral number, otherwise we get
# artifacts of single points moving up and down between shifts.
local_shift = NBR_BUFFERS_TO_UPDATE * STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
#local_shift = 1
glTranslatef(-local_shift, 0.0, 0.0)
# shift location of FPS display by same amount - but in opposite direction
# TODO: this must be because of a different reference point?
fps_display.label.x = fps_display.label.x + local_shift
SHIFT_VIEW = False
if USE_UNIFORM_COLOR:
glColor3f(DEFAULT_COLOR[0], DEFAULT_COLOR[1], DEFAULT_COLOR[2])
height_per_panel = (WIN_HEIGHT_current / NBR_PANELS)
for panel in range(NBR_PANELS):
#glViewport(x, y, w, h)
glViewport(0, panel * height_per_panel, WIN_WIDTH_current, height_per_panel)
# plot each VBO
for segment in range(NBR_VBOS_PER_PANEL):
if not USE_UNIFORM_COLOR:
this_color = colors[panel][segment]
glColor3f(this_color[0], this_color[1], this_color[2])
# bind the named buffer object so we can work with it.
glBindBuffer(GL_ARRAY_BUFFER, vbos[panel][segment])
## TODO!
''' hide individual buffers in first VBO so that points disappear
smoothly in the first buffer '''
this_pointer_offset = 0
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO
if segment == 0:
this_pointer_offset = pointer_offset
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO - (pointer_offset / BYTES_PER_POINT)
elif segment == NBR_VBOS_PER_PANEL - 1:
# TODO: is 'nbr_points_rendered_in_last_vbo' correct? or are we plotting too few points?
this_pointer_offset = 0
nbr_points_to_draw = nbr_points_rendered_in_last_vbo
# specifies the location and data format of an array of vertex coordinates to use when rendering
glVertexPointer(n_COORDINATES_PER_VERTEX, GL_FLOAT, 0, this_pointer_offset)
# render primitives from array data
glDrawArrays(GL_LINE_STRIP, 0, nbr_points_to_draw)
# update the FPS display.
glViewport(0, 0, WIN_WIDTH_current, WIN_HEIGHT_current)
fps_display.draw()
''' END 'draw' function - can only be defined once 'window' exists '''
''' BEGIN setup part 2 '''
glClearColor(0, 0, 0, 1.0)
# enable VERTEX_ARRAY mode.
glEnableClientState(GL_VERTEX_ARRAY)
# start application event loop
pyglet.app.run()
'''
print "quit counter " + str(on_draw_quit_counter)
print "re-draw counter " + str(on_draw_redraw_counter)
print "update counter " + str(update_counter)
'''
''' END setup part 2 '''
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import sys
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
class ReturnValueIgnoredError(Exception):
pass
class _DummyFuture(object):
def __init__(self):
self._done = False
self._result = None
self._exception = None
self._callbacks = []
def cancel(self):
return False
def cancelled(self):
return False
def running(self):
return not self._done
def done(self):
return self._done
def result(self, timeout=None):
self._check_done()
if self._exception:
raise self._exception
return self._result
def exception(self, timeout=None):
self._check_done()
if self._exception:
return self._exception
else:
return None
def add_done_callback(self, fn):
if self._done:
fn(self)
else:
self._callbacks.append(fn)
def set_result(self, result):
self._result = result
self._set_done()
def set_exception(self, exception):
self._exception = exception
self._set_done()
def _check_done(self):
if not self._done:
raise Exception("DummyFuture does not support blocking for results")
def _set_done(self):
self._done = True
for cb in self._callbacks:
# TODO: error handling
cb(self)
self._callbacks = None
if futures is None:
Future = _DummyFuture
else:
Future = futures.Future
class TracebackFuture(Future):
"""Subclass of `Future` which can store a traceback with
exceptions.
The traceback is automatically available in Python 3, but in the
Python 2 futures backport this information is discarded.
"""
def __init__(self):
super(TracebackFuture, self).__init__()
self.__exc_info = None
def exc_info(self):
return self.__exc_info
def set_exc_info(self, exc_info):
"""Traceback-aware replacement for
`~concurrent.futures.Future.set_exception`.
"""
self.__exc_info = exc_info
self.set_exception(exc_info[1])
def result(self, timeout=None):
if self.__exc_info is not None:
raise_exc_info(self.__exc_info)
else:
return super(TracebackFuture, self).result(timeout=timeout)
class DummyExecutor(object):
def submit(self, fn, *args, **kwargs):
future = TracebackFuture()
try:
future.set_result(fn(*args, **kwargs))
except Exception:
future.set_exc_info(sys.exc_info())
return future
def shutdown(self, wait=True):
pass
dummy_executor = DummyExecutor()
def run_on_executor(fn):
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
This decorator should be used only on methods of objects with attributes
``executor`` and ``io_loop``.
"""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None)
future = self.executor.submit(fn, self, *args, **kwargs)
if callback:
self.io_loop.add_future(future,
lambda future: callback(future.result()))
return future
return wrapper
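# Hedged usage sketch (added; 'Worker', 'slow_double' and 'handle' are
# illustrative names only):
#
#   class Worker(object):
#       def __init__(self, executor, io_loop):
#           self.executor = executor
#           self.io_loop = io_loop
#
#       @run_on_executor
#       def slow_double(self, x):
#           return x * 2
#
# worker.slow_double(21) returns a future that resolves to 42 on the
# executor; passing callback=handle additionally schedules handle(42) on
# the io_loop once the future completes.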
_NO_RESULT = object()
def return_future(f):
"""Decorator to make a function that returns via callback return a
`Future`.
The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be
captured by the `.StackContext` and passed along to the ``Future``).
From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete
with `Future.result()` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the
surrounding `.StackContext`.
If no callback is given, the caller should use the ``Future`` to
wait for the function to complete (perhaps by yielding it in a
`.gen.engine` function, or passing it to `.IOLoop.add_future`).
Usage::
@return_future
def future_func(arg1, arg2, callback):
# Do stuff (possibly asynchronous)
callback(result)
@gen.engine
def caller(callback):
yield future_func(arg1, arg2)
callback()
Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future.set_result(value),
args, kwargs)
def handle_error(typ, value, tb):
future.set_exc_info((typ, value, tb))
return True
exc_info = None
with ExceptionStackContext(handle_error):
try:
result = f(*args, **kwargs)
if result is not None:
raise ReturnValueIgnoredError(
"@return_future should not be used with functions "
"that return values")
except:
exc_info = sys.exc_info()
raise
if exc_info is not None:
# If the initial synchronous part of f() raised an exception,
# go ahead and raise it to the caller directly without waiting
# for them to inspect the Future.
raise_exc_info(exc_info)
# If the caller passed in a callback, schedule it to be called
# when the future resolves. It is important that this happens
# just before we return the future, or else we risk confusing
# stack contexts with multiple exceptions (one here with the
# immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()).
if callback is not None:
def run_callback(future):
result = future.result()
if result is _NO_RESULT:
callback()
else:
callback(future.result())
future.add_done_callback(wrap(run_callback))
return future
return wrapper
def chain_future(a, b):
"""Chain two futures together so that when one completes, so does the other.
The result (success or failure) of ``a`` will be copied to ``b``.
"""
def copy(future):
assert future is a
if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture)
and a.exc_info() is not None):
b.set_exc_info(a.exc_info())
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
a.add_done_callback(copy)
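# Hedged illustration (added): copying completion from one future to another:
#   a, b = TracebackFuture(), TracebackFuture()
#   chain_future(a, b)
#   a.set_result(1)      # copy() fires via the done-callback; b.result() == 1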
#!/usr/bin/env python3
import unittest
from framework import VppTestCase, VppTestRunner
from scapy.layers.inet import IP, TCP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether
from scapy.packet import Raw
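# Note (added, inferred from the calls below rather than from the VPP API
# definition): the ipv4_direction/ipv6_direction arguments to
# mss_clamp_enable_disable look like a bitmask - 1 == RX, 2 == TX,
# 3 == both, 0 == disabled.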
class TestMSSClamp(VppTestCase):
""" TCP MSS Clamping Test Case """
def setUp(self):
super(TestMSSClamp, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
super(TestMSSClamp, self).tearDown()
def verify_pkt(self, rx, expected_mss):
# check that the MSS size equals the expected value
# and the IP and TCP checksums are correct
tcp = rx[TCP]
tcp_csum = tcp.chksum
del tcp.chksum
ip_csum = 0
if (rx.haslayer(IP)):
ip_csum = rx[IP].chksum
del rx[IP].chksum
opt = tcp.options
self.assertEqual(opt[0][0], 'MSS')
self.assertEqual(opt[0][1], expected_mss)
# recalculate checksums
rx = rx.__class__(bytes(rx))
tcp = rx[TCP]
self.assertEqual(tcp_csum, tcp.chksum)
if (rx.haslayer(IP)):
self.assertEqual(ip_csum, rx[IP].chksum)
def send_and_verify_ip4(self, src_pg, dst_pg, mss, expected_mss):
# IPv4 TCP packet with the requested MSS option.
# from a host on src_pg to a host on dst_pg.
p = (Ether(dst=src_pg.local_mac,
src=src_pg.remote_mac) /
IP(src=src_pg.remote_ip4,
dst=dst_pg.remote_ip4) /
TCP(sport=1234, dport=1234,
flags="S",
options=[('MSS', (mss)), ('EOL', None)]) /
             Raw(b'\xa5' * 100))
rxs = self.send_and_expect(src_pg, p * 65, dst_pg)
for rx in rxs:
self.verify_pkt(rx, expected_mss)
def send_and_verify_ip6(self, src_pg, dst_pg, mss, expected_mss):
#
# IPv6 TCP packet with the requested MSS option.
# from a host on src_pg to a host on dst_pg.
#
p = (Ether(dst=src_pg.local_mac,
src=src_pg.remote_mac) /
IPv6(src=src_pg.remote_ip6,
dst=dst_pg.remote_ip6) /
TCP(sport=1234, dport=1234,
flags="S",
options=[('MSS', (mss)), ('EOL', None)]) /
             Raw(b'\xa5' * 100))
rxs = self.send_and_expect(src_pg, p * 65, dst_pg)
for rx in rxs:
self.verify_pkt(rx, expected_mss)
def test_tcp_mss_clamping_ip4_tx(self):
""" IP4 TCP MSS Clamping TX """
# enable the TCP MSS clamping feature to lower the MSS to 1424.
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1424, ipv6_mss=0,
ipv4_direction=3, ipv6_direction=0)
# Verify that the feature is enabled.
rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
self.assertEqual(reply[0].ipv4_mss, 1424)
self.assertEqual(reply[0].ipv4_direction, 3)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1424)
# check the stats
stats = self.statistics.get_counter(
'/err/tcp-mss-clamping-ip4-out/clamped')
self.assertEqual(sum(stats), 65)
# Send syn packets with small enough MSS values and verify they are
# unchanged.
self.send_and_verify_ip4(self.pg0, self.pg1, 1400, 1400)
        # enable the feature only in TX direction
# and change the max MSS value
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1420, ipv6_mss=0,
ipv4_direction=2, ipv6_direction=0)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1420)
        # enable the feature only in RX direction
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1424, ipv6_mss=0,
ipv4_direction=1, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1460)
# disable the feature
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=0,
ipv4_direction=0, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1460)
def test_tcp_mss_clamping_ip4_rx(self):
""" IP4 TCP MSS Clamping RX """
# enable the TCP MSS clamping feature to lower the MSS to 1424.
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1424, ipv6_mss=0,
ipv4_direction=3, ipv6_direction=0)
# Verify that the feature is enabled.
rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
self.assertEqual(reply[0].ipv4_mss, 1424)
self.assertEqual(reply[0].ipv4_direction, 3)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1424)
# check the stats
stats = self.statistics.get_counter(
'/err/tcp-mss-clamping-ip4-in/clamped')
self.assertEqual(sum(stats), 65)
# Send syn packets with small enough MSS values and verify they are
# unchanged.
self.send_and_verify_ip4(self.pg1, self.pg0, 1400, 1400)
        # enable the feature only in RX direction
# and change the max MSS value
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1420, ipv6_mss=0,
ipv4_direction=1, ipv6_direction=0)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1420)
        # enable the feature only in TX direction
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=1424, ipv6_mss=0,
ipv4_direction=2, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1460)
# disable the feature
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=0,
ipv4_direction=0, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1460)
def test_tcp_mss_clamping_ip6_tx(self):
""" IP6 TCP MSS Clamping TX """
# enable the TCP MSS clamping feature to lower the MSS to 1424.
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1424,
ipv4_direction=0, ipv6_direction=3)
# Verify that the feature is enabled.
rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
self.assertEqual(reply[0].ipv6_mss, 1424)
self.assertEqual(reply[0].ipv6_direction, 3)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1424)
# check the stats
stats = self.statistics.get_counter(
'/err/tcp-mss-clamping-ip6-out/clamped')
self.assertEqual(sum(stats), 65)
# Send syn packets with small enough MSS values and verify they are
# unchanged.
self.send_and_verify_ip6(self.pg0, self.pg1, 1400, 1400)
        # enable the feature only in TX direction
# and change the max MSS value
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1420,
ipv4_direction=0, ipv6_direction=2)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1420)
        # enable the feature only in RX direction
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1424,
ipv4_direction=0, ipv6_direction=1)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1460)
# disable the feature
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=0,
ipv4_direction=0, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1460)
def test_tcp_mss_clamping_ip6_rx(self):
""" IP6 TCP MSS Clamping RX """
# enable the TCP MSS clamping feature to lower the MSS to 1424.
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1424,
ipv4_direction=0, ipv6_direction=3)
# Verify that the feature is enabled.
rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
self.assertEqual(reply[0].ipv6_mss, 1424)
self.assertEqual(reply[0].ipv6_direction, 3)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1424)
# check the stats
stats = self.statistics.get_counter(
'/err/tcp-mss-clamping-ip6-in/clamped')
self.assertEqual(sum(stats), 65)
# Send syn packets with small enough MSS values and verify they are
# unchanged.
self.send_and_verify_ip6(self.pg1, self.pg0, 1400, 1400)
        # enable the feature only in RX direction
# and change the max MSS value
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1420,
ipv4_direction=0, ipv6_direction=1)
# Send syn packets and verify that the MSS value is lowered.
self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1420)
# enable the feature only in the TX direction
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=1424,
ipv4_direction=0, ipv6_direction=2)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1460)
# disable the feature
self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
ipv4_mss=0, ipv6_mss=0,
ipv4_direction=0, ipv6_direction=0)
# Send the packets again and ensure they are unchanged.
self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1460)
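# Taken together, the cases above suggest the direction argument is a
# bitmask: 1 = RX, 2 = TX, 3 = both (inferred from which sends get clamped).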
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
import shutil
import tempfile
from xml.dom import minidom
from pants.backend.jvm.targets.java_tests import JavaTests
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.project_info.tasks.ide_gen import IdeGen, Project
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.build_environment import get_buildroot
from pants.base.generator import Generator, TemplateData
from pants.base.source_root import SourceRoot
from pants.scm.git import Git
from pants.util.dirutil import safe_mkdir, safe_walk
_TEMPLATE_BASEDIR = 'templates/idea'
_VERSIONS = {
'9': '12', # 9 and 12 are ipr/iml compatible
'10': '12', # 10 and 12 are ipr/iml compatible
'11': '12', # 11 and 12 are ipr/iml compatible
'12': '12'
}
_SCALA_VERSION_DEFAULT = '2.9'
_SCALA_VERSIONS = {
'2.8': 'Scala 2.8',
_SCALA_VERSION_DEFAULT: 'Scala 2.9',
'2.10': 'Scala 2.10',
'2.10-virt': 'Scala 2.10 virtualized'
}
class IdeaGen(IdeGen):
@classmethod
def register_options(cls, register):
super(IdeaGen, cls).register_options(register)
register('--version', choices=sorted(list(_VERSIONS.keys())), default='11',
help='The IntelliJ IDEA version the project config should be generated for.')
register('--merge', action='store_true', default=True,
help='Merge any manual customizations in existing '
'IntelliJ IDEA configuration. If False, manual customizations '
'will be overwritten.')
register('--open', action='store_true', default=True,
help='Attempts to open the generated project in IDEA.')
register('--bash', action='store_true',
help='Adds a bash facet to the generated project configuration.')
register('--scala-language-level',
choices=_SCALA_VERSIONS.keys(), default=_SCALA_VERSION_DEFAULT,
help='Set the scala language level used for IDEA linting.')
register('--scala-maximum-heap-size-mb', type=int, default=512,
help='Sets the maximum heap size (in megabytes) for scalac.')
register('--fsc', action='store_true', default=False,
help='If the project contains any scala targets this specifies that the '
'fsc compiler should be enabled.')
register('--java-encoding', default='UTF-8',
help='Sets the file encoding for java files in this project.')
register('--java-maximum-heap-size-mb', type=int, default=512,
help='Sets the maximum heap size (in megabytes) for javac.')
register('--exclude-maven-target', action='store_true', default=False,
help="Exclude 'target' directories for directories containing "
"pom.xml files. These directories contain generated code and"
"copies of files staged for deployment.")
register('--exclude_folders', action='append',
default=[
'.pants.d/compile',
'.pants.d/ivy',
'.pants.d/python',
'.pants.d/resources',
],
help='Adds folders to be excluded from the project configuration.')
register('--annotation-processing-enabled', action='store_true',
help='Tell IntelliJ IDEA to run annotation processors.')
register('--annotation-generated-sources-dir', default='generated', advanced=True,
help='Directory relative to --project-dir to write annotation processor sources.')
register('--annotation-generated-test-sources-dir', default='generated_tests', advanced=True,
help='Directory relative to --project-dir to write annotation processor test sources.')
register('--annotation-processor', action='append', advanced=True,
help='Add a Class name of a specific annotation processor to run.')
def __init__(self, *args, **kwargs):
super(IdeaGen, self).__init__(*args, **kwargs)
self.intellij_output_dir = os.path.join(self.gen_project_workdir, 'out')
self.nomerge = not self.get_options().merge
self.open = self.get_options().open
self.bash = self.get_options().bash
self.scala_language_level = _SCALA_VERSIONS.get(
self.get_options().scala_language_level, None)
self.scala_maximum_heap_size = self.get_options().scala_maximum_heap_size_mb
self.fsc = self.get_options().fsc
self.java_encoding = self.get_options().java_encoding
self.java_maximum_heap_size = self.get_options().java_maximum_heap_size_mb
idea_version = _VERSIONS[self.get_options().version]
self.project_template = os.path.join(_TEMPLATE_BASEDIR,
'project-{}.mustache'.format(idea_version))
self.module_template = os.path.join(_TEMPLATE_BASEDIR,
'module-{}.mustache'.format(idea_version))
self.project_filename = os.path.join(self.cwd,
'{}.ipr'.format(self.project_name))
self.module_filename = os.path.join(self.gen_project_workdir,
'{}.iml'.format(self.project_name))
@staticmethod
def _maven_targets_excludes(repo_root):
excludes = []
for (dirpath, dirnames, filenames) in safe_walk(repo_root):
if "pom.xml" in filenames:
excludes.append(os.path.join(os.path.relpath(dirpath, start=repo_root), "target"))
return excludes
@staticmethod
def _sibling_is_test(source_set):
"""Determine if a SourceSet represents a test path.
Non-test targets that otherwise live in test target roots (say a java_library) must
be marked as test for IDEA to correctly link the targets with the test code that uses
them. Therefore we check to see if the source root registered to the path or any of its sibling
source roots are defined with a test type.
:param source_set: SourceSet to analyze
:returns: True if the SourceSet represents a path containing tests
"""
def has_test_type(types):
for target_type in types:
# TODO(Eric Ayers) Find a way for a target to identify itself instead of a hard coded list
if target_type in (JavaTests, PythonTests):
return True
return False
if source_set.path:
path = os.path.join(source_set.source_base, source_set.path)
else:
path = source_set.source_base
sibling_paths = SourceRoot.find_siblings_by_path(path)
for sibling_path in sibling_paths:
if has_test_type(SourceRoot.types(sibling_path)):
return True
return False
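# Informally: a java_library whose source root sits beside a root that is
# registered with a test type (e.g. under the same 'tests' base) gets
# is_test=True above, so IDEA can link it with the test code that uses it.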
@property
def annotation_processing_template(self):
return TemplateData(
enabled=self.get_options().annotation_processing_enabled,
rel_source_output_dir=os.path.join('..','..','..',
self.get_options().annotation_generated_sources_dir),
source_output_dir=
os.path.join(self.gen_project_workdir,
self.get_options().annotation_generated_sources_dir),
rel_test_source_output_dir=os.path.join('..','..','..',
self.get_options().annotation_generated_test_sources_dir),
test_source_output_dir=
os.path.join(self.gen_project_workdir,
self.get_options().annotation_generated_test_sources_dir),
processors=[{'class_name' : processor}
for processor in self.get_options().annotation_processor],
)
def generate_project(self, project):
def create_content_root(source_set):
root_relative_path = os.path.join(source_set.source_base, source_set.path) \
if source_set.path else source_set.source_base
if self.get_options().infer_test_from_siblings:
is_test = IdeaGen._sibling_is_test(source_set)
else:
is_test = source_set.is_test
if source_set.resources_only:
if source_set.is_test:
content_type = 'java-test-resource'
else:
content_type = 'java-resource'
else:
content_type = ''
sources = TemplateData(
path=root_relative_path,
package_prefix=source_set.path.replace('/', '.') if source_set.path else None,
is_test=is_test,
content_type=content_type
)
return TemplateData(
path=root_relative_path,
sources=[sources],
exclude_paths=[os.path.join(source_set.source_base, x) for x in source_set.excludes],
)
content_roots = [create_content_root(source_set) for source_set in project.sources]
if project.has_python:
content_roots.extend(create_content_root(source_set) for source_set in project.py_sources)
scala = None
if project.has_scala:
scala = TemplateData(
language_level=self.scala_language_level,
maximum_heap_size=self.scala_maximum_heap_size,
fsc=self.fsc,
compiler_classpath=project.scala_compiler_classpath
)
exclude_folders = []
if self.get_options().exclude_maven_target:
exclude_folders += IdeaGen._maven_targets_excludes(get_buildroot())
exclude_folders += self.get_options().exclude_folders
java_language_level = None
for target in project.targets:
if isinstance(target, JvmTarget):
if java_language_level is None or java_language_level < target.platform.source_level:
java_language_level = target.platform.source_level
if java_language_level is not None:
java_language_level = 'JDK_{0}_{1}'.format(*java_language_level.components[:2])
configured_module = TemplateData(
root_dir=get_buildroot(),
path=self.module_filename,
content_roots=content_roots,
bash=self.bash,
python=project.has_python,
scala=scala,
internal_jars=[cp_entry.jar for cp_entry in project.internal_jars],
internal_source_jars=[cp_entry.source_jar for cp_entry in project.internal_jars
if cp_entry.source_jar],
external_jars=[cp_entry.jar for cp_entry in project.external_jars],
external_javadoc_jars=[cp_entry.javadoc_jar for cp_entry in project.external_jars
if cp_entry.javadoc_jar],
external_source_jars=[cp_entry.source_jar for cp_entry in project.external_jars
if cp_entry.source_jar],
annotation_processing=self.annotation_processing_template,
extra_components=[],
exclude_folders=exclude_folders,
java_language_level=java_language_level,
)
outdir = os.path.abspath(self.intellij_output_dir)
if not os.path.exists(outdir):
os.makedirs(outdir)
configured_project = TemplateData(
root_dir=get_buildroot(),
outdir=outdir,
git_root=Git.detect_worktree(),
modules=[configured_module],
java=TemplateData(
encoding=self.java_encoding,
maximum_heap_size=self.java_maximum_heap_size,
jdk=self.java_jdk,
language_level='JDK_1_{}'.format(self.java_language_level)
),
resource_extensions=list(project.resource_extensions),
scala=scala,
checkstyle_classpath=';'.join(project.checkstyle_classpath),
debug_port=project.debug_port,
annotation_processing=self.annotation_processing_template,
extra_components=[],
)
existing_project_components = None
existing_module_components = None
if not self.nomerge:
# Grab the existing components, which may include customized ones.
existing_project_components = self._parse_xml_component_elements(self.project_filename)
existing_module_components = self._parse_xml_component_elements(self.module_filename)
# Generate (without merging in any extra components).
safe_mkdir(os.path.abspath(self.intellij_output_dir))
ipr = self._generate_to_tempfile(
Generator(pkgutil.get_data(__name__, self.project_template), project=configured_project))
iml = self._generate_to_tempfile(
Generator(pkgutil.get_data(__name__, self.module_template), module=configured_module))
if not self.nomerge:
# Get the names of the components we generated, and then delete the
# generated files. Clunky, but performance is not an issue, and this
# is an easy way to get those component names from the templates.
extra_project_components = self._get_components_to_merge(existing_project_components, ipr)
extra_module_components = self._get_components_to_merge(existing_module_components, iml)
os.remove(ipr)
os.remove(iml)
# Generate again, with the extra components.
ipr = self._generate_to_tempfile(Generator(pkgutil.get_data(__name__, self.project_template),
project=configured_project.extend(extra_components=extra_project_components)))
iml = self._generate_to_tempfile(Generator(pkgutil.get_data(__name__, self.module_template),
module=configured_module.extend(extra_components=extra_module_components)))
self.context.log.info('Generated IntelliJ project in {directory}'
.format(directory=self.gen_project_workdir))
shutil.move(ipr, self.project_filename)
shutil.move(iml, self.module_filename)
return self.project_filename if self.open else None
def _generate_to_tempfile(self, generator):
"""Applies the specified generator to a temp file and returns the path to that file.
We generate into a temp file so that we don't lose any manual customizations on error."""
(output_fd, output_path) = tempfile.mkstemp()
with os.fdopen(output_fd, 'w') as output:
generator.write(output)
return output_path
def _get_resource_extensions(self, project):
resource_extensions = set()
resource_extensions.update(project.resource_extensions)
# TODO(John Sirois): make test resources 1st class in ant build and punch this through to pants
# model
for _, _, files in safe_walk(os.path.join(get_buildroot(), 'tests', 'resources')):
resource_extensions.update(Project.extract_resource_extensions(files))
return resource_extensions
def _parse_xml_component_elements(self, path):
"""Returns a list of pairs (component_name, xml_fragment) where xml_fragment is the xml text of
that <component> in the specified xml file."""
if not os.path.exists(path):
return [] # No existing components.
dom = minidom.parse(path)
# .ipr and .iml files both consist of <component> elements directly under a root element.
return [(x.getAttribute('name'), x.toxml()) for x in dom.getElementsByTagName('component')]
def _get_components_to_merge(self, mergable_components, path):
"""Returns a list of the <component> fragments in mergable_components that are not
superseded by a <component> in the specified xml file.
mergable_components is a list of (name, xml_fragment) pairs."""
# As a convenience, we use _parse_xml_component_elements to get the
# superseding component names, ignoring the generated xml fragments.
# This is fine, since performance is not an issue.
generated_component_names = set(
[name for (name, _) in self._parse_xml_component_elements(path)])
return [x[1] for x in mergable_components if x[0] not in generated_component_names]
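# Merge flow, informally: with --merge (the default), any <component> that was
# hand-added to an existing .ipr/.iml and is not re-emitted by the templates is
# carried over verbatim by _get_components_to_merge() above, so manual
# customizations survive regeneration.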
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DeploymentSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'min_ready_seconds': 'int',
'paused': 'bool',
'progress_deadline_seconds': 'int',
'replicas': 'int',
'revision_history_limit': 'int',
'selector': 'V1LabelSelector',
'strategy': 'V1beta2DeploymentStrategy',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'paused': 'paused',
'progress_deadline_seconds': 'progressDeadlineSeconds',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'selector': 'selector',
'strategy': 'strategy',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, selector=None, strategy=None, template=None):
"""
V1beta2DeploymentSpec - a model defined in Swagger
"""
self._min_ready_seconds = None
self._paused = None
self._progress_deadline_seconds = None
self._replicas = None
self._revision_history_limit = None
self._selector = None
self._strategy = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if paused is not None:
self.paused = paused
if progress_deadline_seconds is not None:
self.progress_deadline_seconds = progress_deadline_seconds
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
if selector is not None:
self.selector = selector
if strategy is not None:
self.strategy = strategy
self.template = template
@property
def min_ready_seconds(self):
"""
Gets the min_ready_seconds of this V1beta2DeploymentSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:return: The min_ready_seconds of this V1beta2DeploymentSpec.
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""
Sets the min_ready_seconds of this V1beta2DeploymentSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:param min_ready_seconds: The min_ready_seconds of this V1beta2DeploymentSpec.
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def paused(self):
"""
Gets the paused of this V1beta2DeploymentSpec.
Indicates that the deployment is paused.
:return: The paused of this V1beta2DeploymentSpec.
:rtype: bool
"""
return self._paused
@paused.setter
def paused(self, paused):
"""
Sets the paused of this V1beta2DeploymentSpec.
Indicates that the deployment is paused.
:param paused: The paused of this V1beta2DeploymentSpec.
:type: bool
"""
self._paused = paused
@property
def progress_deadline_seconds(self):
"""
Gets the progress_deadline_seconds of this V1beta2DeploymentSpec.
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.
:return: The progress_deadline_seconds of this V1beta2DeploymentSpec.
:rtype: int
"""
return self._progress_deadline_seconds
@progress_deadline_seconds.setter
def progress_deadline_seconds(self, progress_deadline_seconds):
"""
Sets the progress_deadline_seconds of this V1beta2DeploymentSpec.
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.
:param progress_deadline_seconds: The progress_deadline_seconds of this V1beta2DeploymentSpec.
:type: int
"""
self._progress_deadline_seconds = progress_deadline_seconds
@property
def replicas(self):
"""
Gets the replicas of this V1beta2DeploymentSpec.
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
:return: The replicas of this V1beta2DeploymentSpec.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1beta2DeploymentSpec.
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
:param replicas: The replicas of this V1beta2DeploymentSpec.
:type: int
"""
self._replicas = replicas
@property
def revision_history_limit(self):
"""
Gets the revision_history_limit of this V1beta2DeploymentSpec.
The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.
:return: The revision_history_limit of this V1beta2DeploymentSpec.
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""
Sets the revision_history_limit of this V1beta2DeploymentSpec.
The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.
:param revision_history_limit: The revision_history_limit of this V1beta2DeploymentSpec.
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def selector(self):
"""
Gets the selector of this V1beta2DeploymentSpec.
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.
:return: The selector of this V1beta2DeploymentSpec.
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1beta2DeploymentSpec.
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.
:param selector: The selector of this V1beta2DeploymentSpec.
:type: V1LabelSelector
"""
self._selector = selector
@property
def strategy(self):
"""
Gets the strategy of this V1beta2DeploymentSpec.
The deployment strategy to use to replace existing pods with new ones.
:return: The strategy of this V1beta2DeploymentSpec.
:rtype: V1beta2DeploymentStrategy
"""
return self._strategy
@strategy.setter
def strategy(self, strategy):
"""
Sets the strategy of this V1beta2DeploymentSpec.
The deployment strategy to use to replace existing pods with new ones.
:param strategy: The strategy of this V1beta2DeploymentSpec.
:type: V1beta2DeploymentStrategy
"""
self._strategy = strategy
@property
def template(self):
"""
Gets the template of this V1beta2DeploymentSpec.
Template describes the pods that will be created.
:return: The template of this V1beta2DeploymentSpec.
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""
Sets the template of this V1beta2DeploymentSpec.
Template describes the pods that will be created.
:param template: The template of this V1beta2DeploymentSpec.
:type: V1PodTemplateSpec
"""
if template is None:
raise ValueError("Invalid value for `template`, must not be `None`")
self._template = template
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2DeploymentSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
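# A minimal construction sketch (illustrative only; assumes the sibling
# generated models V1LabelSelector and V1PodTemplateSpec are importable from
# the same generated client package):
#
#   spec = V1beta2DeploymentSpec(
#       replicas=3,
#       selector=V1LabelSelector(match_labels={'app': 'web'}),
#       template=V1PodTemplateSpec(),
#   )
#   print(spec.to_str())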
|
|
#!/usr/bin/python
import sys
import argparse
from argparse import RawTextHelpFormatter, SUPPRESS
#
# by James
# https://github.com/viaMorgoth/Steganize
#
# version: 1.0.1
#
#=======================================
#DEFINE THE GLOBAL VARIABLES OF THIS SCRIPT
#=======================================
#These are signatures that will be used to detect hidden messages
header = '6a68' # equivalent of 'jh' in hex
footer = '686a' # equivalent of 'hj' in hex
space = '20' # equivalent of a space in hex
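#For reference, a quick sanity check of the signatures (Python 2 semantics):
#   'jh'.encode('hex') == '6a68' and '6a68'.decode('hex') == 'jh'
#so the signature survives the hex round trip used throughout this script.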
#=======================================
#DEFINE THE FUNCTIONS OF THIS SCRIPT
#=======================================
def main():
args = get_args()
if args.e:
if args.password:
import_simplecrypt()
print "embedding '%s' into %s with password %s...." % (args.message, args.filename, args.password)
encode(args.message, args.filename, args.password)
else:
print "embedding '%s' into %s...." % (args.message, args.filename)
encode(args.message, args.filename)
elif args.d:
if args.password:
import_simplecrypt()
print "extracting message from %s with password: %s...." % (args.filename, args.password)
decode(args.filename, args.password)
else:
print "extracting message from %s...." % args.filename
decode(args.filename)
def import_simplecrypt():
try:
import simplecrypt
except Exception:
print 'You must install simplecrypt. Run the command \npip install -r requirements.txt'
sys.exit()
def get_args():
"""
Set up the arguments for the command line tool
Either -e or -d is required.
--filename is required in both cases; --message is only needed with -e.
--password is optional for both -e and -d.
"""
parser = argparse.ArgumentParser(description='''
Welcome to Steganize. This program will encode and decode secret messages into jpg files.
**Encode**
steganize.py -e --message 'top secret' --filename inconspicuous.jpg
steganize.py -e --message /top_secret.txt --filename inconspicuous.jpg
steganize.py -e --message 'top_secret.txt' --filename inconspicuous.jpg --password p@ssw0rd!
**Decode**
steganize.py -d --filename inconspicuous.jpg
steganize.py -d --filename inconspicuous.jpg --password p@ssw0rd!
''', formatter_class=RawTextHelpFormatter, usage=SUPPRESS)
command_group = parser.add_mutually_exclusive_group(required=True)
command_group.add_argument('-e', action='store_true', help='Encode a secret message',)
command_group.add_argument('-d', action='store_true', help='Decode a secret message',)
parser.add_argument('--filename', help='Name of the file from which encoding/decoding takes place', required=True)
parser.add_argument('--message', help='Secret message to encode', required=False)
parser.add_argument('--password', help='Encrypt/decrypt message with PASSWORD', required=False)
return parser.parse_args()
def encode(msg, m_file, passwd=None):
"""This functions encodes the given secret into a destination file.
Args:
msg (str): For encoding commands the message or text file to encode
For decoding commands the file to decode
m_file (str): For encoding commands the file in which the message will be encoded
For decoding commands the password to decrypt the message
Kwargs:
password (str): For encoding commands the password to encrypt the message
"""
secret = get_secret_msg(msg)
#Convert the destination file into hex so that we can measure its free space
with open(m_file, "rb") as dest_file:
destination = dest_file.read()
if passwd is not None:
from simplecrypt import encrypt
secret = encrypt(passwd, secret)
msg_chars = len(secret)
secret = secret.encode('hex')
destination = destination.encode('hex')
#At this point 'secret'(str) and 'destination'(file) are now hex values(str)
#Free space in the destination is currently defined as spaces
#We decide if there is enough blank space to just plug in the secret message
free_space = size_of_free_space(destination)
write_steganized_output_file(free_space, msg_chars, m_file, secret, destination)
def get_secret_msg(msg):
"""
Decide if the user gave us a string or a text file to steganize
Args:
msg (str | file): the string or text file containing the secret message
"""
if msg[-4:] == '.txt':
with open(msg, "r") as secret_file:
secret = secret_file.read().replace('\n', '')
else:
secret = msg
return secret
def write_steganized_output_file(free_space, msg_chars, m_file, secret, destination):
"""
This function takes the secret and writes it into the originally given file
Args:
:param free_space(int): The amount of free space in the given file
:param msg_chars(int): The number of characters of the secret message
:param m_file(str): The name of the original file in which we place our steganized message
:param secret(str): The secret message that we are encoding/encrypting
:param destination(str): The hex string of the file into which we write our steganized message
:return:
"""
if free_space < msg_chars:
print 'Your message is too big for the amount of free space in the' \
' given file. Please shorten the message ' \
'or select a file with more free space. '
print 'There is space for ', free_space, ' characters.'
exit()
else:
text_to_replace = space * msg_chars + space * 4 # Add 4 spaces for our header/footer
secret = add_sig(secret)
destination = destination.replace(text_to_replace, secret, 1)
try:
destination = bytearray.fromhex(destination)
except ValueError, e:
print e
destination = destination[:-1]
destination += '0a' # new line in hex
destination = bytearray.fromhex(destination)
f = open('steganized_' + m_file, 'w')
f.write(destination)
f.close()
print m_file + ' successfully steganized!'
def size_of_free_space(m_input):
"""
Determine the amount of free space in a given file in relation to the size of the secret
:param m_input: the hex string value of the file in which we hide the secret
:return: an integer of the amount of free space in the given file
"""
m_max = m_input.count(space)
m_max_free = space * m_max
while True:
if m_max_free in m_input:
break
m_max_free = m_max_free[:-2].strip()
return m_max_free.count(space) - 4 # reserve 4 bytes of space for the 2-byte header and 2-byte footer signature
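#Worked example: if the longest run of '20' bytes in the file spans 20 spaces,
#size_of_free_space() reports 16, since 4 bytes are reserved for the 2-byte
#header ('6a68') and the 2-byte footer ('686a').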
def add_sig(secret):
"""
This function will add a signature "jh[SECRET]hj" to the secret message. This will be used in decoding.
:param secret: A string of the secret to be encoded
:return: The original secret with the header and footer concatenated to the beginning and end
"""
return header + secret + footer
def decode(m_file, password=None):
"""This function finds and decodes secret messages in a given file
Args:
m_file (str): For decoding commands the file to decode
Kwargs:
password (str): For decoding commands the password to decrypt the message
"""
#Convert the steganized file into hex so that we can look for the secret
with open(m_file, "rb") as secret_file:
destination = secret_file.read()
secret_blob = destination.encode('hex')
#At this point 'secret_blob' is now a string of hex values
if sig_detected(secret_blob):
secret = simple_carve(secret_blob)
secret = secret.decode('hex')
if password is not None:
from simplecrypt import decrypt
secret = decrypt(password, secret)
print secret
else:
print 'No secret detected in file ' + str(m_file) # TODO use least significant pixel algorithm
def sig_detected(hex_str):
"""
Detect a signature "jh...hj" in a given hex string
:param hex_str: a string of hex characters of the file being analysed
:return: True if both the header and footer signatures are detected
"""
# TODO use regex for this function
return header in hex_str and footer in hex_str
def simple_carve(secret_blob):
"""
Carve a secret message that is in between a header and footer
:param secret_blob: a hex string of the file being analysed
:return: the string found in between the header and footer
"""
try:
start = secret_blob.index(header) + len(header)
end = secret_blob.index(footer, start)
return secret_blob[start:end]
except ValueError, e:
print e
exit()
if __name__ == "__main__":
main()
|
|
"""
This module defines handlers for storing the sessions of users
connecting to the server.
There are two similar but separate stores of sessions:
ServerSessionHandler - this stores generic game sessions
for the game. These sessions have no knowledge about
how they are connected to the world.
PortalSessionHandler - this stores sessions created by
twisted protocols. These are dumb connectors that
handle network communication but hold no game info.
"""
import time
from django.conf import settings
from src.commands.cmdhandler import CMD_LOGINSTART
_PLAYERDB = None
# AMP signals
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
# i18n
from django.utils.translation import ugettext as _
SERVERNAME = settings.SERVERNAME
ALLOW_MULTISESSION = settings.ALLOW_MULTISESSION
IDLE_TIMEOUT = settings.IDLE_TIMEOUT
#-----------------------------------------------------------
# SessionHandler base class
#------------------------------------------------------------
class SessionHandler(object):
"""
This handler holds a stack of sessions.
"""
def __init__(self):
"""
Init the handler.
"""
self.sessions = {}
def get_sessions(self, include_unloggedin=False):
"""
Returns the connected session objects.
"""
if include_unloggedin:
return self.sessions.values()
else:
return [session for session in self.sessions.values() if session.logged_in]
def get_session(self, sessid):
"""
Get session by sessid
"""
return self.sessions.get(sessid, None)
def get_all_sync_data(self):
"""
Create a dictionary of sessdata dicts representing all
sessions in store.
"""
return dict((sessid, sess.get_sync_data()) for sessid, sess in self.sessions.items())
#------------------------------------------------------------
# Server-SessionHandler class
#------------------------------------------------------------
class ServerSessionHandler(SessionHandler):
"""
This object holds the stack of sessions active in the game at
any time.
A session registers with the handler in two steps: first it
registers itself with the connect() method. This indicates a
non-authenticated session. Whenever the session is authenticated,
the session together with the related player is sent to the login()
method.
"""
# AMP communication methods
def __init__(self):
"""
Init the handler.
"""
self.sessions = {}
self.server = None
self.server_data = {"servername":SERVERNAME}
def portal_connect(self, sessid, session):
"""
Called by Portal when a new session has connected.
Creates a new, unlogged-in game session.
"""
self.sessions[sessid] = session
session.execute_cmd(CMD_LOGINSTART)
def portal_disconnect(self, sessid):
"""
Called by Portal when portal reports a closing of a session
from the portal side.
"""
session = self.sessions.get(sessid, None)
if session:
session.disconnect()
del self.sessions[session.sessid]
self.session_count(-1)
def portal_session_sync(self, sesslist):
"""
Syncing all session ids of the portal with the ones of the server. This is initiated
by the portal when reconnecting.
sesslist is a complete list of session objects, matching the list on the portal.
If a session was logged in, the amp handler will have logged it in before this point.
"""
# throw away the old sessions outright; the portal's list is
# authoritative, and rebinding the dict avoids lingering references.
self.sessions = {}
for sess in sesslist:
self.sessions[sess.sessid] = sess
sess.at_sync()
def portal_shutdown(self):
"""
Called by server when shutting down the portal.
"""
self.server.amp_protocol.call_remote_PortalAdmin(0,
operation=SSHUTD,
data="")
# server-side access methods
def disconnect(self, session, reason=""):
"""
Called from server side to remove session and inform portal
of this fact.
"""
session = self.sessions.get(session.sessid, None)
if session:
sessid = session.sessid
del self.sessions[sessid]
# inform portal that session should be closed.
self.server.amp_protocol.call_remote_PortalAdmin(sessid,
operation=SDISCONN,
data=reason)
def login(self, session):
"""
Log in the previously unlogged-in session and the player that we
by now know is connected to it. After this point we
assume the session to be logged in one way or another.
"""
# prep the session with player/user info
if not ALLOW_MULTISESSION:
# disconnect previous sessions.
self.disconnect_duplicate_sessions(session)
session.logged_in = True
# sync the portal to this session
sessdata = session.get_sync_data()
self.server.amp_protocol.call_remote_PortalAdmin(session.sessid,
operation=SLOGIN,
data=sessdata)
def all_sessions_portal_sync(self):
"""
This is called by the server when it reboots. It syncs all session data
to the portal. Returns a deferred!
"""
sessdata = self.get_all_sync_data()
return self.server.amp_protocol.call_remote_PortalAdmin(0,
operation=SSYNC,
data=sessdata)
def disconnect_all_sessions(self, reason=_("You have been disconnected.")):
"""
Cleanly disconnect all of the connected sessions.
"""
self.sessions = {}
# tell portal to disconnect all sessions
self.server.amp_protocol.call_remote_PortalAdmin(0,
operation=SDISCONNALL,
data=reason)
def disconnect_duplicate_sessions(self, curr_session, reason=_("Logged in from elsewhere. Disconnecting.")):
"""
Disconnects any existing sessions with the same game object.
"""
curr_char = curr_session.get_character()
doublet_sessions = [sess for sess in self.sessions.values()
if sess.logged_in
and sess.get_character() == curr_char
and sess != curr_session]
for session in doublet_sessions:
self.disconnect(session, reason)
def validate_sessions(self):
"""
Check all currently connected sessions (logged in and not)
and see if any are dead.
"""
tcurr = time.time()
reason = _("Idle timeout exceeded, disconnecting.")
for session in (session for session in self.sessions.values()
if session.logged_in and IDLE_TIMEOUT > 0
and (tcurr - session.cmd_last) > IDLE_TIMEOUT):
self.disconnect(session, reason=reason)
def player_count(self):
"""
Get the number of connected players (not sessions since a player
may have more than one session connected if ALLOW_MULTISESSION is True)
Only logged-in players are counted here.
"""
return len(set(session.uid for session in self.sessions.values() if session.logged_in))
def sessions_from_player(self, player):
"""
Given a player, return any matching sessions.
"""
uid = player.uid
return [session for session in self.sessions.values() if session.logged_in and session.uid == uid]
def sessions_from_character(self, character):
"""
Given a game character, return any matching sessions.
"""
player = character.player
if player:
return self.sessions_from_player(player)
return None
def announce_all(self, message):
"""
Send message to all connected sessions
"""
for sess in self.sessions.values():
self.data_out(sess, message)
def data_out(self, session, string="", data=""):
"""
Sending data Server -> Portal
"""
self.server.amp_protocol.call_remote_MsgServer2Portal(sessid=session.sessid,
msg=string,
data=data)
def data_in(self, sessid, string="", data=""):
"""
Data Portal -> Server
"""
session = self.sessions.get(sessid, None)
if session:
session.execute_cmd(string)
# ignore 'data' argument for now; this is otherwise the place
# to put custom effects on the server due to data input, e.g.
# from a custom client.
def oob_data_in(self, sessid, data):
"""
OOB (Out-of-band) Data Portal -> Server
"""
session = self.sessions.get(sessid, None)
if session:
session.oob_data_in(data)
def oob_data_out(self, session, data):
"""
OOB (Out-of-band) Data Server -> Portal
"""
self.server.amp_protocol.call_remote_OOBServer2Portal(session.sessid,
data=data)
#------------------------------------------------------------
# Portal-SessionHandler class
#------------------------------------------------------------
class PortalSessionHandler(SessionHandler):
"""
This object holds the sessions connected to the portal at any time.
It is synced with the server's equivalent SessionHandler over the AMP
connection.
Sessions register with the handler using the connect() method. This
will assign a new unique sessionid to the session and send that sessid
to the server using the AMP connection.
"""
def __init__(self):
"""
Init the handler
"""
self.portal = None
self.sessions = {}
self.latest_sessid = 0
self.uptime = time.time()
self.connection_time = 0
def at_server_connection(self):
"""
Called when the Portal establishes connection with the
Server. At this point, the AMP connection is already
established.
"""
self.connection_time = time.time()
def connect(self, session):
"""
Called by protocol at first connect. This adds a not-yet authenticated session
using an ever-increasing counter for sessid.
"""
self.latest_sessid += 1
sessid = self.latest_sessid
session.sessid = sessid
sessdata = session.get_sync_data()
self.sessions[sessid] = session
# sync with server-side
self.portal.amp_protocol.call_remote_ServerAdmin(sessid,
operation=PCONN,
data=sessdata)
def disconnect(self, session):
"""
Called from portal side when the connection is closed from the portal side.
"""
sessid = session.sessid
self.portal.amp_protocol.call_remote_ServerAdmin(sessid,
operation=PDISCONN)
def server_disconnect(self, sessid, reason=""):
"""
Called by server to force a disconnect by sessid
"""
session = self.sessions.get(sessid, None)
if session:
session.disconnect(reason)
if sessid in self.sessions:
# in case sess.disconnect doesn't delete it
del self.sessions[sessid]
def server_disconnect_all(self, reason=""):
"""
Called by server when forcing a clean disconnect for everyone.
"""
for session in self.sessions.values():
session.disconnect(reason)
self.sessions = {}
def count_loggedin(self, include_unloggedin=False):
"""
Count loggedin connections, alternatively count all connections.
"""
return len(self.get_sessions(include_unloggedin=include_unloggedin))
def session_from_suid(self, suid):
"""
Given a session id, retrieve the session (this is primarily
intended to be called by web clients)
"""
return [sess for sess in self.get_sessions(include_unloggedin=True)
if hasattr(sess, 'suid') and sess.suid == suid]
def data_in(self, session, string="", data=""):
"""
Called by portal sessions for relaying data coming
in from the protocol to the server. data is
serialized before passed on.
"""
#print "portal_data_in:", string
self.portal.amp_protocol.call_remote_MsgPortal2Server(session.sessid,
msg=string,
data=data)
def announce_all(self, message):
"""
Send message to all connected sessions
"""
for session in self.sessions.values():
session.data_out(message)
def data_out(self, sessid, string="", data=""):
"""
Called by server for having the portal relay messages and data
to the correct session protocol.
"""
session = self.sessions.get(sessid, None)
if session:
session.data_out(string, data=data)
def oob_data_in(self, session, data):
"""
OOB (Out-of-band) data Portal -> Server
"""
print "portal_oob_data_in:", data
self.portal.amp_protocol.call_remote_OOBPortal2Server(session.sessid,
data=data)
def oob_data_out(self, sessid, data):
"""
OOB (Out-of-band) data Server -> Portal
"""
print "portal_oob_data_out:", data
session = self.sessions.get(sessid, None)
if session:
session.oob_data_out(data)
SESSIONS = ServerSessionHandler()
PORTAL_SESSIONS = PortalSessionHandler()
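# Typical flow, informally: a protocol calls PORTAL_SESSIONS.connect(session)
# at first contact; the portal relays PCONN over AMP, the server side creates
# an unauthenticated entry via SESSIONS.portal_connect(), and a successful
# authentication later promotes the session through SESSIONS.login(session).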
|
|
"""
flask.ext.hmacauth
---------------
This module provides HMAC-based authentication and authorization for
Flask. It lets you work with reuests in a database-independent manner.
initiate the HmacManager with a app and set account ID, signature and timestamp
"""
from flask import current_app, request, abort
from functools import update_wrapper
import hmac
import hashlib
import datetime
import urlparse
#simple macros where x is a request object
GET_TIMESTAMP = lambda x: x.values.get('TIMESTAMP')
GET_ACCOUNT = lambda x: x.values.get('ACCOUNT_ID')
GET_SIGNATURE = lambda x: x.headers.get('X-Auth-Signature')
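#a custom extractor just needs the same shape, e.g. (illustrative only) an
#account id carried in a header instead of the query string:
#GET_ACCOUNT_FROM_HEADER = lambda x: x.headers.get('X-Auth-Account')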
class HmacManager(object):
"""
This object is used to hold the settings for authenticating requests. Instances of
:class:`HmacManager` are not bound to specific apps, so you can create one in the
main body of your code and then bind it to your app in a factory function.
"""
def __init__(self, account_broker, app=None, account_id=GET_ACCOUNT, signature=GET_SIGNATURE,
timestamp=GET_TIMESTAMP, valid_time=5, digest=hashlib.sha1):
"""
:param app Flask application container
:param account_broker AccountBroker object
:param account_id :type callable that takes a request object and :returns the Account ID (default
ACCOUNT_ID parameter in the query string or POST body)
:param signature :type callable that takes a request object and :returns the signature value (default
X-Auth-Signature header)
:param timestamp :type callable that takes a request object and :returns the timestamp (default
TIMESTAMP parameter in the query string or POST body)
:param valid_time :type integer, number of seconds a timestamp remains valid (default 20)
:param digest hashlib hash :type to be used in the signature (default sha1)
"""
self._account_id = account_id
self._signature = signature
self._timestamp = timestamp
self._account_broker = account_broker
self._valid_time = valid_time
self._digest = digest
if app is not None:
self.init_app(app)
def init_app(self, app):
app.hmac_manager = self
def is_authorized(self, request_obj, required_rights):
try:
timestamp = self._timestamp(request_obj)
assert timestamp is not None
except Exception:
#TODO: add logging
return False
ts = datetime.datetime.fromtimestamp(float(timestamp))
#is the timestamp valid?
if ts < datetime.datetime.now()-datetime.timedelta(seconds=self._valid_time) \
or ts > datetime.datetime.now():
#TODO: add logging
return False
#do we have an account ID in the request?
try:
account_id = self._account_id(request_obj)
except Exception:
#TODO: add logging
return False
#do we have a secret and rights for this account?
#implicitly, does this account exist?
secret = self._account_broker.get_secret(account_id)
if secret is None:
#TODO: add logging
return False
#Is the account active, valid, etc?
if not self._account_broker.is_active(account_id):
#TODO: add logging
return False
#hash the request URL and Body
hasher = hmac.new(secret, digestmod=self._digest)
#TODO: do we need encode() here?
url = urlparse.urlparse(request.url.encode(request.charset or 'utf-8'))
#TODO: hacky. what about POSTs without a query string?
hasher.update(url.path + "?" + url.query)
if request.method == "POST":
#TODO: check request length before calling get_data() to avoid memory exhaustion issues
#see http://werkzeug.pocoo.org/docs/0.9/wrappers/#werkzeug.wrappers.BaseRequest.get_data
#and http://stackoverflow.com/questions/10999990/get-raw-post-body-in-python-flask-regardless-of-content-type-header
#these parameters should be the default, but just in case things change...
body = request.get_data(cache=True, as_text=False, parse_form_data=False)
hasher.update(body)
calculated_hash = hasher.hexdigest()
try:
sent_hash = self._signature(request_obj)
except Exception:
#TODO: add logging
return False
#compare to what we got as the sig
if calculated_hash != sent_hash:
#TODO: add logging
return False
#ensure this account has the required rights
#TODO: add logging
if required_rights is not None:
if isinstance(required_rights, list):
return self._account_broker.has_rights(account_id, required_rights)
else:
return self._account_broker.has_rights(account_id, [required_rights])
return True
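#How a client would produce a matching signature, informally (this mirrors the
#server-side hashing above; the names are illustrative):
#
#   hasher = hmac.new(secret, digestmod=hashlib.sha1)
#   hasher.update(path + "?" + query)  # plus the raw body for POSTs
#   signature = hasher.hexdigest()     # sent in the X-Auth-Signature header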
class DictAccountBroker(object):
"""
Default minimal implementation of an AccountBroker. This implementation maintains
a dict in memory with structure:
{
account_id:
{
secret: "some secret string",
rights: ["someright", "someotherright"],
},
...
}
Your implementation can use whatever backing store you like as long as you provide
the following methods:
get_secret(account_id) - returns a string secret given an account ID. If the account does not exist, returns None
has_rights(account_id, rights) - returns True if account_id has all of the rights in the list
rights, otherwise returns False. Returns False if the account does not exist.
is_active(account_id) - returns True if account_id is active (for whatever definition you want
to define for active), otherwise returns False.
"""
def __init__(self, accounts=None):
if accounts is None:
self.accounts = {}
else:
self.accounts = accounts
#TODO: test
def add_accounts(self, accounts):
self.accounts.update(accounts)
#TODO: test
def del_accounts(self, accounts):
if isinstance(accounts, list):
for i in accounts:
del self.accounts[i]
else:
del self.accounts[accounts]
def get_secret(self, account):
try:
secret = self.accounts[account]["secret"]
except KeyError:
return None
return secret
def has_rights(self, account, rights):
try:
account_rights = self.accounts[account]["rights"]
except KeyError:
return False
if set(rights).issubset(account_rights):
return True
return False
def is_active(self, account):
if account in self.accounts:
return True
return False
class StaticAccountBroker(object):
#TODO: this doesn't work?
GET_ACCOUNT = lambda x: "dummy"
def __init__(self, secret=None):
if secret is None:
raise ValueError("you must provide a value for 'secret'")
self._secret = secret
def is_active(self, account):
return True
def get_secret(self, account):
return self._secret
def has_rights(self, account, rights):
return True
def hmac_auth(rights=None):
def decorator(f):
def wrapped_function(*args, **kwargs):
if current_app.hmac_manager.is_authorized(request, rights):
return f(*args, **kwargs)
else:
#TODO: make this custom, maybe a current_app.hmac_manager.error() call?
abort(403)
return update_wrapper(wrapped_function, f)
return decorator
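#A minimal wiring sketch (hypothetical app and route names, assuming the
#default extractors defined above):
#
#   from flask import Flask
#   app = Flask(__name__)
#   broker = DictAccountBroker(accounts={
#       "acct1": {"secret": "s3cret", "rights": ["read"]},
#   })
#   hmac_manager = HmacManager(broker, app)
#
#   @app.route("/api/data")
#   @hmac_auth(rights="read")
#   def data():
#       return "ok"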
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
import cgi
import json
import logging
import os
import pickle
import threading
import httplib2
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client import GOOGLE_AUTH_URI
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import clientsecrets
from oauth2client import util
from oauth2client import xsrfutil
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Storage
# TODO(dhermes): Resolve import issue.
# This is a temporary fix for a Google internal issue.
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
logger = logging.getLogger(__name__)
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
def _safe_html(s):
"""Escape text to make it safe to display.
Args:
s: string, The text to escape.
Returns:
The escaped text as a string.
"""
return cgi.escape(s, quote=1).replace("'", '&#39;')
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
class InvalidXsrfTokenError(Exception):
"""The XSRF token is invalid or expired."""
class SiteXsrfSecretKey(db.Model):
"""Storage for the sites XSRF secret key.
There will only be one instance stored of this model, the one used for the
site.
"""
secret = db.StringProperty()
if ndb is not None:
class SiteXsrfSecretKeyNDB(ndb.Model):
"""NDB Model for storage for the sites XSRF secret key.
Since this model uses the same kind as SiteXsrfSecretKey, it can be used
interchangeably. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
There should only be one instance stored of this model, the one used for the
site.
"""
secret = ndb.StringProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'SiteXsrfSecretKey'
def _generate_new_xsrf_secret_key():
"""Returns a random XSRF secret key.
"""
return os.urandom(16).encode("hex")
def xsrf_secret_key():
"""Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
"""
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if not secret:
# Load the one and only instance of SiteXsrfSecretKey.
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if not model.secret:
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for the
purpose of accessing data stored under an account assigned to the App Engine
application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials being
requested.
**kwargs: optional keyword args, including:
service_account_id: service account id of the application. If None or
unspecified, the default service account for the app is used.
"""
self.scope = util.scopes_to_string(scope)
self._kwargs = kwargs
self.service_account_id = kwargs.get('service_account_id', None)
# Assertion type is no longer used, but still in the parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching, we can skip all the storage hoops and just do a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
scopes = self.scope.split()
(token, _) = app_identity.get_access_token(
scopes, service_account_id=self.service_account_id)
except app_identity.Error as e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
@property
def serialization_data(self):
raise NotImplementedError('Cannot serialize credentials for AppEngine.')
def create_scoped_required(self):
return not self.scope
def create_scoped(self, scopes):
return AppAssertionCredentials(scopes, **self._kwargs)
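# A minimal usage sketch (assuming an App Engine runtime; the scope URL is
# illustrative):
#
#   credentials = AppAssertionCredentials(
#       scope='https://www.googleapis.com/auth/devstorage.read_only')
#   http = credentials.authorize(httplib2.Http(memcache))
#   # http can now be handed to an API client as an authorized transport.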
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty,
self).get_value_for_datastore(model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
if ndb is not None:
class FlowNDBProperty(ndb.PickleProperty):
"""App Engine NDB datastore Property for Flow.
Serves the same purpose as the DB FlowProperty, but for NDB models. Since
PickleProperty inherits from BlobProperty, the underlying representation of
the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow
"""
def _validate(self, value):
"""Validates a value as a proper Flow object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Flow.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Flow):
raise TypeError('Property %s must be convertible to a flow '
'instance; received: %s.' % (self._name, value))
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
oauth2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logger.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty,
self).get_value_for_datastore(model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logger.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logger.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
return value
if ndb is not None:
# TODO(dhermes): Turn this into a JsonProperty and overhaul the Credentials
# and subclass mechanics to use new_from_dict, to_dict,
# from_dict, etc.
class CredentialsNDBProperty(ndb.BlobProperty):
"""App Engine NDB datastore Property for Credentials.
Serves the same purpose as the DB CredentialsProperty, but for NDB models.
Since CredentialsProperty stores data as a blob and this inherits from
BlobProperty, the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of Credentials and
subclasses.
"""
def _validate(self, value):
"""Validates a value as a proper credentials object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Credentials.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Credentials):
raise TypeError('Property %s must be convertible to a credentials '
'instance; received: %s.' % (self._name, value))
def _to_base_type(self, value):
"""Converts our validated value to a JSON serialized string.
Args:
value: A value to be set in the datastore.
Returns:
A JSON serialized version of the credential, else '' if value is None.
"""
if value is None:
return ''
else:
return value.to_json()
def _from_base_type(self, value):
"""Converts our stored JSON string back to the desired type.
Args:
value: A value from the datastore to be converted to the desired type.
Returns:
A deserialized Credentials (or subclass) object, else None if the
value can't be parsed.
"""
if not value:
return None
try:
# Uses the from_json method of the implied class of value
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
class StorageByKeyName(Storage):
"""Store and retrieve a credential to and from the App Engine datastore.
This Storage helper presumes the Credentials have been stored as a
CredentialsProperty or CredentialsNDBProperty on a datastore model class, and
that entities are stored by key_name.
"""
@util.positional(4)
def __init__(self, model, key_name, property_name, cache=None, user=None):
"""Constructor for Storage.
Args:
model: db.Model or ndb.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a CredentialsProperty
or CredentialsNDBProperty.
cache: memcache, a write-through cache to put in front of the datastore.
If the model you are using is an NDB model, using a cache will be
redundant since the model uses an instance cache and memcache for you.
user: users.User object, optional. Can be used to grab user ID as a
key_name if no key name is specified.
"""
if key_name is None:
if user is None:
raise ValueError('StorageByKeyName called with no key name or user.')
key_name = user.user_id()
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
# issubclass will fail if one of the arguments is not a class, only need
# worry about new-style classes since ndb and db models are new-style
if isinstance(self._model, type):
if ndb is not None and issubclass(self._model, ndb.Model):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError('Model class not an NDB or DB model: %s.' % (self._model,))
def _get_entity(self):
"""Retrieve entity from datastore.
Uses a different model method for db or ndb models.
Returns:
Instance of the model corresponding to the current storage object
and stored using the key name of the storage object.
"""
if self._is_ndb():
return self._model.get_by_id(self._key_name)
else:
return self._model.get_by_key_name(self._key_name)
def _delete_entity(self):
"""Delete entity from datastore.
Attempts to delete using the key_name stored on the object, whether or not
the given key is in the datastore.
"""
if self._is_ndb():
ndb.Key(self._model, self._key_name).delete()
else:
entity_key = db.Key.from_path(self._model.kind(), self._key_name)
db.delete(entity_key)
@db.non_transactional(allow_existing=True)
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credentials = None
if self._cache:
      json_data = self._cache.get(self._key_name)
      if json_data:
        credentials = Credentials.new_from_json(json_data)
if credentials is None:
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self)
return credentials
@db.non_transactional(allow_existing=True)
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
@db.non_transactional(allow_existing=True)
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
self._delete_entity()
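# Illustrative sketch (not part of the original module): storing and
# retrieving a user's credentials with StorageByKeyName, keyed by user id.
# CredentialsModel is defined just below; the OAuth flow step is elided.
def _example_storage_by_key_name():
  user = users.get_current_user()
  storage = StorageByKeyName(CredentialsModel, user.user_id(), 'credentials')
  credentials = storage.get()  # None if nothing has been stored yet
  if credentials is None or credentials.invalid:
    # ... run an OAuth flow here, then persist the result:
    # storage.put(new_credentials)
    return None
  return credentials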
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
if ndb is not None:
class CredentialsNDBModel(ndb.Model):
"""NDB Model for storage of OAuth 2.0 Credentials
Since this model uses the same kind as CredentialsModel and has a property
which can serialize and deserialize Credentials correctly, it can be used
interchangeably with a CredentialsModel to access, insert and delete the
same entities. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsNDBProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'CredentialsModel'
def _build_state_value(request_handler, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Args:
request_handler: webapp.RequestHandler, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
def _parse_state_value(state, user):
"""Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Raises:
InvalidXsrfTokenError: if the XSRF token is invalid.
Returns:
The redirect URI.
"""
uri, token = state.rsplit(':', 1)
if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
raise InvalidXsrfTokenError()
return uri
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
::
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
def set_credentials(self, credentials):
self._tls.credentials = credentials
def get_credentials(self):
"""A thread local Credentials object.
Returns:
      A client.Credentials object, or None if credentials haven't been set in
this thread yet, which may happen when calling has_credentials inside
oauth_aware.
"""
return getattr(self._tls, 'credentials', None)
credentials = property(get_credentials, set_credentials)
def set_flow(self, flow):
self._tls.flow = flow
def get_flow(self):
"""A thread local Flow object.
Returns:
A credentials.Flow object, or None if the flow hasn't been set in this
thread yet, which happens in _create_flow() since Flows are created
lazily.
"""
return getattr(self._tls, 'flow', None)
flow = property(get_flow, set_flow)
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
user_agent=None,
message=None,
callback_path='/oauth2callback',
token_response_param=None,
_storage_class=StorageByKeyName,
_credentials_class=CredentialsModel,
_credentials_property_name='credentials',
**kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
user_agent: string, User agent of your application, default to None.
message: Message to display if there are problems with the OAuth 2.0
configuration. The message may contain HTML and will be presented on the
web interface for any method that uses the decorator.
callback_path: string, The absolute path to use as the callback URI. Note
that this must match up with the URI given when registering the
application in the APIs Console.
token_response_param: string. If provided, the full JSON response
to the access token request will be encoded and included in this query
parameter in the callback URI. This is useful with providers (e.g.
wordpress.com) that include extra fields that the client may want.
_storage_class: "Protected" keyword argument not typically provided to
this constructor. A storage class to aid in storing a Credentials object
for a user in the datastore. Defaults to StorageByKeyName.
_credentials_class: "Protected" keyword argument not typically provided to
this constructor. A db or ndb Model class to hold credentials. Defaults
to CredentialsModel.
_credentials_property_name: "Protected" keyword argument not typically
provided to this constructor. A string indicating the name of the field
on the _credentials_class where a Credentials object will be stored.
Defaults to 'credentials'.
**kwargs: dict, Keyword arguments are passed along as kwargs to
the OAuth2WebServerFlow constructor.
"""
self._tls = threading.local()
self.flow = None
self.credentials = None
self._client_id = client_id
self._client_secret = client_secret
self._scope = util.scopes_to_string(scope)
self._auth_uri = auth_uri
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._user_agent = user_agent
self._kwargs = kwargs
self._message = message
self._in_error = False
self._callback_path = callback_path
self._token_response_param = token_response_param
self._storage_class = _storage_class
self._credentials_class = _credentials_class
self._credentials_property_name = _credentials_property_name
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
Starts the OAuth dance for the logged in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
resp = method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
finally:
self.credentials = None
return resp
return check_oauth
def _create_flow(self, request_handler):
"""Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
"""
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path) # Usually /oauth2callback
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret,
self._scope, redirect_uri=redirect_uri,
user_agent=self._user_agent,
auth_uri=self._auth_uri,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
try:
resp = method(request_handler, *args, **kwargs)
finally:
self.credentials = None
return resp
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
    Must only be called from within a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
url = self.flow.step1_get_authorize_url()
return str(url)
def http(self, *args, **kwargs):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
Args:
*args: Positional arguments passed to httplib2.Http constructor.
      **kwargs: Keyword arguments passed to httplib2.Http constructor.
"""
return self.credentials.authorize(httplib2.Http(*args, **kwargs))
@property
def callback_path(self):
"""The absolute path where the callback will occur.
Note this is the absolute path, not the absolute URI, that will be
calculated by the decorator at runtime. See callback_handler() for how this
should be used.
Returns:
The callback path as a string.
"""
return self._callback_path
def callback_handler(self):
"""RequestHandler for the OAuth 2.0 redirect callback.
Usage::
app = webapp.WSGIApplication([
('/index', MyIndexHandler),
...,
(decorator.callback_path, decorator.callback_handler())
])
Returns:
A webapp.RequestHandler that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
decorator = self
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' % _safe_html(errormsg))
else:
user = users.get_current_user()
decorator._create_flow(self)
credentials = decorator.flow.step2_exchange(self.request.params)
decorator._storage_class(
decorator._credentials_class, None,
decorator._credentials_property_name, user=user).put(credentials)
redirect_uri = _parse_state_value(str(self.request.get('state')),
user)
if decorator._token_response_param and credentials.token_response:
resp_json = json.dumps(credentials.token_response)
redirect_uri = util._add_query_parameter(
redirect_uri, decorator._token_response_param, resp_json)
self.redirect(redirect_uri)
return OAuth2Handler
def callback_application(self):
"""WSGI application for handling the OAuth 2.0 redirect callback.
If you need finer grained control use `callback_handler` which returns just
the webapp.RequestHandler.
Returns:
A webapp.WSGIApplication that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
return webapp.WSGIApplication([
(self.callback_path, self.callback_handler())
])
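# Illustrative sketch (not part of the original module): besides decorating
# handlers (see the class docstring above), the callback route must be
# mounted so the OAuth redirect can be handled. `MainHandler` and
# `decorator` are hypothetical names.
#
# app = webapp.WSGIApplication([
#   ('/', MainHandler),
#   (decorator.callback_path, decorator.callback_handler()),
# ])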
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
::
decorator = OAuth2DecoratorFromClientSecrets(
    os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(3)
def __init__(self, filename, scope, message=None, cache=None, **kwargs):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML
and will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
**kwargs: dict, Keyword arguments are passed along as kwargs to
the OAuth2WebServerFlow constructor.
"""
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type not in [
clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
raise InvalidClientSecretsError(
"OAuth2Decorator doesn't support this OAuth 2.0 flow.")
constructor_kwargs = dict(kwargs)
constructor_kwargs.update({
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
'message': message,
})
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
super(OAuth2DecoratorFromClientSecrets, self).__init__(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
if message is not None:
self._message = message
else:
self._message = 'Please configure your application for OAuth 2.0.'
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache)
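# Illustrative sketch (not part of the original module): the clientsecrets
# variant keeps the client id and secret out of source code. The file path
# and scope below are placeholders.
#
# decorator = oauth2decorator_from_clientsecrets(
#   os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
#   scope='https://www.googleapis.com/auth/drive.readonly',
#   message='Missing or invalid client_secrets.json.')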
|
|
"""HTML utilities suitable for global use."""
import re
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.encoding import force_text
from django.utils.functional import keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Configuration for urlize() function.
TRAILING_PUNCTUATION_RE = re.compile(
'^' # Beginning of word
'(.*?)' # The URL in word
'([.,:;!]+)' # Allowed non-wrapping, trailing punctuation
'$' # End of word
)
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
@keep_lazy(str, SafeText)
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
This function always escapes its input, even if it's already escaped and
marked as such. This may result in double-escaping. If this is a concern,
use conditional_escape() instead.
"""
    return mark_safe(
        force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    )
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
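# Illustrative sketch (not part of the original module): escape() always
# escapes, so applying it to already-safe markup double-escapes, while
# conditional_escape() honors __html__ and leaves SafeText untouched.
def _example_conditional_escape():
    safe = mark_safe('<b>bold</b>')
    assert escape(safe) == '&lt;b&gt;bold&lt;/b&gt;'  # double-escaped
    assert conditional_escape(safe) == '<b>bold</b>'  # left intact
    assert conditional_escape('<b>') == '&lt;b&gt;'   # plain str is escaped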
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
@keep_lazy_text
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(force_text(value))
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
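# Illustrative sketch (not part of the original module): a blank line is a
# paragraph break, while a single newline becomes <br />.
def _example_linebreaks():
    text = 'first line\nsecond line\n\nnew paragraph'
    assert linebreaks(text) == (
        '<p>first line<br />second line</p>\n\n<p>new paragraph</p>')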
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
try:
s.feed(value)
except HTMLParseError:
return value
try:
s.close()
except HTMLParseError:
return s.get_data() + s.rawdata
else:
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
value = force_text(value)
while '<' in value and '>' in value:
new_value = _strip_once(value)
if len(new_value) >= len(value):
# _strip_once was not able to detect more tags
break
value = new_value
return value
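# Illustrative sketch (not part of the original module): the while-loop in
# strip_tags() re-runs _strip_once() until no more markup disappears, which
# defends against tags that only appear after a first stripping pass.
def _example_strip_tags():
    assert strip_tags('<p>Hello <b>world</b></p>') == 'Hello world'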
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC3986 Unreserved Characters
# http://tools.ietf.org/html/rfc3986#section-2.3
# See also http://bugs.python.org/issue16285
segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
return force_text(segment)
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
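# Illustrative sketch (not part of the original module): each URL segment
# is unquoted before being re-quoted, so unsafe characters get encoded
# exactly once.
def _example_smart_urlquote():
    assert smart_urlquote('http://example.com/a b/') == 'http://example.com/a%20b/'
    # An already-quoted URL is not double-quoted:
    assert smart_urlquote('http://example.com/a%20b/') == 'http://example.com/a%20b/'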
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
def unescape(text, trail):
"""
If input URL is HTML-escaped, unescape it so as we can safely feed it to
smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
"""
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
if trail and unescaped.endswith(trail):
# Remove trail for unescaped if it was not consumed by unescape
unescaped = unescaped[:-len(trail)]
elif trail == ';':
# Trail was consumed by unescape (as end-of-entity marker), move it to text
text += trail
trail = ''
return text, unescaped, trail
def trim_punctuation(lead, middle, trail):
"""
Trim trailing and wrapping punctuation from `middle`. Return the items
of the new state.
"""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim trailing punctuation.
match = TRAILING_PUNCTUATION_RE.match(middle)
if match:
middle = match.group(1)
trail = match.group(2) + trail
trimmed_something = True
# Trim wrapping punctuation.
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
trimmed_something = True
return lead, middle, trail
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# lead: Current punctuation trimmed from the beginning of the word.
# middle: Current state of the word.
# trail: Current punctuation trimmed from the end of the word.
lead, middle, trail = '', word, ''
# Deal with punctuation.
lead, middle, trail = trim_punctuation(lead, middle, trail)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote(middle_unescaped)
elif simple_url_2_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote('http://%s' % middle_unescaped)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
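# Illustrative sketch (not part of the original module): urlize() links
# bare www. URLs and e-mail addresses, keeping trailing punctuation
# outside the anchor.
def _example_urlize():
    assert urlize('see www.example.com.') == (
        'see <a href="http://www.example.com">www.example.com</a>.')
    assert urlize('mail me@example.com') == (
        'mail <a href="mailto:me@example.com">me@example.com</a>')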
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeText.
"""
if '__html__' in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if '__str__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
|
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import EnumProperty, FloatProperty
from bpy.types import Operator
from .. import var
from ..lib import asset, dynamic_list
class OBJECT_OT_jewelcraft_gem_add(Operator):
bl_label = "JewelCraft Make Gem"
bl_description = "Add gemstone to the scene"
bl_idname = "object.jewelcraft_gem_add"
bl_options = {"REGISTER", "UNDO"}
cut: EnumProperty(name="Cut", items=dynamic_list.cuts)
stone: EnumProperty(name="Stone", items=dynamic_list.stones)
size: FloatProperty(
name="Size",
default=1.0,
min=0.0001,
step=5,
precision=2,
unit="LENGTH",
)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(self, "size")
layout.prop(self, "stone")
split = layout.split(factor=0.49)
split.row()
split.template_icon_view(self, "cut", show_labels=True)
def execute(self, context):
scene = context.scene
view_layer = context.view_layer
space_data = context.space_data
stone_name = asset.get_name(self.stone)
cut_name = asset.get_name(self.cut)
color = var.STONE_COLOR.get(self.stone) or self.color
for ob in context.selected_objects:
ob.select_set(False)
imported = asset.asset_import(filepath=var.GEM_ASSET_FILEPATH, ob_name=cut_name)
ob = imported.objects[0]
context.collection.objects.link(ob)
if space_data.local_view:
ob.local_view_set(space_data, True)
ob.scale *= self.size
ob.location = scene.cursor.location
ob.select_set(True)
ob["gem"] = {"cut": self.cut, "stone": self.stone}
asset.add_material(ob, name=stone_name, color=color, is_gem=True)
if context.mode == "EDIT_MESH":
asset.ob_copy_to_faces(ob)
bpy.ops.object.mode_set(mode="OBJECT")
view_layer.objects.active = ob
return {"FINISHED"}
def invoke(self, context, event):
self.color = asset.color_rnd()
wm = context.window_manager
return wm.invoke_props_dialog(self)
class OBJECT_OT_jewelcraft_gem_edit(Operator):
bl_label = "JewelCraft Edit Gem"
bl_description = "Edit selected gems"
bl_idname = "object.jewelcraft_gem_edit"
bl_options = {"REGISTER", "UNDO"}
cut: EnumProperty(name="Cut", items=dynamic_list.cuts, options={"SKIP_SAVE"})
stone: EnumProperty(name="Stone", items=dynamic_list.stones, options={"SKIP_SAVE"})
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(self, "stone")
split = layout.split(factor=0.49)
split.row()
split.template_icon_view(self, "cut", show_labels=True)
def execute(self, context):
obs = context.selected_objects
if self.cut != self.cut_orig:
cut_name = asset.get_name(self.cut)
imported = asset.asset_import(filepath=var.GEM_ASSET_FILEPATH, me_name=cut_name)
me = imported.meshes[0]
for ob in obs:
if "gem" in ob:
size_orig = ob.dimensions[1]
mats_orig = ob.data.materials
ob.data = me.copy()
ob["gem"]["cut"] = self.cut
ob.name = cut_name
ob.scale = (size_orig, size_orig, size_orig)
asset.apply_scale(ob)
for mat in mats_orig:
ob.data.materials.append(mat)
bpy.data.meshes.remove(me)
if self.stone != self.stone_orig:
stone_name = asset.get_name(self.stone)
color = var.STONE_COLOR.get(self.stone) or self.color
for ob in obs:
if "gem" in ob:
if ob.data.users > 1:
ob.data = ob.data.copy()
ob["gem"]["stone"] = self.stone
asset.add_material(ob, name=stone_name, color=color, is_gem=True)
return {"FINISHED"}
def invoke(self, context, event):
if not context.selected_objects or not context.object:
self.report({"ERROR"}, "At least one gem object must be selected")
return {"CANCELLED"}
ob = context.object
if "gem" in ob:
self.cut = ob["gem"]["cut"]
self.stone = ob["gem"]["stone"]
self.stone_orig = self.stone
self.cut_orig = self.cut
self.color = asset.color_rnd()
wm = context.window_manager
return wm.invoke_props_popup(self, event)
class OBJECT_OT_jewelcraft_gem_id_add(Operator):
bl_label = "JewelCraft Add Gem ID"
bl_description = "Add gem identifiers to selected objects"
bl_idname = "object.jewelcraft_gem_id_add"
bl_options = {"REGISTER", "UNDO"}
cut: EnumProperty(name="Cut", items=dynamic_list.cuts)
stone: EnumProperty(name="Stone", items=dynamic_list.stones)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(self, "stone")
split = layout.split(factor=0.49)
split.row()
split.template_icon_view(self, "cut", show_labels=True)
def execute(self, context):
for ob in context.selected_objects:
if ob.type == "MESH":
ob["gem"] = {"cut": self.cut, "stone": self.stone}
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
class OBJECT_OT_jewelcraft_gem_id_convert_deprecated(Operator):
bl_label = "JewelCraft Convert Deprecated Gem IDs"
bl_description = "Convert deprecated gem identifiers to compatible for all objects in the scene"
bl_idname = "object.jewelcraft_gem_id_convert_deprecated"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
obs = context.scene.objects
for ob in obs:
if ob.type == "MESH" and "gem" in ob.data:
if "gem" not in ob:
ob["gem"] = {}
for k, v in ob.data["gem"].items():
if k.lower() == "cut":
ob["gem"]["cut"] = v
elif k.lower() == "type":
ob["gem"]["stone"] = v
del ob.data["gem"]
if ob.data.users > 1:
for link in obs:
if link.data is ob.data:
link["gem"] = ob["gem"]
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self, event)
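# Illustrative sketch (not part of the original add-on): operators like
# these are normally collected and registered by the add-on's __init__;
# the classes tuple below is hypothetical.
#
# classes = (
#     OBJECT_OT_jewelcraft_gem_add,
#     OBJECT_OT_jewelcraft_gem_edit,
#     OBJECT_OT_jewelcraft_gem_id_add,
#     OBJECT_OT_jewelcraft_gem_id_convert_deprecated,
# )
#
# def register():
#     for cls in classes:
#         bpy.utils.register_class(cls)
#
# def unregister():
#     for cls in reversed(classes):
#         bpy.utils.unregister_class(cls)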
|
|
import requests
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from django_comments.signals import comment_was_posted
from django_comments.forms import CommentSecurityForm
from django_comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text
from django.utils import timezone
from django.contrib.auth.models import User
from mezzanine.core.forms import Html5Mixin
from mezzanine.generic.models import ThreadedComment, Rating
from mezzanine.utils.views import ip_for_request
from mezzanine.utils.email import split_addresses, send_mail_template
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.conf import settings
from .models import UserProfile
from hs_core.hydroshare.users import create_account
from hs_core.templatetags.hydroshare_tags import best_name
from hs_core.models import Party
from hydroshare import settings as hydroshare_settings
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
# This forms.py was added by Hong Yi to customize comments in HydroShare
# as part of the effort to address https://github.com/hydroshare/hydroshare/issues/186.
# In particular, we want to remove the name, email, and URL fields and
# link comments to user profiles.
class CommentDetailsForm(CommentSecurityForm):
"""
Handles the specific details of the comment (name, comment, etc.).
"""
comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
max_length=COMMENT_MAX_LENGTH)
def get_comment_object(self):
"""
Return a new (unsaved) comment object based on the information in this
form. Assumes that the form is already validated and will throw a
ValueError if not.
Does not set any of the fields that would come from a Request object
(i.e. ``user`` or ``ip_address``).
"""
if not self.is_valid():
raise ValueError("get_comment_object may only be called on valid forms")
CommentModel = self.get_comment_model()
new = CommentModel(**self.get_comment_create_data())
new = self.check_for_duplicate_comment(new)
return new
def get_comment_model(self):
"""
Get the comment model to create with this form. Subclasses in custom
comment apps should override this, get_comment_create_data, and perhaps
check_for_duplicate_comment to provide custom comment models.
"""
return Comment
def get_comment_create_data(self):
"""
Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model.
"""
return dict(
content_type=ContentType.objects.get_for_model(self.target_object),
object_pk=force_text(self.target_object._get_pk_val()),
comment=self.cleaned_data["comment"],
submit_date=timezone.now(),
site_id=settings.SITE_ID,
is_public=True,
is_removed=False,
)
def check_for_duplicate_comment(self, new):
"""
Check that a submitted comment isn't a duplicate. This might be caused
by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
"""
possible_duplicates = self.get_comment_model()._default_manager.using(
self.target_object._state.db
).filter(
content_type=new.content_type,
object_pk=new.object_pk,
user_name=new.user_name,
user_email=new.user_email,
user_url=new.user_url,
)
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
class CommentForm(CommentDetailsForm):
honeypot = forms.CharField(required=False,
label=_('If you enter anything in this field '
'your comment will be treated as spam'))
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
# Added by Hong Yi for customizing ThreadedCommentForm.
class ThreadedCommentForm(CommentForm, Html5Mixin):
def __init__(self, request, *args, **kwargs):
"""
Set some initial field values from cookies or the logged in
user, and apply some HTML5 attributes to the fields if the
``FORMS_USE_HTML5`` setting is ``True``.
"""
kwargs.setdefault("initial", {})
super(ThreadedCommentForm, self).__init__(*args, **kwargs)
def get_comment_model(self):
"""
Use the custom comment model instead of the built-in one.
"""
return ThreadedComment
def save(self, request):
"""
Saves a new comment and sends any notification emails.
"""
comment = self.get_comment_object()
obj = comment.content_object
if request.user.is_authenticated():
comment.user = request.user
comment.user_name = best_name(comment.user)
comment.by_author = request.user == getattr(obj, "user", None)
comment.ip_address = ip_for_request(request)
comment.replied_to_id = self.data.get("replied_to")
comment.save()
comment_was_posted.send(sender=comment.__class__, comment=comment,
request=request)
notify_emails = split_addresses(settings.COMMENTS_NOTIFICATION_EMAILS)
notify_emails.append(obj.user.email)
reply_to_comment = comment.replied_to
if reply_to_comment is not None:
notify_emails.append(reply_to_comment.user.email)
if notify_emails:
subject = "[HydroShare Support] New comment by {c_name} for: {res_obj}".format(
c_name=comment.user_name, res_obj=str(obj))
context = {
"comment": comment,
"comment_url": add_cache_bypass(comment.get_absolute_url()),
"request": request,
"obj": obj,
}
send_mail_template(subject, "email/comment_notification",
settings.DEFAULT_FROM_EMAIL, notify_emails,
context)
return comment
class RatingForm(CommentSecurityForm):
"""
Form for a rating. Subclasses ``CommentSecurityForm`` to make use
of its easy setup for generic relations.
"""
value = 1
def __init__(self, request, *args, **kwargs):
self.request = request
super(RatingForm, self).__init__(*args, **kwargs)
def clean(self):
"""
Check unauthenticated user's cookie as a light check to
prevent duplicate votes.
"""
bits = (self.data["content_type"], self.data["object_pk"])
self.current = "%s.%s" % bits
request = self.request
self.previous = request.COOKIES.get("mezzanine-rating", "").split(",")
already_rated = self.current in self.previous
if already_rated and not self.request.user.is_authenticated():
raise forms.ValidationError(ugettext("Already rated."))
        return self.cleaned_data
def save(self):
"""
Saves a new rating - authenticated users can update the
value if they've previously rated.
"""
user = self.request.user
rating_value = 1
rating_name = self.target_object.get_ratingfield_name()
rating_manager = getattr(self.target_object, rating_name)
if user.is_authenticated():
try:
rating_instance = rating_manager.get(user=user)
except Rating.DoesNotExist:
rating_instance = Rating(user=user, value=rating_value)
rating_manager.add(rating_instance, bulk=False)
else:
if rating_instance.value != int(rating_value):
rating_instance.value = rating_value
rating_instance.save()
else:
# User submitted the same rating as previously,
# which we treat as undoing the rating (like a toggle).
rating_instance.delete()
else:
rating_instance = Rating(value=rating_value)
rating_manager.add(rating_instance, bulk=False)
return rating_instance
class SignupForm(forms.ModelForm):
class Meta:
model = User
exclude = ['last_login', 'date_joined', 'password']
password1 = forms.CharField(label="Password", widget=forms.PasswordInput())
password2 = forms.CharField(label="Confirm Password", widget=forms.PasswordInput())
organization = forms.CharField(required=True)
user_type = forms.CharField(required=True)
country = forms.CharField(label='Country', required=True)
state = forms.CharField(label='State/Province', required=True)
def __init__(self, request, *args, **kwargs):
self.request = request
super(SignupForm, self).__init__(*args, **kwargs)
def verify_captcha(self):
url = hydroshare_settings.RECAPTCHA_VERIFY_URL
values = {
'secret': hydroshare_settings.RECAPTCHA_SECRET_KEY,
'response': self.request.POST.get('g-recaptcha-response')
}
response = requests.post(url, values)
result = response.json()
if(result["success"]):
return (True, [])
return (False, result["error-codes"])
def clean(self):
success, error_codes = self.verify_captcha()
if not success:
self.add_error(None, " ".join(error_codes))
def clean_password2(self):
data = self.cleaned_data
if data["password1"] == data["password2"]:
data["password"] = data["password1"]
return data
else:
raise forms.ValidationError("Password must be confirmed")
def clean_user_type(self):
data = self.cleaned_data['user_type']
if len(data.strip()) == 0:
raise forms.ValidationError("User type is a required field.")
return data
def clean_country(self):
data = self.cleaned_data['country']
if len(data.strip()) == 0:
raise forms.ValidationError("Country is a required field.")
return data
def clean_state(self):
data = self.cleaned_data['state']
if len(data.strip()) == 0:
raise forms.ValidationError("State is a required field.")
return data
def save(self, *args, **kwargs):
data = self.cleaned_data
return create_account(
email=data['email'],
username=data['username'],
organization=data['organization'],
first_name=data['first_name'],
last_name=data['last_name'],
superuser=False,
password=data['password'],
user_type=data['user_type'],
country=data['country'],
state=data['state'],
active=False,
)
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ['first_name', 'last_name', 'email']
def clean_first_name(self):
data = self.cleaned_data['first_name']
return data.strip()
def clean_last_name(self):
data = self.cleaned_data['last_name']
return data.strip()
def clean_email(self):
data = self.cleaned_data['email']
if len(data.strip()) == 0:
raise forms.ValidationError("Email is a required field.")
return data
class UserProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
self.fields['identifiers'].required = False
class Meta:
model = UserProfile
exclude = ['user', 'public', 'create_irods_user_account']
def clean_organization(self):
data = self.cleaned_data['organization']
if data is None or len(data.strip()) == 0:
raise forms.ValidationError("Organization is a required field.")
return data
def clean_country(self):
data = self.cleaned_data['country']
if data is None or len(data.strip()) == 0:
raise forms.ValidationError("Country is a required field.")
return data
def clean_state(self):
data = self.cleaned_data['state']
if data is None or len(data.strip()) == 0:
raise forms.ValidationError("State is a required field.")
return data
def clean_identifiers(self):
data = self.cleaned_data['identifiers']
return Party.validate_identifiers(data)
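# Illustrative sketch (not part of the original module): validating a
# profile update with UserProfileForm; the field values are placeholders.
#
# form = UserProfileForm(data={'organization': 'Example University',
#                              'country': 'United States',
#                              'state': 'Utah'},
#                        instance=request.user.userprofile)
# if form.is_valid():
#     form.save()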
|
|
from __future__ import unicode_literals
import datetime
import time
import re
import six
import itertools
from operator import attrgetter
from hashlib import md5
from boto3 import Session
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import unix_time
from moto.core import ACCOUNT_ID
from .exceptions import (
StreamNotFoundError,
ShardNotFoundError,
ResourceInUseError,
ResourceNotFoundError,
InvalidArgumentError,
)
from .utils import (
compose_shard_iterator,
compose_new_shard_iterator,
decompose_shard_iterator,
)
class Record(BaseModel):
def __init__(self, partition_key, data, sequence_number, explicit_hash_key):
self.partition_key = partition_key
self.data = data
self.sequence_number = sequence_number
self.explicit_hash_key = explicit_hash_key
self.created_at_datetime = datetime.datetime.utcnow()
self.created_at = unix_time(self.created_at_datetime)
def to_json(self):
return {
"Data": self.data,
"PartitionKey": self.partition_key,
"SequenceNumber": str(self.sequence_number),
"ApproximateArrivalTimestamp": self.created_at,
}
class Shard(BaseModel):
def __init__(self, shard_id, starting_hash, ending_hash):
self._shard_id = shard_id
self.starting_hash = starting_hash
self.ending_hash = ending_hash
self.records = OrderedDict()
self.is_open = True
@property
def shard_id(self):
return "shardId-{0}".format(str(self._shard_id).zfill(12))
def get_records(self, last_sequence_id, limit):
last_sequence_id = int(last_sequence_id)
results = []
secs_behind_latest = 0
for sequence_number, record in self.records.items():
if sequence_number > last_sequence_id:
results.append(record)
last_sequence_id = sequence_number
very_last_record = self.records[next(reversed(self.records))]
secs_behind_latest = very_last_record.created_at - record.created_at
if len(results) == limit:
break
millis_behind_latest = int(secs_behind_latest * 1000)
return results, last_sequence_id, millis_behind_latest
def put_record(self, partition_key, data, explicit_hash_key):
# Note: this function is not safe for concurrency
if self.records:
last_sequence_number = self.get_max_sequence_number()
else:
last_sequence_number = 0
sequence_number = last_sequence_number + 1
self.records[sequence_number] = Record(
partition_key, data, sequence_number, explicit_hash_key
)
return sequence_number
def get_min_sequence_number(self):
if self.records:
return list(self.records.keys())[0]
return 0
def get_max_sequence_number(self):
if self.records:
return list(self.records.keys())[-1]
return 0
def get_sequence_number_at(self, at_timestamp):
if not self.records or at_timestamp < list(self.records.values())[0].created_at:
return 0
else:
# find the last item in the list that was created before
# at_timestamp
r = next(
(
r
for r in reversed(self.records.values())
if r.created_at < at_timestamp
),
None,
)
return r.sequence_number
def to_json(self):
response = {
"HashKeyRange": {
"EndingHashKey": str(self.ending_hash),
"StartingHashKey": str(self.starting_hash),
},
"SequenceNumberRange": {
"StartingSequenceNumber": self.get_min_sequence_number(),
},
"ShardId": self.shard_id,
}
if not self.is_open:
response["SequenceNumberRange"][
"EndingSequenceNumber"
] = self.get_max_sequence_number()
return response
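# Illustrative sketch (not part of the original module): records get
# monotonically increasing sequence numbers, and reads resume from the
# last sequence id a consumer saw.
def _example_shard_roundtrip():
    shard = Shard(0, starting_hash=0, ending_hash=2 ** 128)
    shard.put_record('pk', b'payload-1', None)
    shard.put_record('pk', b'payload-2', None)
    records, last_seq, _millis = shard.get_records(last_sequence_id=0, limit=10)
    assert [r.sequence_number for r in records] == [1, 2]
    assert last_seq == 2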
class Stream(CloudFormationModel):
def __init__(self, stream_name, shard_count, retention_period_hours, region_name):
self.stream_name = stream_name
self.creation_datetime = datetime.datetime.now()
self.region = region_name
self.account_number = ACCOUNT_ID
self.shards = {}
self.tags = {}
self.status = "ACTIVE"
self.shard_count = None
self.update_shard_count(shard_count)
self.retention_period_hours = retention_period_hours
def update_shard_count(self, shard_count):
# ToDo: This was extracted from init. It's only accurate for new streams.
# It doesn't (yet) try to accurately mimic the more complex re-sharding behavior.
# It makes the stream as if it had been created with this number of shards.
# Logically consistent, but not what AWS does.
self.shard_count = shard_count
step = 2 ** 128 // shard_count
hash_ranges = itertools.chain(
map(lambda i: (i, i * step, (i + 1) * step), range(shard_count - 1)),
[(shard_count - 1, (shard_count - 1) * step, 2 ** 128)],
)
for index, start, end in hash_ranges:
shard = Shard(index, start, end)
self.shards[shard.shard_id] = shard
@property
def arn(self):
return "arn:aws:kinesis:{region}:{account_number}:{stream_name}".format(
region=self.region,
account_number=self.account_number,
stream_name=self.stream_name,
)
def get_shard(self, shard_id):
if shard_id in self.shards:
return self.shards[shard_id]
else:
raise ShardNotFoundError(shard_id)
def get_shard_for_key(self, partition_key, explicit_hash_key):
if not isinstance(partition_key, six.string_types):
raise InvalidArgumentError("partition_key")
if len(partition_key) > 256:
raise InvalidArgumentError("partition_key")
if explicit_hash_key:
if not isinstance(explicit_hash_key, six.string_types):
raise InvalidArgumentError("explicit_hash_key")
key = int(explicit_hash_key)
if key >= 2 ** 128:
raise InvalidArgumentError("explicit_hash_key")
else:
key = int(md5(partition_key.encode("utf-8")).hexdigest(), 16)
for shard in self.shards.values():
if shard.starting_hash <= key < shard.ending_hash:
return shard
def put_record(
self, partition_key, explicit_hash_key, sequence_number_for_ordering, data
):
shard = self.get_shard_for_key(partition_key, explicit_hash_key)
sequence_number = shard.put_record(partition_key, data, explicit_hash_key)
return sequence_number, shard.shard_id
def to_json(self):
return {
"StreamDescription": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": self.status,
"HasMoreShards": False,
"RetentionPeriodHours": self.retention_period_hours,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
def to_json_summary(self):
return {
"StreamDescriptionSummary": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": self.status,
"StreamCreationTimestamp": six.text_type(self.creation_datetime),
"OpenShardCount": self.shard_count,
}
}
@staticmethod
def cloudformation_name_type():
return "Name"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
return "AWS::Kinesis::Stream"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json.get("Properties", {})
shard_count = properties.get("ShardCount", 1)
        # Kinesis streams default to 24 hours of retention.
        retention_period_hours = properties.get("RetentionPeriodHours", 24)
tags = {
tag_item["Key"]: tag_item["Value"]
for tag_item in properties.get("Tags", [])
}
backend = kinesis_backends[region_name]
stream = backend.create_stream(
resource_name, shard_count, retention_period_hours, region_name
)
if any(tags):
backend.add_tags_to_stream(stream.stream_name, tags)
return stream
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
properties = cloudformation_json["Properties"]
if Stream.is_replacement_update(properties):
resource_name_property = cls.cloudformation_name_type()
if resource_name_property not in properties:
properties[resource_name_property] = new_resource_name
new_resource = cls.create_from_cloudformation_json(
properties[resource_name_property], cloudformation_json, region_name
)
properties[resource_name_property] = original_resource.name
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return new_resource
else: # No Interruption
if "ShardCount" in properties:
original_resource.update_shard_count(properties["ShardCount"])
if "RetentionPeriodHours" in properties:
original_resource.retention_period_hours = properties[
"RetentionPeriodHours"
]
if "Tags" in properties:
original_resource.tags = {
tag_item["Key"]: tag_item["Value"]
for tag_item in properties.get("Tags", [])
}
return original_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
backend = kinesis_backends[region_name]
backend.delete_stream(resource_name)
@staticmethod
def is_replacement_update(properties):
        # For AWS::Kinesis::Stream, only changing the stream name requires a
        # replacement update.
        properties_requiring_replacement_update = ["Name"]
return any(
[
property_requiring_replacement in properties
for property_requiring_replacement in properties_requiring_replacement_update
]
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@property
def physical_resource_id(self):
return self.stream_name
class FirehoseRecord(BaseModel):
def __init__(self, record_data):
self.record_id = 12345678
self.record_data = record_data
class DeliveryStream(BaseModel):
def __init__(self, stream_name, **stream_kwargs):
self.name = stream_name
self.redshift_username = stream_kwargs.get("redshift_username")
self.redshift_password = stream_kwargs.get("redshift_password")
self.redshift_jdbc_url = stream_kwargs.get("redshift_jdbc_url")
self.redshift_role_arn = stream_kwargs.get("redshift_role_arn")
self.redshift_copy_command = stream_kwargs.get("redshift_copy_command")
self.s3_config = stream_kwargs.get("s3_config")
self.extended_s3_config = stream_kwargs.get("extended_s3_config")
self.redshift_s3_role_arn = stream_kwargs.get("redshift_s3_role_arn")
self.redshift_s3_bucket_arn = stream_kwargs.get("redshift_s3_bucket_arn")
self.redshift_s3_prefix = stream_kwargs.get("redshift_s3_prefix")
self.redshift_s3_compression_format = stream_kwargs.get(
"redshift_s3_compression_format", "UNCOMPRESSED"
)
self.redshift_s3_buffering_hints = stream_kwargs.get(
"redshift_s3_buffering_hints"
)
self.records = []
self.status = "ACTIVE"
self.created_at = datetime.datetime.utcnow()
self.last_updated = datetime.datetime.utcnow()
@property
def arn(self):
return "arn:aws:firehose:us-east-1:{1}:deliverystream/{0}".format(
self.name, ACCOUNT_ID
)
def destinations_to_dict(self):
if self.s3_config:
return [
{"DestinationId": "string", "S3DestinationDescription": self.s3_config}
]
elif self.extended_s3_config:
return [
{
"DestinationId": "string",
"ExtendedS3DestinationDescription": self.extended_s3_config,
}
]
else:
return [
{
"DestinationId": "string",
"RedshiftDestinationDescription": {
"ClusterJDBCURL": self.redshift_jdbc_url,
"CopyCommand": self.redshift_copy_command,
"RoleARN": self.redshift_role_arn,
"S3DestinationDescription": {
"BucketARN": self.redshift_s3_bucket_arn,
"BufferingHints": self.redshift_s3_buffering_hints,
"CompressionFormat": self.redshift_s3_compression_format,
"Prefix": self.redshift_s3_prefix,
"RoleARN": self.redshift_s3_role_arn,
},
"Username": self.redshift_username,
},
}
]
def to_dict(self):
return {
"DeliveryStreamDescription": {
"CreateTimestamp": time.mktime(self.created_at.timetuple()),
"DeliveryStreamARN": self.arn,
"DeliveryStreamName": self.name,
"DeliveryStreamStatus": self.status,
"Destinations": self.destinations_to_dict(),
"HasMoreDestinations": False,
"LastUpdateTimestamp": time.mktime(self.last_updated.timetuple()),
"VersionId": "string",
}
}
def put_record(self, record_data):
record = FirehoseRecord(record_data)
self.records.append(record)
return record
class KinesisBackend(BaseBackend):
def __init__(self):
self.streams = OrderedDict()
self.delivery_streams = {}
def create_stream(
self, stream_name, shard_count, retention_period_hours, region_name
):
if stream_name in self.streams:
raise ResourceInUseError(stream_name)
stream = Stream(stream_name, shard_count, retention_period_hours, region_name)
self.streams[stream_name] = stream
return stream
def describe_stream(self, stream_name):
if stream_name in self.streams:
return self.streams[stream_name]
else:
raise StreamNotFoundError(stream_name)
def describe_stream_summary(self, stream_name):
return self.describe_stream(stream_name)
def list_streams(self):
return self.streams.values()
def delete_stream(self, stream_name):
if stream_name in self.streams:
return self.streams.pop(stream_name)
raise StreamNotFoundError(stream_name)
def get_shard_iterator(
self,
stream_name,
shard_id,
shard_iterator_type,
starting_sequence_number,
at_timestamp,
):
# Validate params
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
shard_iterator = compose_new_shard_iterator(
stream_name,
shard,
shard_iterator_type,
starting_sequence_number,
at_timestamp,
)
return shard_iterator
def get_records(self, shard_iterator, limit):
decomposed = decompose_shard_iterator(shard_iterator)
stream_name, shard_id, last_sequence_id = decomposed
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
records, last_sequence_id, millis_behind_latest = shard.get_records(
last_sequence_id, limit
)
next_shard_iterator = compose_shard_iterator(
stream_name, shard, last_sequence_id
)
return next_shard_iterator, records, millis_behind_latest
def put_record(
self,
stream_name,
partition_key,
explicit_hash_key,
sequence_number_for_ordering,
data,
):
stream = self.describe_stream(stream_name)
sequence_number, shard_id = stream.put_record(
partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return sequence_number, shard_id
def put_records(self, stream_name, records):
stream = self.describe_stream(stream_name)
response = {"FailedRecordCount": 0, "Records": []}
for record in records:
partition_key = record.get("PartitionKey")
explicit_hash_key = record.get("ExplicitHashKey")
data = record.get("Data")
sequence_number, shard_id = stream.put_record(
partition_key, explicit_hash_key, None, data
)
response["Records"].append(
{"SequenceNumber": sequence_number, "ShardId": shard_id}
)
return response
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
stream = self.describe_stream(stream_name)
if shard_to_split not in stream.shards:
raise ResourceNotFoundError(shard_to_split)
        if not re.match(r"(0|[1-9]\d{0,38})$", new_starting_hash_key):
raise InvalidArgumentError(new_starting_hash_key)
new_starting_hash_key = int(new_starting_hash_key)
shard = stream.shards[shard_to_split]
last_id = sorted(stream.shards.values(), key=attrgetter("_shard_id"))[
-1
]._shard_id
if shard.starting_hash < new_starting_hash_key < shard.ending_hash:
new_shard = Shard(last_id + 1, new_starting_hash_key, shard.ending_hash)
shard.ending_hash = new_starting_hash_key
stream.shards[new_shard.shard_id] = new_shard
else:
raise InvalidArgumentError(new_starting_hash_key)
records = shard.records
shard.records = OrderedDict()
for index in records:
record = records[index]
stream.put_record(
record.partition_key, record.explicit_hash_key, None, record.data
)
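    # Worked example of the split above: a shard owning [0, 2**128) split at
    # new_starting_hash_key = 2**127 keeps [0, 2**127) and spawns a sibling
    # owning [2**127, 2**128); the original shard's records are then replayed
    # through put_record so each lands in the correct half.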
def merge_shards(self, stream_name, shard_to_merge, adjacent_shard_to_merge):
stream = self.describe_stream(stream_name)
if shard_to_merge not in stream.shards:
raise ResourceNotFoundError(shard_to_merge)
if adjacent_shard_to_merge not in stream.shards:
raise ResourceNotFoundError(adjacent_shard_to_merge)
shard1 = stream.shards[shard_to_merge]
shard2 = stream.shards[adjacent_shard_to_merge]
if shard1.ending_hash == shard2.starting_hash:
shard1.ending_hash = shard2.ending_hash
elif shard2.ending_hash == shard1.starting_hash:
shard1.starting_hash = shard2.starting_hash
else:
raise InvalidArgumentError(adjacent_shard_to_merge)
del stream.shards[shard2.shard_id]
for index in shard2.records:
record = shard2.records[index]
shard1.put_record(
record.partition_key, record.data, record.explicit_hash_key
)
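    # Merging is the inverse operation: shards are adjacent when one's
    # ending_hash equals the other's starting_hash, so [0, 2**127) and
    # [2**127, 2**128) collapse into a single shard owning [0, 2**128),
    # and the absorbed shard's records are replayed into the survivor.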
""" Firehose """
def create_delivery_stream(self, stream_name, **stream_kwargs):
stream = DeliveryStream(stream_name, **stream_kwargs)
self.delivery_streams[stream_name] = stream
return stream
def get_delivery_stream(self, stream_name):
if stream_name in self.delivery_streams:
return self.delivery_streams[stream_name]
else:
raise StreamNotFoundError(stream_name)
def list_delivery_streams(self):
return self.delivery_streams.values()
def delete_delivery_stream(self, stream_name):
self.delivery_streams.pop(stream_name)
def put_firehose_record(self, stream_name, record_data):
stream = self.get_delivery_stream(stream_name)
record = stream.put_record(record_data)
return record
def list_tags_for_stream(
self, stream_name, exclusive_start_tag_key=None, limit=None
):
stream = self.describe_stream(stream_name)
tags = []
result = {"HasMoreTags": False, "Tags": tags}
for key, val in sorted(stream.tags.items(), key=lambda x: x[0]):
if limit and len(tags) >= limit:
result["HasMoreTags"] = True
break
if exclusive_start_tag_key and key < exclusive_start_tag_key:
continue
tags.append({"Key": key, "Value": val})
return result
def add_tags_to_stream(self, stream_name, tags):
stream = self.describe_stream(stream_name)
stream.tags.update(tags)
def remove_tags_from_stream(self, stream_name, tag_keys):
stream = self.describe_stream(stream_name)
for key in tag_keys:
if key in stream.tags:
del stream.tags[key]
kinesis_backends = {}
for region in Session().get_available_regions("kinesis"):
kinesis_backends[region] = KinesisBackend()
for region in Session().get_available_regions("kinesis", partition_name="aws-us-gov"):
kinesis_backends[region] = KinesisBackend()
for region in Session().get_available_regions("kinesis", partition_name="aws-cn"):
kinesis_backends[region] = KinesisBackend()
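# Usage sketch (hypothetical values; signatures as defined above):
#
#   >>> backend = kinesis_backends["us-east-1"]
#   >>> backend.create_stream("my-stream", 2, 24, "us-east-1")
#   >>> backend.put_record("my-stream", "user-42", None, None, b"payload")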
|
|
"""
Tests for dit.distribution.
"""
import pytest
from dit import Distribution, ScalarDistribution
from dit.distribution import BaseDistribution
from dit.exceptions import ditException, InvalidNormalization
def test_dist_iter1():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
for o in d:
assert o in outcomes
for o1, o2 in zip(d, outcomes):
assert o1 == o2
def test_dist_iter2():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
for o in reversed(d):
assert o in outcomes
for o1, o2 in zip(reversed(d), reversed(outcomes)):
assert o1 == o2
def test_numerical():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
assert d.is_numerical()
@pytest.mark.parametrize('i', range(10))
def test_rand(i):
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
assert d.rand() in outcomes
def test_to_dict():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
dd = d.to_dict()
for o, p in dd.items():
assert d[o] == pytest.approx(p)
def test_validate1():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
assert d.validate()
assert BaseDistribution.validate(d)
def test_validate2():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
d['00'] = 0
with pytest.raises(InvalidNormalization):
d.validate()
with pytest.raises(InvalidNormalization):
BaseDistribution.validate(d)
def test_zipped1():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
zipped = d.zipped(mode='pants')
with pytest.raises(ditException):
list(zipped)
def test_to_string1():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s = d.to_string()
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 2
RV Names: None
x p(x)
00 0.25
01 0.25
10 0.25
11 0.25"""
assert s == s_
def test_to_string2():
# Test with exact.
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s = d.to_string(exact=True)
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 2
RV Names: None
x p(x)
00 1/4
01 1/4
10 1/4
11 1/4"""
assert s == s_
def test_to_string3():
# Test printing
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 2
RV Names: None
x p(x)
00 0.25
01 0.25
10 0.25
11 0.25"""
    # capture the printed output with a context manager instead of swapping sys.stdout
    from io import StringIO
    from contextlib import redirect_stdout
    sio = StringIO()
    with redirect_stdout(sio):
        print(d, end='')
    sio.seek(0)
    s = sio.read()
assert s == s_
def test_to_string4():
# Basic with marginal
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
d = d.marginal([0])
s = d.to_string()
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 1
RV Names: None
x p(x)
0 0.5
1 0.5"""
assert s == s_
def test_to_string5():
# Basic with marginal and mask
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
d = d.marginal([0])
s = d.to_string(show_mask=True)
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 1 (mask: 2)
RV Names: None
x p(x)
0* 0.5
1* 0.5"""
assert s == s_
def test_to_string6():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s = d.to_string(digits=1)
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 2
RV Names: None
x p(x)
00 0.2
01 0.2
10 0.2
11 0.2"""
assert s == s_
def test_to_string7():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = ScalarDistribution(outcomes, pmf)
s = d.to_string()
s_ = """Class: ScalarDistribution
Alphabet: ('00', '01', '10', '11')
Base: linear
x p(x)
00 0.25
01 0.25
10 0.25
11 0.25"""
assert s == s_
def test_to_string8():
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
d = d.marginal([0])
s = d.to_string(show_mask='!')
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: linear
Outcome Class: str
Outcome Length: 1 (mask: 2)
RV Names: None
x p(x)
0! 0.5
1! 0.5"""
assert s == s_
def test_to_string9():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
d.set_base(2)
s = d.to_string()
s_ = """Class: Distribution
Alphabet: ('0', '1') for all rvs
Base: 2
Outcome Class: str
Outcome Length: 2
RV Names: None
x log p(x)
00 -2.0
01 -2.0
10 -2.0
11 -2.0"""
assert s == s_
def test_to_string10():
# Basic
d = ScalarDistribution([], sample_space=[0, 1], validate=False)
s = d.to_string()
s_ = """Class: ScalarDistribution
Alphabet: (0, 1)
Base: 2
x log p(x)"""
assert s == s_
def test_prepare_string1():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = ScalarDistribution(outcomes, pmf)
from dit.distribution import prepare_string
with pytest.raises(ditException):
prepare_string(d, show_mask=True)
def test_prepare_string2():
# Basic
outcomes = ['00', '01', '10', '11']
pmf = [1 / 4] * 4
d = ScalarDistribution(outcomes, pmf)
from dit.distribution import prepare_string
with pytest.raises(ditException):
prepare_string(d, str_outcomes=True)
def test_prepare_string3():
outcomes = [(0, 0), (0, 1), (1, 0), (1, 1)]
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s_ = """Class: Distribution
Alphabet: (0, 1) for all rvs
Base: linear
Outcome Class: tuple
Outcome Length: 2
RV Names: None
x p(x)
00 0.25
01 0.25
10 0.25
11 0.25"""
s = d.to_string(str_outcomes=True)
assert s == s_
def test_prepare_string4():
class WeirdInt(int):
def __str__(self):
raise Exception
outcomes = [(0, 0), (0, 1), (1, 0), (1, 1)]
outcomes = [(WeirdInt(x), WeirdInt(y)) for (x, y) in outcomes]
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
s_ = """Class: Distribution
Alphabet: (0, 1) for all rvs
Base: linear
Outcome Class: tuple
Outcome Length: 2
RV Names: None
x p(x)
(0, 0) 0.25
(0, 1) 0.25
(1, 0) 0.25
(1, 1) 0.25"""
s = d.to_string(str_outcomes=True)
assert s == s_
def test_really_big_words():
"""
Test to ensure that large but sparse outcomes are fast.
"""
outcomes = ['01' * 45, '10' * 45]
pmf = [1 / 2] * 2
d = Distribution(outcomes, pmf)
d = d.coalesce([range(30), range(30, 60), range(60, 90)])
new_outcomes = (('10' * 15,) * 3, ('01' * 15,) * 3)
assert d.outcomes == new_outcomes
def test_multivariate_lookup():
# issue #156
outcomes = ['000', '010', '100', '111']
pmf = [1 / 4] * 4
d = Distribution(outcomes, pmf)
assert d['000'] == 1 / 4
assert d['0', '0', '0'] == 1 / 4
with pytest.raises(Exception): # noqa: PT011
d['0', '0', 'A'] # should raise exception
|
|
# coding=utf-8
# Copyright 2018 DPR Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DPR model for Open Domain Question Answering."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from torch import Tensor, nn
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..bert.modeling_bert import BertModel
from .configuration_dpr import DPRConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DPRConfig"
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/dpr-ctx_encoder-single-nq-base",
"facebook/dpr-ctx_encoder-multiset-base",
]
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/dpr-question_encoder-single-nq-base",
"facebook/dpr-question_encoder-multiset-base",
]
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/dpr-reader-single-nq-base",
"facebook/dpr-reader-multiset-base",
]
##########
# Outputs
##########
@dataclass
class DPRContextEncoderOutput(ModelOutput):
"""
    Class for outputs of :class:`~transformers.DPRContextEncoder`.
Args:
pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``):
The DPR encoder outputs the `pooler_output` that corresponds to the context representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
pooler_output: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class DPRQuestionEncoderOutput(ModelOutput):
"""
Class for outputs of :class:`~transformers.DPRQuestionEncoder`.
Args:
pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``):
The DPR encoder outputs the `pooler_output` that corresponds to the question representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed questions for nearest neighbors queries with context embeddings.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
pooler_output: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class DPRReaderOutput(ModelOutput):
"""
    Class for outputs of :class:`~transformers.DPRReader`.
Args:
start_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``):
Logits of the start index of the span for each passage.
end_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``):
Logits of the end index of the span for each passage.
        relevance_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, )``):
Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the
question, compared to all the other passages.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
start_logits: torch.FloatTensor
end_logits: torch.FloatTensor = None
relevance_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
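# Note on how these outputs are consumed (DPR's retrieval scheme, not code in
# this file): relevance is the dot product of question and context embeddings,
# e.g. scores = torch.matmul(q_out.pooler_output, ctx_out.pooler_output.T)
# yields a (num_questions, num_contexts) score matrix for nearest-neighbor search.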
class DPREncoder(PreTrainedModel):
base_model_prefix = "bert_model"
def __init__(self, config: DPRConfig):
super().__init__(config)
self.bert_model = BertModel(config)
assert self.bert_model.config.hidden_size > 0, "Encoder hidden_size can't be zero"
self.projection_dim = config.projection_dim
if self.projection_dim > 0:
self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim)
self.init_weights()
def forward(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]:
outputs = self.bert_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
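        # DPR uses the [CLS] token's last hidden state as the pooled output,
        # overriding BERT's pooler output unpacked above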
pooled_output = sequence_output[:, 0, :]
if self.projection_dim > 0:
pooled_output = self.encode_proj(pooled_output)
if not return_dict:
return (sequence_output, pooled_output) + outputs[2:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@property
def embeddings_size(self) -> int:
if self.projection_dim > 0:
return self.encode_proj.out_features
return self.bert_model.config.hidden_size
def init_weights(self):
self.bert_model.init_weights()
if self.projection_dim > 0:
self.encode_proj.apply(self.bert_model._init_weights)
class DPRSpanPredictor(PreTrainedModel):
base_model_prefix = "encoder"
def __init__(self, config: DPRConfig):
super().__init__(config)
self.encoder = DPREncoder(config)
self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2)
self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1)
self.init_weights()
def forward(
self,
input_ids: Tensor,
attention_mask: Tensor,
inputs_embeds: Optional[Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
        # notation: N - number of questions in a batch, M - number of passages per question, L - sequence length
n_passages, sequence_length = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2]
# feed encoder
outputs = self.encoder(
input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# compute logits
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
# resize
start_logits = start_logits.view(n_passages, sequence_length)
end_logits = end_logits.view(n_passages, sequence_length)
relevance_logits = relevance_logits.view(n_passages)
if not return_dict:
return (start_logits, end_logits, relevance_logits) + outputs[2:]
return DPRReaderOutput(
start_logits=start_logits,
end_logits=end_logits,
relevance_logits=relevance_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def init_weights(self):
self.encoder.init_weights()
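# Usage note: the (start_logits, end_logits, relevance_logits) triple produced
# above is typically post-processed with
# :meth:`~transformers.DPRReaderTokenizer.decode_best_spans`, which picks the
# highest-scoring passage via relevance_logits and the best start/end pair in it.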
##################
# PreTrainedModel
##################
class DPRPretrainedContextEncoder(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
load_tf_weights = None
base_model_prefix = "ctx_encoder"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def init_weights(self):
self.ctx_encoder.init_weights()
class DPRPretrainedQuestionEncoder(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
load_tf_weights = None
base_model_prefix = "question_encoder"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def init_weights(self):
self.question_encoder.init_weights()
class DPRPretrainedReader(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
load_tf_weights = None
base_model_prefix = "span_predictor"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def init_weights(self):
self.span_predictor.encoder.init_weights()
self.span_predictor.qa_classifier.apply(self.span_predictor.encoder.bert_model._init_weights)
self.span_predictor.qa_outputs.apply(self.span_predictor.encoder.bert_model._init_weights)
###############
# Actual Models
###############
DPR_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.DPRConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
DPR_ENCODERS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
::
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
(b) For single sequences (for a question for example):
::
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using :class:`~transformers.DPRTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
DPR_READER_INPUTS_DOCSTRING = r"""
Args:
input_ids: (:obj:`Tuple[torch.LongTensor]` of shapes :obj:`(n_passages, sequence_length)`):
Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
            and 2) the passages titles and 3) the passages texts. To match pretraining, DPR :obj:`input_ids` sequence
should be formatted with [CLS] and [SEP] with the format:
``[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>``
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using :class:`~transformers.DPRReaderTokenizer`. See this class documentation for
more details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DPRContextEncoder transformer outputting pooler outputs as context representations.",
DPR_START_DOCSTRING,
)
class DPRContextEncoder(DPRPretrainedContextEncoder):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.ctx_encoder = DPREncoder(config)
self.init_weights()
@add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]:
r"""
Return:
Examples::
>>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
>>> tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')
>>> model = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"]
>>> embeddings = model(input_ids).pooler_output
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = (
torch.ones(input_shape, device=device)
if input_ids is None
else (input_ids != self.config.pad_token_id)
)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
outputs = self.ctx_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs[1:]
return DPRContextEncoderOutput(
pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@add_start_docstrings(
"The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.",
DPR_START_DOCSTRING,
)
class DPRQuestionEncoder(DPRPretrainedQuestionEncoder):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.question_encoder = DPREncoder(config)
self.init_weights()
@add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]:
r"""
Return:
Examples::
>>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
>>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
>>> model = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"]
>>> embeddings = model(input_ids).pooler_output
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = (
torch.ones(input_shape, device=device)
if input_ids is None
else (input_ids != self.config.pad_token_id)
)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
outputs = self.question_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs[1:]
return DPRQuestionEncoderOutput(
pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@add_start_docstrings(
"The bare DPRReader transformer outputting span predictions.",
DPR_START_DOCSTRING,
)
class DPRReader(DPRPretrainedReader):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.span_predictor = DPRSpanPredictor(config)
self.init_weights()
@add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
return_dict=None,
) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
r"""
Return:
Examples::
>>> from transformers import DPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
>>> model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors='pt'
... )
>>> outputs = model(**encoded_inputs)
            >>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> relevance_logits = outputs.relevance_logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
return self.span_predictor(
input_ids,
attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
|
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python Main Module: Entry point to the testbench tools in PoC repository.
#
# Description:
# ------------------------------------
# This is a python main module (executable) which:
# - runs automated testbenches,
# - ...
#
# License:
# ==============================================================================
# Copyright 2007-2015 Technische Universitaet Dresden - Germany
# Chair for VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from pathlib import Path
from lib.Functions import Exit
from Base.Exceptions import *
from Base.PoCBase import CommandLineProgram
from PoC.Entity import *
from PoC.Config import *
from Compiler import *
from Compiler.Exceptions import *
class NetList(CommandLineProgram):
__netListConfigFileName = "configuration.ini"
headLine = "The PoC-Library - NetList Service Tool"
dryRun = False
netListConfig = None
def __init__(self, debug, verbose, quiet):
super(self.__class__, self).__init__(debug, verbose, quiet)
		if self.platform not in ("Windows", "Linux"): raise PlatformNotSupportedException(self.platform)
self.readNetListConfiguration()
# read NetList configuration
# ==========================================================================
def readNetListConfiguration(self):
from configparser import ConfigParser, ExtendedInterpolation
self.files["PoCNLConfig"] = self.directories["PoCNetList"] / self.__netListConfigFileName
netListConfigFilePath = self.files["PoCNLConfig"]
self.printDebug("Reading NetList configuration from '%s'" % str(netListConfigFilePath))
if not netListConfigFilePath.exists(): raise NotConfiguredException("PoC netlist configuration file does not exist. (%s)" % str(netListConfigFilePath))
self.netListConfig = ConfigParser(interpolation=ExtendedInterpolation())
self.netListConfig.optionxform = str
self.netListConfig.read([
str(self.files['PoCPrivateConfig']),
str(self.files['PoCPublicConfig']),
str(self.files["PoCNLConfig"])
])
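	# Example (hypothetical) excerpt of configuration.ini as consumed below;
	# the BOARDS section maps a board name to its device string:
	#
	#   [BOARDS]
	#   ML505 = XC5VLX50T-1FF1136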
def coreGenCompilation(self, entity, showLogs, showReport, deviceString=None, boardString=None):
		# check if ISE is configured
if (len(self.pocConfig.options("Xilinx-ISE")) == 0): raise NotConfiguredException("Xilinx ISE is not configured on this system.")
# prepare some paths
self.directories["ISEInstallation"] = Path(self.pocConfig['Xilinx-ISE']['InstallationDirectory'])
self.directories["ISEBinary"] = Path(self.pocConfig['Xilinx-ISE']['BinaryDirectory'])
# check if the appropriate environment is loaded
from os import environ
		if (environ.get('XILINX') is None): raise EnvironmentException("Xilinx ISE environment is not loaded in this shell environment.")
if (boardString is not None):
if not self.netListConfig.has_option('BOARDS', boardString):
from configparser import NoOptionError
raise CompilerException("Board '" + boardString + "' not found.") from NoOptionError(boardString, 'BOARDS')
device = Device(self.netListConfig['BOARDS'][boardString])
elif (deviceString is not None):
device = Device(deviceString)
else: raise BaseException("No board or device given.")
entityToCompile = Entity(self, entity)
compiler = XCOCompiler.Compiler(self, showLogs, showReport)
compiler.dryRun = self.dryRun
compiler.run(entityToCompile, device)
def xstCompilation(self, entity, showLogs, showReport, deviceString=None, boardString=None):
		# check if ISE is configured
if (len(self.pocConfig.options("Xilinx-ISE")) == 0):
raise NotConfiguredException("Xilinx ISE is not configured on this system.")
# prepare some paths
self.directories["ISEInstallation"] = Path(self.pocConfig['Xilinx-ISE']['InstallationDirectory'])
self.directories["ISEBinary"] = Path(self.pocConfig['Xilinx-ISE']['BinaryDirectory'])
# check if the appropriate environment is loaded
from os import environ
		if (environ.get('XILINX') is None):
			raise EnvironmentException("Xilinx ISE environment is not loaded in this shell environment.")
if (boardString is not None):
if not self.netListConfig.has_option('BOARDS', boardString):
from configparser import NoOptionError
raise CompilerException("Board '" + boardString + "' not found.") from NoOptionError(boardString, 'BOARDS')
device = Device(self.netListConfig['BOARDS'][boardString])
elif (deviceString is not None):
device = Device(deviceString)
else: raise BaseException("No board or device given.")
entityToCompile = Entity(self, entity)
compiler = XSTCompiler.Compiler(self, showLogs, showReport)
compiler.dryRun = self.dryRun
compiler.run(entityToCompile, device)
# main program
def main():
from colorama import Fore, Back, Style, init
init()
print(Fore.MAGENTA + "=" * 80)
print("{: ^80s}".format("The PoC Library - NetList Service Tool"))
print("=" * 80)
print(Fore.RESET + Back.RESET + Style.RESET_ALL)
try:
import argparse
import textwrap
# create a command line argument parser
argParser = argparse.ArgumentParser(
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.dedent('''\
This is the PoC Library NetList Service Tool.
'''),
add_help=False)
# add arguments
group1 = argParser.add_argument_group('Verbosity')
group1.add_argument('-D', help='enable script wrapper debug mode', action='store_const', const=True, default=False)
group1.add_argument('-d', dest="debug", help='enable debug mode', action='store_const', const=True, default=False)
group1.add_argument('-v', dest="verbose", help='print out detailed messages', action='store_const', const=True, default=False)
group1.add_argument('-q', dest="quiet", help='run in quiet mode', action='store_const', const=True, default=False)
group1.add_argument('-r', dest="showReport", help='show report', action='store_const', const=True, default=False)
group1.add_argument('-l', dest="showLog", help='show logs', action='store_const', const=True, default=False)
group2 = argParser.add_argument_group('Commands')
group21 = group2.add_mutually_exclusive_group(required=True)
group21.add_argument('-h', '--help', dest="help", help='show this help message and exit', action='store_const', const=True, default=False)
group211 = group21.add_mutually_exclusive_group()
group211.add_argument( '--coregen', metavar="<Entity>", dest="coreGen", help='use Xilinx IP-Core Generator (CoreGen)')
group211.add_argument( '--xst', metavar="<Entity>", dest="xst", help='use Xilinx Compiler Tool (XST)')
group3 = group211.add_argument_group('Specify target platform')
group31 = group3.add_mutually_exclusive_group()
group31.add_argument('--device', metavar="<Device>", dest="device", help='target device (e.g. XC5VLX50T-1FF1136)')
		group31.add_argument('--board', metavar="<Board>", dest="board", help='target board to infer the device (e.g. ML505)')
# parse command line options
args = argParser.parse_args()
except Exception as ex:
Exit.printException(ex)
try:
netList = NetList(args.debug, args.verbose, args.quiet)
#netList.dryRun = True
if (args.help == True):
argParser.print_help()
return
elif (args.coreGen is not None):
netList.coreGenCompilation(args.coreGen, args.showLog, args.showReport, deviceString=args.device, boardString=args.board)
elif (args.xst is not None):
netList.xstCompilation(args.xst, args.showLog, args.showReport, deviceString=args.device, boardString=args.board)
else:
argParser.print_help()
except CompilerException as ex:
from colorama import Fore, Back, Style
from configparser import Error
print(Fore.RED + "ERROR:" + Fore.RESET + " %s" % ex.message)
if isinstance(ex.__cause__, FileNotFoundError):
print(Fore.YELLOW + " FileNotFound:" + Fore.RESET + " '%s'" % str(ex.__cause__))
elif isinstance(ex.__cause__, Error):
print(Fore.YELLOW + " configparser.Error:" + Fore.RESET + " %s" % str(ex.__cause__))
print(Fore.RESET + Back.RESET + Style.RESET_ALL)
exit(1)
except EnvironmentException as ex: Exit.printEnvironmentException(ex)
except NotConfiguredException as ex: Exit.printNotConfiguredException(ex)
except PlatformNotSupportedException as ex: Exit.printPlatformNotSupportedException(ex)
	# catch the more specific exception before the generic BaseException
	except NotImplementedException as ex: Exit.printNotImplementedException(ex)
	except BaseException as ex: Exit.printBaseException(ex)
except Exception as ex: Exit.printException(ex)
# entry point
if __name__ == "__main__":
Exit.versionCheck((3,4,0))
main()
else:
	Exit.printThisIsNoLibraryFile(NetList.headLine)
|
|
################################
# Problem: Doomsday Fuel (Google foobar)
# Author: babhishek21
# Lang: Python 2.7
#
# Hint: Absorbing Markov Chains
#
# This solution follows the wikipedia format of canonizing the transition matrix `P`:
# https://en.wikipedia.org/wiki/Absorbing_Markov_chain
#
# Problem Solution at:
# https://github.com/ivanseed/google-foobar-help/blob/master/challenges/doomsday_fuel/doomsday_fuel.md
################################
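# The math in brief: bring the transition matrix into canonical form
#   P = | Q  R |
#       | 0  I |
# (Q: transient -> transient, R: transient -> absorbing). The fundamental
# matrix is N = (I - Q)^-1, and B = N * R holds the absorption probabilities;
# row 0 of B answers the problem for starting state 0.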
# fraction ops
from fractions import Fraction, gcd
def lcm(x, y):
return (x*y)//abs(gcd(x, y))
# matrix ops
# all matrices are assumed square
def transpose(mat):
return map(list, zip(*mat))
def cross(mat1, mat2):
res = []
for i in xrange(len(mat1)):
res.append([])
for j in xrange(len(mat2[0])):
res[i].append(Fraction(0, 1))
for k in xrange(len(mat2)): # assuming len(mat1[0]) == len(mat2)
res[i][j] += mat1[i][k] * mat2[k][j]
return res
def inverse(mat):
order = len(mat)
id_mat = [[Fraction(int(i==j), 1) for j in xrange(order)] for i in xrange(order)]
for fd in xrange(order):
fd_scaler = Fraction(1, 1) / mat[fd][fd]
for j in xrange(order):
mat[fd][j] *= fd_scaler
id_mat[fd][j] *= fd_scaler
for i in xrange(order):
if i == fd:
continue
cr_scaler = mat[i][fd]
for j in xrange(order):
mat[i][j] -= cr_scaler * mat[fd][j]
id_mat[i][j] -= cr_scaler * id_mat[fd][j]
return id_mat
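# Note: the Gauss-Jordan elimination above performs no row swaps, so it
# assumes every pivot mat[fd][fd] is nonzero; this holds for the diagonally
# dominant (I - Q) matrices this script inverts.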
## solution
# transforms an ordinary step matrix to P matrix of Absorbing Markov Chain
# See: https://en.wikipedia.org/wiki/Absorbing_Markov_chain
def canonize(mat):
row_sums = map(sum, mat)
terminal_row_index = set([i for i, x in enumerate(row_sums) if x == 0])
frac_mat = [[Fraction(x, row_sums[i]) if row_sums[i] != 0 else Fraction(0, 1) for x in row] for i, row in enumerate(mat)]
# frac_mat = mat
nS = len(frac_mat) # total number of states
nR = len(terminal_row_index) # number of terminal / absorbing states
nQ = nS - nR # number of non-terminal / non-absorbing states
Q = [] # Q is a (nQ x nQ) matrix
R = [] # R is a (nQ x nR) matrix
Q = [row[:] for idx, row in enumerate(frac_mat) if idx not in terminal_row_index]
R = [row[:] for row in Q]
Q = [[x for i, x in enumerate(row) if i not in terminal_row_index] for row in Q]
R = [[x for i, x in enumerate(row) if i in terminal_row_index] for row in R]
# print pprint(locals())
return Q, R, nQ, nR
def solution(mat):
Q, R, nQ, nR = canonize(mat)
if nQ + nR == 1:
return [1, 1]
I_sub_Q = [[Fraction(int(i==j), 1) - Q[i][j] for j in range(nQ)] for i in range(nQ)]
N = inverse(I_sub_Q)
N_mult_R = cross(N, R)
# get probabilities for starting at state 0
denom = map(lambda x: x.denominator, N_mult_R[0])
denom_lcm = reduce(lambda x, y: lcm(x, y), denom)
ans = [int(f.numerator * denom_lcm / f.denominator) for f in N_mult_R[0]] + [denom_lcm]
# import pprint
# pprint.pprint(locals())
return ans
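# Worked example (matches the first assert below): for
#   [[0, 2, 1, 0, 0], [0, 0, 0, 3, 4], <three all-zero rows>]
# row sums are [3, 7, 0, 0, 0], so states 2-4 are absorbing and
#   Q = [[0, 2/3], [0, 0]],  R = [[1/3, 0, 0], [0, 3/7, 4/7]]
#   N = (I - Q)^-1 = [[1, 2/3], [0, 1]]
#   B = N * R, row 0 = [1/3, 2/7, 8/21]  ->  [7, 6, 8, 21] over lcm 21.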
if __name__ == "__main__":
print solution([
[0, 1, 0, 0, 0, 1],
[4, 0, 0, 3, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
])
# Test cases from https://sskaje.me/2017/05/googles-foo-bar-doomsday-fuel/
assert (
solution([
[0, 2, 1, 0, 0],
[0, 0, 0, 3, 4],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]) == [7, 6, 8, 21]
)
assert (
solution([
[0, 1, 0, 0, 0, 1],
[4, 0, 0, 3, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]) == [0, 3, 2, 9, 14]
)
assert (
solution([
[1, 2, 3, 0, 0, 0],
[4, 5, 6, 0, 0, 0],
[7, 8, 9, 1, 0, 0],
[0, 0, 0, 0, 1, 2],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]) == [1, 2, 3]
)
assert (
solution([
[0]
]) == [1, 1]
)
assert (
solution([
[0, 0, 12, 0, 15, 0, 0, 0, 1, 8],
[0, 0, 60, 0, 0, 7, 13, 0, 0, 0],
[0, 15, 0, 8, 7, 0, 0, 1, 9, 0],
[23, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[37, 35, 0, 0, 0, 0, 3, 21, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [1, 2, 3, 4, 5, 15]
)
assert (
solution([
[0, 7, 0, 17, 0, 1, 0, 5, 0, 2],
[0, 0, 29, 0, 28, 0, 3, 0, 16, 0],
[0, 3, 0, 0, 0, 1, 0, 0, 0, 0],
[48, 0, 3, 0, 0, 0, 17, 0, 0, 0],
[0, 6, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [4, 5, 5, 4, 2, 20]
)
assert (
solution([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [1, 1, 1, 1, 1, 5]
)
assert (
solution([
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [2, 1, 1, 1, 1, 6]
)
assert (
solution([
[0, 86, 61, 189, 0, 18, 12, 33, 66, 39],
[0, 0, 2, 0, 0, 1, 0, 0, 0, 0],
[15, 187, 0, 0, 18, 23, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [6, 44, 4, 11, 22, 13, 100]
)
assert (
solution([
[0, 0, 0, 0, 3, 5, 0, 0, 0, 2],
[0, 0, 4, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 4, 4, 0, 0, 0, 1, 1],
[13, 0, 0, 0, 0, 0, 2, 0, 0, 0],
[0, 1, 8, 7, 0, 0, 0, 1, 3, 0],
[1, 7, 0, 0, 0, 0, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]) == [1, 1, 1, 2, 5]
)
|
|
#! /usr/bin/env python
# By Fedor Iskhakov
# fedor.iskh.me
# The packages:
# geopy is needed for geo-locating the employers
# bleach is needed for cleaning up the content of ads for Evernote standard (ENML)
# https://dev.evernote.com/doc/articles/enml.php#prohibited
# https://pypi.python.org/pypi/bleach
# http://geopy.readthedocs.org/en/1.10.0/
import sys
import xml.etree.ElementTree as ET
import geopy
import datetime
import calendar
import bleach
from xml.sax.saxutils import escape
# SETUP:
# The XML file downloaded from JOE
joe_xmlfile='<PATH>'
# The output file that will be imported into Evernote
evernote_xmlfile='<PATH>'
print '''
Python script that converts XML positions data downloaded from JOE (joe_full_xml.xml)
to ENEX format XML that can be imported into Evernote.
'''
#patch for CDATA support from http://www.kaarsemaker.net/blog/2013/10/10/cdata-support-in-elementtree/
def CDATA(text=None):
element = ET.Element('![CDATA[')
element.text = text
return element
# Python 2.7 and 3
if hasattr(ET, '_serialize_xml'):
ET._original_serialize_xml = ET._serialize_xml
def _serialize_xml(write, elem, *args):
if elem.tag == '![CDATA[':
# write("%s%s" % (elem.tag, elem.text))
write("<![CDATA[%s]]>" % elem.text.encode('utf-8'))
return
return ET._original_serialize_xml(write, elem, *args)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml
# Python 2.5-2.6, and non-stdlib ElementTree
elif hasattr(ET.ElementTree, '_write'):
ET.ElementTree._orig_write = ET.ElementTree._write
def _write(self, file, node, encoding, namespaces):
if node.tag == '![CDATA[':
file.write("\n<![CDATA[%s]]>\n" % node.text.encode(encoding))
else:
self._orig_write(file, node, encoding, namespaces)
ET.ElementTree._write = _write
else:
raise RuntimeError("Don't know how to monkeypatch CDATA support. Please report a bug at https://github.com/seveas/python-hpilo")
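# Usage sketch for the patch above: appending a CDATA element makes
# ElementTree emit its text verbatim, which Evernote's ENML import expects:
#
#   content = ET.SubElement(note, 'content')
#   content.append(CDATA('<en-note>...</en-note>'))
#
# serializes as <content><![CDATA[<en-note>...</en-note>]]></content>.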
from geopy.geocoders import Nominatim
# from geopy.geocoders import GoogleV3
geolocator = Nominatim()
# geolocator = GoogleV3()
# input XML tree
intree = ET.parse(joe_xmlfile)
# output start building the tree
root2 = ET.Element("en-export")
#number of positions in the file
npos=len(list(intree.iter('position')))
i=1
yeartag=intree.find('year')
year=yeartag.attrib['joe_year_ID']
issue=yeartag.find('issue').attrib['joe_issue_ID']
if len(issue)==1:
issue='0'+issue
for position in intree.iter('position'):
print '\nPosition ',i,' of ',npos,':'
joeid=year+'-'+issue+'_'+position.attrib['jp_id']
print ' JOE id=',joeid
section = position.find('jp_section').text
print ' section=',section
title=position.find('jp_title').text
print ' title=',title
institution=position.find('jp_institution').text
print ' institution=',institution
print ' address=',
sys.stdout.flush()
#analyse location
try:
loc=position.find('locations').find('location')
country=loc.find('country').text
if country is None:
country=''
state=loc.find('state').text
if state is None:
state=''
city=loc.find('city').text
if city is None:
city=''
geo = geolocator.geocode(' '.join([institution,city,state,country]), exactly_one=True)
if geo is None:
geo = geolocator.geocode(' '.join([city,state,country]), exactly_one=True)
if geo is None:
geo = geolocator.geocode(institution, exactly_one=True)
except Exception:
geo = None
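    # Geocoding falls back through three queries: institution+city+state+country,
    # then city+state+country, then the institution alone; the message below
    # ('unknown after 3 geocoding attempts') means all three failed or the
    # location block was missing.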
if geo is not None:
print geo.address,
print((geo.latitude, geo.longitude))
else:
        print 'unknown after 3 geocoding attempts'
i=i+1
# JEL codes and keywords
jel=position.find('JEL_Classifications')
jel_codes=list(jel.iter('jc_description'))
keywords=position.find('jp_keywords').text
if keywords is not None:
keywords=keywords.split("\n")
#start creating a note for Evernote
note = ET.SubElement(root2, "note")
ET.SubElement(note, "title").text = title+' at '+institution
if 'full-time' in section.lower():
ET.SubElement(note, "tag").text = 'Full-Time'
if 'nonacademic' in section.lower():
ET.SubElement(note, "tag").text = 'Non-Academic'
if 'international' not in section.lower():
ET.SubElement(note, "tag").text = 'USA'
#the actual Note content
entry='<?xml version="1.0" encoding="UTF-8" standalone="no"?>' + \
'<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">' + \
'<en-note style="word-wrap: break-word; -webkit-nbsp-mode: space; -webkit-line-break: after-white-space;">'
entry=entry+'<div style="margin-bottom:1em;"><a style="color:black" href="https://www.aeaweb.org/joe/listing.php?JOE_ID='+joeid+'">JOE id '+joeid+' (view online)</a></div>'
entry=entry+'<div style="font-size:small;">' + section + '</div>'
entry=entry+'<div style="font-size:large;color:#00b300">'+position.find('jp_title').text+'</div>'
entry=entry+'<div style="font-size:large;font-weight:bold;color:#c80000">'+escape(institution)+'</div>'
if position.find('jp_division') is not None and position.find('jp_division').text is not None:
entry=entry+'<div style="font-size:norlam;font-weight:bold;color:#c80000">'+escape(position.find('jp_division').text)+'</div>'
if position.find('jp_department') is not None and position.find('jp_department').text is not None:
entry=entry+'<div style="font-size:norlam;font-weight:bold;color:#c80000">'+escape(position.find('jp_department').text)+'</div>'
if geo is not None:
entry=entry+'<div><a style="font-size:large;font-weight:bold;color:#0000cc" href="https://www.google.com.au/maps/@'+str(geo.latitude)+','+str(geo.longitude)+',10z">'
check=False
if len(city)>0:
entry=entry+escape(city)
check=True
if len(state)>0:
if check:
entry=entry+', '
entry=entry+escape(state)
check=True
if len(country)>0:
if check:
entry=entry+', '
entry=entry+escape(country)
entry=entry+'</a></div>'
if position.find('jp_application_deadline') is not None and position.find('jp_application_deadline').text is not None:
datevar=datetime.datetime.strptime(position.find('jp_application_deadline').text,"%Y-%m-%d %H:%M:%S")
entry=entry+'<div style="font-size:large;font-weight:bold;color:#b30059">DEADLINE: '+datevar.strftime("%B %d")+'</div>'
    if jel_codes:
entry=entry+'<div style="margin-top:1.5em;margin-bottom:0em;font-size:small">Research fields:</div>'
entry=entry+'<ul>'
for k in jel_codes:
entry=entry+'<li style="color:black">'+escape(k.text)+'</li>'
entry=entry+'</ul>'
if keywords is not None:
entry=entry+'<div style="margin-top:1.5em;margin-bottom:0em;font-size:small">Keywords:</div>'
entry=entry+'<ul>'
for k in keywords:
entry=entry+'<li style="color:black">'+escape(k)+'</li>'
entry=entry+'</ul>'
#clean the ad text
allowed_tags=['a','abbr','acronym','address','area','b','bdo','big','blockquote','br','caption','center','cite','code','col','colgroup','dd','del','dfn','div','dl','dt','em','font','h1','h2','h3','h4','h5','h6','hr','i','img','ins','kbd','li','map','ol','p','pre','q','s','samp','small','span','strike','strong','sub','sup','table','tbody','td','tfoot','th','thead','title','tr','tt','u','ul','var','xmp']
allowed_attrib=['style','href']
allowed_styles=['font-size','font-weight','margin-bottom','margin-top','color','white-space','word-wrap']
ad_clean=bleach.clean(position.find('jp_full_text').text,allowed_tags,allowed_attrib,allowed_styles, strip=True,strip_comments=True)
entry=entry+'<pre style="white-space:pre-wrap;word-wrap:break-word;">'+escape(ad_clean)+'</pre>'
entry=entry + \
'</en-note>'
contenttag=ET.SubElement(note, "content")
ET.SubElement(contenttag, "![CDATA[").text=entry
# xmlstr = ElementTree.tostring(ET, encoding='utf8', method='xml')
note_attr=ET.SubElement(note, "note-attributes")
note_attr.text=''
ET.SubElement(note_attr, "author").text = 'JOE'
if geo is not None:
ET.SubElement(note_attr, "latitude").text = str(geo.latitude)
ET.SubElement(note_attr, "longitude").text = str(geo.longitude)
ET.SubElement(note_attr, "altitude").text = '0'
#reminder and reminder order from
if position.find('jp_application_deadline') is not None and position.find('jp_application_deadline').text is not None:
datevar=datetime.datetime.strptime(position.find('jp_application_deadline').text,"%Y-%m-%d %H:%M:%S")
year_corr=max(min(datevar.year,datetime.date.today().year+1),datetime.date.today().year)
try:
datevar=datetime.date(year_corr,datevar.month,datevar.day)
except ValueError:
#February 29 in a wrong year..
datevar=datetime.date(year_corr,datevar.month,datevar.day-1)
ET.SubElement(note_attr, "reminder-order").text = str(calendar.timegm(datevar.timetuple()))
ET.SubElement(note_attr, "reminder-time").text = datevar.strftime("%Y%m%dT%H%M%SZ")
#clean the objects
note_attr=None
note=None
with open(evernote_xmlfile, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8" ?>\n<!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export3.dtd">\n')
ET.ElementTree(root2).write(f,'utf-8')
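# For reference, the generated ENEX file has this overall shape (illustrative,
# content abridged):
#     <?xml version="1.0" encoding="UTF-8" ?>
#     <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export3.dtd">
#     <en-export>
#       <note>
#         <title>...</title>
#         <tag>...</tag>
#         <content><![CDATA[...ENML document...]]></content>
#         <note-attributes>...</note-attributes>
#       </note>
#     </en-export>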
|
|
# JN 2015-04-22
# JN 2021-12-01 adding type check for stored sign
# refactoring
"""
manages spikes and sorting, after concatenation
"""
import numpy as np
import tables
import os
from .. import SIGNS, TYPE_NAMES, TYPE_ART, GROUP_NOCLASS, GROUP_ART, NcsFile,\
TYPE_NON_NOISE, TYPE_ALL
debug = False
class SortingFile(object):
"""
represents a grouped sorting file
"""
def __del__(self):
self.h5fid.close()
def __init__(self, h5fname):
self.h5fid = tables.open_file(h5fname, 'r+')
self.index = self.h5fid.root.index[:]
self.classes = self.h5fid.root.classes[:]
self.groups = self.h5fid.root.groups[:]
self.types = self.h5fid.root.types[:]
temp = self.h5fid.get_node_attr('/', 'sign')
if debug:
print(f'Detected type {type(temp)}')
try:
self.sign = str(temp, 'utf-8')
except TypeError:
self.sign = temp
self.basedir = os.path.dirname(h5fname)
self.matches = self.h5fid.root.matches[:]
def get_gids(self):
"""
return list of gids
"""
return np.unique(self.groups[:, 1])
def get_cluster_ids_by_gid(self, gid):
"""
return class ids for a group
"""
idx = self.groups[:, 1] == gid
return self.groups[idx, 0]
def get_non_noise_cluster_index(self):
"""
returns an index of spikes that are not in
unassigned or artifact groups
"""
bad_groups = np.array((GROUP_ART, GROUP_NOCLASS))
idx = np.in1d(self.types[:, 1], bad_groups)
        gids = self.types[~idx, 0]
idx = self.get_cluster_index_joined_list(gids)
return idx
def get_cluster_index(self, clid):
"""
return index for a cluster
"""
return self.index[self.classes == clid]
def _get_group_matches(self, gid):
"""
specific function to get matches
"""
clids = self.get_cluster_ids_by_gid(gid)
return self.matches[np.in1d(self.classes, clids)]
def get_cluster_index_joined(self, gid):
"""
return index for group (concatenated from all clusters)
get_cluster_index_alt will be renamed to this function
"""
clids = self.get_cluster_ids_by_gid(gid)
all_idx = []
for clid in clids:
# print('Getting index for {}'.format(clid))
all_idx.append(self.get_cluster_index(clid))
return np.sort(np.hstack(all_idx))
def get_cluster_index_alt(self, gid):
"""
alternative implementation
"""
return self.get_cluster_index_joined_list([gid])
def get_cluster_index_joined_list(self, gids):
"""
return index for several groups together
"""
idx = np.in1d(self.groups[:, 1], gids)
all_clids = self.groups[idx, 0]
return self.index[np.in1d(self.classes, all_clids)]
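    # Illustration (made-up values) of the three parallel tables used above:
    # self.groups maps class id -> group id, self.classes holds each spike's
    # class id, and self.index holds each spike's position in the data file.
    # With groups = [[7, 1], [8, 1], [9, 2]],
    # get_cluster_index_joined_list([1]) selects class ids {7, 8} and returns
    # every entry of self.index whose matching self.classes entry is 7 or 8.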
def get_group_type(self, gid):
"""
return the type of a group
"""
idx = self.types[:, 0] == gid
return self.types[idx, 1][0]
def save_groups_and_types(self, groups, types):
"""
save a new group and type array
"""
self.groups = groups
self.types = types
self.h5fid.root.groups[:] = groups
self.h5fid.remove_node('/', 'types')
self.h5fid.create_array('/', 'types', types)
self.h5fid.flush()
class SortingManagerGrouped(object):
"""
represents a sorting session after grouping
"""
def __del__(self):
if self.h5datafile is not None:
self.h5datafile.close()
def __init__(self, h5fname):
self.basedir = os.path.dirname(h5fname)
self.h5datafile = None
try:
self.h5datafile = tables.open_file(h5fname, 'r')
except IOError as error:
print('Could not initialize {}: {}'.format(h5fname, error))
self.initialized = False
return
self.start_idx = None
self.stop_idx = None
self.sign = None
self.all_times = dict()
self.spikes = dict()
self.times = dict()
for sign in SIGNS:
self.all_times[sign] = None
self.spikes[sign] = None
self.times[sign] = None
self.sorting = None
self.header = None
self.init_header()
self.initialized = True
def get_thresholds(self):
"""
get extraction thresholds
"""
try:
thr = self.h5datafile.root.thr[:, :]
except tables.exceptions.NoSuchNodeError:
print('Extraction thresholds were not saved!')
thr = None
return thr
def init_header(self):
"""
        Tries to initialize an ncs header. Not necessarily possible.
"""
ext = os.path.basename(self.basedir)
cand_folders = (os.path.join(self.basedir, '..'),
ext)
name = None
for folder in cand_folders:
for suffix in ('.ncs', '.Ncs'):
cand_name = os.path.join(folder, ext + suffix)
if os.path.exists(cand_name):
name = cand_name
break
if name is not None:
fid = NcsFile(name)
self.header = fid.header
del fid
return
for folder in cand_folders:
cand_name = os.path.join(folder, 'channel_names.csv')
if os.path.exists(cand_name):
import csv
with open(cand_name) as fid:
reader = csv.reader(fid, delimiter=';')
names = {l[0]: l[1] for l in reader}
self.header = {'AcqEntName': names[ext]}
return
print('Ncs file not found, no header!')
self.header = None
def init_sorting(self, sorting_folder):
"""
initialize a sorting folder
returns True if init worked, else False
"""
sorting_path = os.path.join(sorting_folder, 'sort_cat.h5')
if os.path.exists(sorting_path):
self.sorting = SortingFile(sorting_path)
self.sign = self.sorting.sign
return True
else:
return False
def get_start_stop_index(self, sign, start_time, stop_time):
"""
return where to start and stop for a given time frame
"""
if self.times[sign] is None:
self.times[sign] = self.h5datafile.get_node('/' + sign, 'times')[:]
t = self.times[sign]
start_idx = t.searchsorted(start_time)
stop_idx = t.searchsorted(stop_time)
if stop_idx + 1 < t.shape[0]:
stop_idx += 2
return start_idx, stop_idx
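    # Sketch (made-up numbers): for times [10, 20, 30, 40] and the frame
    # (15, 25), searchsorted yields start_idx=1 and stop_idx=2; the check
    # above then pads stop_idx to 4 so the slice stays generous at the
    # right edge of the time frame.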
def set_sign_times_spikes(self, sign, start_idx=0, stop_idx=np.inf):
"""
set times, spikes, start, stop
"""
self.start_idx = start_idx
if stop_idx in [np.inf, None]:
stop_idx = self.h5datafile.get_node('/' + sign,
'times').shape[0]
self.stop_idx = stop_idx
self.sign = str(sign)
self.spikes[sign] =\
self.h5datafile.\
get_node('/' + sign, 'spikes')[start_idx:stop_idx, :]
if self.all_times[sign] is not None:
t = self.all_times[sign]
else:
t = self.h5datafile.get_node('/' + sign, 'times')
self.times[sign] = t[start_idx:stop_idx]
def get_groups(self, times=True, spikes=True):
"""
return groups, each containing its times and spikes if requested
"""
gids = self.sorting.get_gids()
ret = dict()
for gid in gids:
clids = self.sorting.get_cluster_ids_by_gid(gid)
for clid in clids:
idx = self.sorting.get_cluster_index(clid)
# shorten it
sel = (idx >= self.start_idx) & (idx < self.stop_idx)
idx = idx[sel] - self.start_idx
if idx.any():
if gid not in ret:
ret[gid] = dict()
ret[gid][clid] = dict()
if times:
ret[gid][clid]['times'] = self.times[self.sign][idx]
if spikes:
ret[gid][clid]['spikes'] =\
self.spikes[self.sign][idx, :]
imgname = 'class_{:03d}.png'.format(clid)
imgpath1 = os.path.join(self.basedir, self.sorting.basedir,
imgname)
imgpath2 = os.path.join(self.sorting.basedir, imgname)
if os.path.exists(imgpath1):
imgval = imgpath1
elif os.path.exists(imgpath2):
imgval = imgpath2
else:
imgval = None
ret[gid][clid]['image'] = imgval
return ret
def get_group_joined(self, gid, times=True, spikes=True, artifacts=True):
"""
get one group, all clusters joined
"""
ret = dict()
gtype = self.get_group_type(gid)
if (artifacts is False) and (gtype == TYPE_ART):
return ret
idx = self.sorting.get_cluster_index_joined(gid)
n_clusters = len(self.sorting.get_cluster_ids_by_gid(gid))
# shorten it
sel = (idx >= self.start_idx) & (idx <= self.stop_idx)
if not sel.any():
return ret
idx = idx[sel] - self.start_idx
# idx -= self.start_idx
shape = self.times[self.sign].shape[0]
if idx[-1] >= shape:
idx = idx[idx < shape]
print('Shortened index!')
ret['type'] = gtype
ret['n_clusters'] = n_clusters
if times:
ret['times'] = self.times[self.sign][idx]
if spikes:
ret['spikes'] = self.spikes[self.sign][idx]
return ret
def get_data_from_index(self, index, times=True, spikes=True):
"""
return data from a given index
"""
idx = index - self.start_idx
ret = dict()
if times:
ret['times'] = self.times[self.sign][idx]
if spikes:
ret['spikes'] = self.spikes[self.sign][idx]
return ret
def get_groups_joined(self, times=True, spikes=True, artifacts=True):
"""
return groups with times and spikes joined
"""
gids = self.sorting.get_gids()
ret = dict()
for gid in gids:
group = self.get_group_joined(gid, times, spikes, artifacts)
if len(group) > 0:
ret[gid] = group
return ret
def get_group_type(self, gid):
"""
return group type
"""
return self.sorting.get_group_type(gid)
def get_samples_per_spike(self):
"""
return samples per spike...
"""
return self.spikes[self.sign].shape[1]
def save_groups_and_types(self, groups, types):
"""
save to underlying sorting file
"""
self.sorting.save_groups_and_types(groups, types)
def get_group_table(self):
"""
get group table
"""
return self.sorting.groups
def get_type_table(self):
"""
get type table
"""
return self.sorting.types
def get_non_noise_spikes(self, spikes=True, times=True):
"""
return all non-noise spikes joined
"""
idx = self.sorting.get_non_noise_cluster_index()
sel = (idx >= self.start_idx) & (idx < self.stop_idx)
idx = idx[sel]
ret = self.get_data_from_index(idx, times=times, spikes=spikes)
ret['type'] = TYPE_NON_NOISE
return ret
def get_all_spikes(self):
"""
return all spikes
"""
sel = (self.sorting.index >= self.start_idx) &\
(self.sorting.index < self.stop_idx)
idx = self.sorting.index[sel]
ret = self.get_data_from_index(idx)
ret['type'] = TYPE_ALL
return ret
class Combinato(SortingManagerGrouped):
"""
convenience class, reads sorted data
"""
def __init__(self, fname, sign, label):
self.initialized = False
self.h5datafile = None # in case of early return
basedir = os.path.dirname(fname)
labelbasename = os.path.basename(label)
sorting_session = os.path.join(basedir, labelbasename)
# quick check if we can do this
if not os.path.exists(sorting_session):
print('Session folder {} '
'not found'.format(sorting_session))
return
super(Combinato, self).__init__(fname)
self.set_sign_times_spikes(sign)
res = self.init_sorting(sorting_session)
if not res:
print('Sorting session {} '
'not initialized'.format(sorting_session))
else:
self.initialized = True
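# Usage sketch (file and label names are hypothetical):
#     c = Combinato('CSC1/data_CSC1.h5', 'pos', 'sort_pos_simple')
#     if c.initialized:
#         groups = c.get_groups_joined(times=True, spikes=False)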
def test(name, label, ts):
"""
simple test case, needs a folder as argument
"""
with open(ts) as fid:
start, stop = [int(x)/1000. for x in fid.readline().split()]
man = SortingManagerGrouped(name)
if not man.initialized:
return
print('Working on {}, from time {} to {} ({:.1f} min)'
.format(name, start, stop, (stop-start)/6e4))
start_idx, stop_idx = man.get_start_stop_index('pos', start, stop)
print('Setting start index: {}, stop index: {}'.
format(start_idx, stop_idx))
man.set_sign_times_spikes('pos', start_idx, stop_idx)
ret = man.init_sorting(os.path.join(os.path.dirname(name), label))
if not ret:
print('Unable to initialize!')
return
print(man.sorting.index.shape)
groups = man.get_groups()
print('Retrieved Groups')
    test_gid = next(iter(groups))
man.get_group_joined(test_gid)
all_groups = man.get_groups_joined()
# iterate through clusters
all_good = 0
for k, v in groups.items():
print('Group {} type {}'.format(k, TYPE_NAMES[man.get_group_type(k)]))
print(v.keys())
sumidx = 0
for clid in v:
print('Cluster {} len {}'.format(clid, v[clid]['times'].shape[0]))
sumidx += v[clid]['times'].shape[0]
if man.get_group_type(k) > 0:
all_good += sumidx
idx1 = man.sorting.get_cluster_index_joined(k)
idx2 = man.sorting.get_cluster_index_alt(k)
assert not (idx1 - idx2).any()
print('Total index len {} vs {} summed'.
format(idx1.shape[0], sumidx))
# assert idx1.shape[0] == sumidx
non_noise_spk = man.get_non_noise_spikes()
total = man.get_all_spikes()
print('Non-noise index has {} elements'.
format(non_noise_spk['times'].shape[0]))
assert non_noise_spk['times'].shape[0] == all_good
print('Total has {} elements'.format(total['times'].shape[0]))
for gid, group in all_groups.items():
print('Group {} has {} times, type {} and {} members'.
format(gid, group['times'].shape[0],
TYPE_NAMES[group['type']], group['n_clusters']))
|
|
import pickle
import hashlib
import datetime
import logging
import dateutil.parser
from django.db import models
from django.db.models import Q
from django.db.models import Max, Min
from django_pgjson.fields import JsonField
from core.utilities import check_domain_valid, get_base_domain
from pivoteer.records import RecordType, RecordSource
from core.lookups import geolocate_ip
LOGGER = logging.getLogger(__name__)
class IndicatorManager(models.Manager):
def host_records(self, indicator):
record_type = RecordType.HR
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator))
return records
def recent_cert(self, indicator):
"""Retrieve the most recent censys.io certificate result for the provided indicator
# Updated by LNguyen
# Date: 05May2017
Args:
indicator (str): The indicator to search for
Returns (IndicatorRecord): The IndicatorRecord for the most recently saved
result for the provided indicator or an empty query set if no record was found.
"""
        # TODO: Why are we returning empty query sets versus None when there are no results?
record_type = RecordType.CE
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=indicator)).values('info', 'info_date')
# records = self.get_queryset().filter(Q(record_type=record_type.name),
# Q(info_date__gte=time_frame),
# Q(info__at_indicator__exact=indicator)).values('info', 'info_date')
if records:
return records.latest('info_date')
LOGGER.info("Failed to retrieve certificate data for indicator %s" % indicator)
return records
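    # Usage sketch ('example.com' is a placeholder):
    #     IndicatorRecord.objects.recent_cert('example.com')
    # yields the newest certificate record from the last 24 hours, or an
    # empty queryset when nothing matched.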
def recent_tc(self, indicator):
"""Retrieve the most recent ThreatCrowd record for the provided indicator
# Updated by LNguyen
# Date: 05May2017
Args:
indicator (str): The indicator to search for
Returns (IndicatorRecord): The indicator record for the most recently saved
result for the provided indicator.
"""
record_type = RecordType.TR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=indicator)).values('info', 'info_date')
# records = self.get_queryset().filter(Q(record_type=record_type.name),
# Q(info_date__gte=time_frame),
# Q(info__at_domain__exact=indicator) |
# Q(info__at_ip__exact=indicator)).values('info', 'info_date')
if records:
return records.latest('info_date')
LOGGER.info("Failed to retrieve ThreatCrowd data for indicator %s" % indicator)
return records
def recent_hosts(self, indicator):
# Updated by LNguyen
# Date: 1Aug2017
# Description: Update query to return only recent DNS data and exclude PDNS and Passive Total Data
record_type = RecordType.HR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
records = self.get_queryset().filter(~Q(info_source=RecordSource.PDS.name),
~Q(info_source=RecordSource.PTO.name),
Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=indicator))
# records = self.get_queryset().filter(Q(record_type=record_type.name),
# Q(info_date__gte=time_frame),
# Q(info__at_domain__endswith=indicator) |
# Q(info__at_ip__endswith=indicator))
return records
def dns_historical_hosts(self, indicator, request):
# Updated by LNguyen
# Date: 1Aug2017
# Description: Query to retrieve Historical DNS dataset
record_type = RecordType.HR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
time_start = datetime.datetime.utcnow() - datetime.timedelta(days=366)
records = self.get_queryset().filter(Q(info_source=RecordSource.DNS.name),
Q(record_type=record_type.name),
Q(info_date__lt=time_frame),
Q(indicator=indicator)).values('info', 'info_date', 'info_source')
return records
def historical_hosts(self, indicator, request):
# Updated by LNguyen
# Date: 1Aug2017
# Description: Query to retrieve other Historical dataset (with exception of PDNS and DNS data because they're handled in separate queries)
# This query also includes Passive Total Data in the dataset.
record_type = RecordType.HR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
time_start = datetime.datetime.utcnow() - datetime.timedelta(days=366)
if request.user.is_staff:
records = self.get_queryset().filter(~Q(info_source=RecordSource.PDS.name),
~Q(info_source=RecordSource.DNS.name),
Q(record_type=record_type.name),
Q(info_date__lt=time_frame),
Q(indicator=indicator)).values('info', 'info_date', 'info_source')
else:
records = self.get_queryset().filter(~Q(info_source=RecordSource.PDS.name),
~Q(info_source=RecordSource.IID.name),
~Q(info_source=RecordSource.DNS.name),
Q(record_type=record_type.name),
Q(info_date__lt=time_frame),
Q(indicator=indicator)).values('info', 'info_date', 'info_source')
return records
def pto_hosts(self, indicator, request):
        # Updated by LNguyen
# Date: 24Oct2017
# Description: Query to retrieve Passive Total Data for Historical dataset
record_type = RecordType.HR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
records = self.get_queryset().filter(Q(info_source=RecordSource.PTO.name),
Q(record_type=record_type.name),
Q(indicator=indicator)).values('info', 'info_date', 'info_source')
return records
def pds_hosts(self, indicator, request):
# Updated by LNguyen
# Date: 1Aug2017
# Description: Query to retrieve PDNS Data for Historical dataset
record_type = RecordType.HR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
records = self.get_queryset().filter(Q(info_source=RecordSource.PDS.name),
Q(record_type=record_type.name),
Q(indicator=indicator)).values('info', 'info_date', 'info_source')
return records
def malware_records(self, indicator):
# Updated by LNguyen
        # Date: 12May2017
# Description: Update to store dataset in array variable because dates were not being handled as datetime and date sorting was not working
record_type = RecordType.MR
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator))
records_complete = []
for record in records:
new_record = {
'info': record.info,
'info_date': record.info_date,
'info_hash':record.info_hash,
'get_info_source_display':record.get_info_source_display()
}
records_complete.append(new_record)
        return records_complete
def recent_malware(self, indicator):
record_type = RecordType.MR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-30)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=indicator))
return records
def historical_malware(self, indicator):
record_type = RecordType.MR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-30)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__lt=time_frame),
Q(indicator=indicator))
return records
def whois_records(self, indicator):
record_type = RecordType.WR
if check_domain_valid(indicator):
indicator = get_base_domain(indicator)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator)).values('info', 'info_date')
return records
def recent_whois(self, indicator):
# Updated by LNguyen
# Date: 26April2017
# Description: Former query was not correctly handling unicode characters in the info field so had to update where condition to use wildcard contains
record_type = RecordType.WR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
if check_domain_valid(indicator):
indicator = get_base_domain(indicator)
record = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=indicator)).values('info', 'info_date')
# record = self.get_queryset().filter(Q(record_type=record_type.name),
# Q(info_date__gte=time_frame),
# Q(info__at_query__iendswith=indicator) |
# Q(info__at_domain_name__iendswith=indicator)).values('info', 'info_date')
if record:
return record.latest('info_date')
return record
def historical_whois(self, indicator):
# Updated by LNguyen
# Date: 26April2017
# Description: Former query was not correctly handling unicode characters in the info field so had to update where condition to use wildcard contains
# Updated by LNguyen
        # Date: 12May2017
# Description: Update to store dataset in array variable because dates were not being handled as datetime and date sorting was not working
record_type = RecordType.WR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
if check_domain_valid(indicator):
indicator = get_base_domain(indicator)
raw_records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator)).values('info_hash', 'info_date', 'info')
# raw_records = self.get_queryset().filter(Q(record_type=record_type.name),
# Q(info_date__lt=time_frame),
# Q(info__at_query__endswith=indicator) |
# Q(info__at_domain_name__endswith=indicator)).values('info_hash',
# 'info_date')
latest = raw_records.latest('info_date')['info_date']
earliest = raw_records.earliest('info_date')['info_date']
span = str(earliest) + " / " + str(latest)
unique_records = []
for record in raw_records:
new_record = {'latest': latest,
'earliest': earliest,
'info_date': span,
'info': record['info']}
unique_records.append(new_record)
# annotated_records = raw_records.annotate(latest=Max('info_date')).annotate(earliest=Min('info_date'))
# for record in annotated_records:
# hash_value = record['info_hash']
# if hash_value not in tracking:
# record_info = self.get_queryset().filter(info_hash=hash_value).values('info')[0]['info']
# span = str(record['earliest']) + " / " + str(record['latest'])
# new_record = {'latest': record['latest'],
# 'earliest': record['earliest'],
# 'info_date': span,
# 'info': record_info}
# unique_records.append(new_record)
# tracking.append(hash_value)
return unique_records
def get_threatlab_record(self, indicator):
"""
Retrieve DNSTwist records for an indicator from the database.
:param indicator: The indicator value
:return: The DNSTwist records for the indicator
"""
import dateutil.parser
record_type = RecordType.TL
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator)).values('info', 'info_date')
# records_complete = []
#
# for record in records:
#
# for result in record['info']['results']:
#
# new_record = {
# 'domain': result['domain'],
# 'ip': result['rdata'][0],
# 'timefirst': dateutil.parser.parse(result['timefirst']),
# 'timelast': dateutil.parser.parse(result['timelast']),
# 'type': result['type'],
# 'info_date':record['info_date']
# }
# records_complete.append(new_record)
# for record in records_complete:
# print(record.info_date)
# print(record.domain)
return records
def safebrowsing_record(self, indicator):
record_type = RecordType.SB
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator))
return records
def get_search_records(self, indicator):
"""
Retrieve any search records from within the last 24 hours for an indicator from the database.
:param indicator: The indicator value
:return: The search records for the indicator
"""
record_type = RecordType.SR
time_frame = datetime.datetime.utcnow() + datetime.timedelta(hours=-24)
value = indicator
if check_domain_valid(indicator):
value = get_base_domain(indicator)
LOGGER.debug("Using search value: %s", value)
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(info_date__gte=time_frame),
Q(indicator=value)).values('info', 'info_date')
if LOGGER.isEnabledFor(logging.INFO):
rank = 0
msg = "Found %d search record(s):" % len(records)
for record in records:
info = record['info']
results = info['results']
for result in results:
rank += 1
url = result['url']
msg += "\n\t%d - %s" % (rank, url)
LOGGER.info(msg)
return records
def get_dnstwist_record(self, indicator):
"""
Retrieve DNSTwist records for an indicator from the database.
:param indicator: The indicator value
:return: The DNSTwist records for the indicator
"""
record_type = RecordType.DR
records = self.get_queryset().filter(Q(record_type=record_type.name),
Q(indicator=indicator)).values('info', 'info_date')
return records
class IndicatorRecord(models.Model):
record_choices = tuple((rt.name, rt.title) for rt in RecordType)
source_choices = tuple((rs.name, rs.title) for rs in RecordSource)
record_type = models.CharField(max_length=2, choices=record_choices)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True)
info = JsonField()
info_source = models.CharField(max_length=3, choices=source_choices)
info_hash = models.CharField(max_length=40)
info_date = models.DateTimeField()
indicator = models.CharField(max_length=253, blank=True, null=True)
objects = IndicatorManager()
class Meta:
unique_together = (("indicator", "info_hash", "info_source", "info_date"),)
def generate_hash(self):
info_pickle = pickle.dumps(self.info)
info_sha1 = hashlib.sha1(info_pickle).hexdigest()
return info_sha1
def save(self, *args, **kwargs):
if not self.info_hash:
self.info_hash = self.generate_hash()
super(IndicatorRecord, self).save(*args, **kwargs)
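    # Usage sketch (values are hypothetical): info_hash is derived from the
    # pickled info payload on first save, so re-saving an identical payload
    # trips the unique_together constraint instead of duplicating rows:
    #     rec = IndicatorRecord(record_type='HR', info_source='DNS',
    #                           info={'ip': '198.51.100.7'},
    #                           info_date=datetime.datetime.utcnow(),
    #                           indicator='example.com')
    #     rec.save()  # generate_hash() fills info_hash automatically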
class TaskTracker(models.Model):
""" Tracker for identifying and resuming tasks """
keyword = models.CharField(max_length=253)
group_id = models.CharField(max_length=50)
type = models.CharField(max_length=50)
date = models.DateTimeField()
class ExternalSessions(models.Model):
""" External cookie sessions for scrapers """
# Note: Yes, this syntax is mildly awkward, but it allows for very easy addition of additional sources in the list
# at the end of the line
service_choices = tuple((rs.name, rs.title) for rs in RecordSource if rs in [RecordSource.IID])
service = models.CharField(max_length=3, choices=service_choices)
cookie = JsonField()
|
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""pytest is a tool that eases test running and debugging.
To be able to use pytest, you should either write tests using
the logilab.common.testlib framework or the unittest module of
Python's standard library.
You can customize pytest's behaviour by defining a ``pytestconf.py`` file
somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define an ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser's instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your tests for fine-grained filtering
with tags like::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
You can then filter the tests with a simple Python expression:
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi`` match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` matches ``rouge and not carre``
"""
__docformat__ = "restructuredtext en"
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
pytest --coverage test_foo.py
(only if logilab.devtools is available)
"""
ENABLE_DBC = False
FILE_RESTART = ".pytest.restart"
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
import types
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.compat import any
import doctest
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2.suite as unittest_suite
except ImportError:
sys.exit("You have to install python-unittest2 to use this module")
else:
import unittest.suite as unittest_suite
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'
## coverage hacks, do not read this, do not read this, do not read this
# hey, but this is an aspect, right ?!!!
class TraceController(object):
nesting = 0
def pause_tracing(cls):
if not cls.nesting:
cls.tracefunc = staticmethod(getattr(sys, '__settrace__', sys.settrace))
cls.oldtracer = getattr(sys, '__tracer__', None)
sys.__notrace__ = True
cls.tracefunc(None)
cls.nesting += 1
pause_tracing = classmethod(pause_tracing)
def resume_tracing(cls):
cls.nesting -= 1
assert cls.nesting >= 0
if not cls.nesting:
cls.tracefunc(cls.oldtracer)
delattr(sys, '__notrace__')
resume_tracing = classmethod(resume_tracing)
pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
def nocoverage(func):
if hasattr(func, 'uncovered'):
return func
func.uncovered = True
def not_covered(*args, **kwargs):
pause_tracing()
try:
return func(*args, **kwargs)
finally:
resume_tracing()
not_covered.uncovered = True
return not_covered
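# Usage sketch: decorating a helper suppresses settrace-based coverage
# collection for the duration of the call, e.g.
#     @nocoverage
#     def expensive_setup():
#         ...   # runs without the coverage trace hook installed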
## end of coverage hacks
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
"""returns True if `filename` seems to be a test file"""
return TESTFILE_RE.match(osp.basename(filename))
TESTDIR_RE = re.compile("^(unit)?tests?$")
def this_is_a_testdir(dirpath):
"""returns True if `filename` seems to be a test directory"""
return TESTDIR_RE.match(osp.basename(dirpath))
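# Examples: TESTFILE_RE accepts 'test_foo.py', 'unittest_bar.py' and
# 'smoketest.py' but not 'foo_test.py'; TESTDIR_RE accepts exactly
# 'test', 'tests', 'unittest' and 'unittests'.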
def load_pytest_conf(path, parser):
"""loads a ``pytestconf.py`` file and update default parser
and / or tester.
"""
namespace = {}
execfile(path, namespace)
if 'update_parser' in namespace:
namespace['update_parser'](parser)
return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=None):
    """try to find project's root and add it to sys.path"""
    if projdir is None:
        # evaluate the default at call time, not at import time
        projdir = os.getcwd()
    previousdir = curdir = osp.abspath(projdir)
testercls = PyTester
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
while this_is_a_testdir(curdir) or \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
break
previousdir = curdir
curdir = newdir
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
return previousdir, testercls
class GlobalTestReport(object):
"""this class holds global test statistics"""
def __init__(self):
self.ran = 0
self.skipped = 0
self.failures = 0
self.errors = 0
self.ttime = 0
self.ctime = 0
self.modulescount = 0
self.errmodules = []
def feed(self, filename, testresult, ttime, ctime):
"""integrates new test information into internal statistics"""
ran = testresult.testsRun
self.ran += ran
self.skipped += len(getattr(testresult, 'skipped', ()))
self.failures += len(testresult.failures)
self.errors += len(testresult.errors)
self.ttime += ttime
self.ctime += ctime
self.modulescount += 1
if not testresult.wasSuccessful():
problems = len(testresult.failures) + len(testresult.errors)
self.errmodules.append((filename[:-3], problems, ran))
def failed_to_test_module(self, filename):
"""called when the test module could not be imported by unittest
"""
self.errors += 1
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 1, 1))
def skip_module(self, filename):
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 0, 0))
def __str__(self):
"""this is just presentation stuff"""
line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)'
% (self.ran, self.ttime, self.ctime)]
if self.errors:
line1.append('%s errors' % self.errors)
if self.failures:
line1.append('%s failures' % self.failures)
if self.skipped:
line1.append('%s skipped' % self.skipped)
modulesok = self.modulescount - len(self.errmodules)
if self.errors or self.failures:
line2 = '%s modules OK (%s failed)' % (modulesok,
len(self.errmodules))
descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules])
line3 = '\nfailures: %s' % descr
elif modulesok:
line2 = 'All %s modules OK' % modulesok
line3 = ''
else:
return ''
return '%s\n%s%s' % (', '.join(line1), line2, line3)
def remove_local_modules_from_sys(testdir):
"""remove all modules from cache that come from `testdir`
This is used to avoid strange side-effects when using the
testall() mode of pytest.
For instance, if we run pytest on this tree::
A/test/test_utils.py
B/test/test_utils.py
we **have** to clean sys.modules to make sure the correct test_utils
module is ran in B
"""
for modname, mod in sys.modules.items():
if mod is None:
continue
if not hasattr(mod, '__file__'):
# this is the case of some built-in modules like sys, imp, marshal
continue
modfile = mod.__file__
# if modfile is not an absolute path, it was probably loaded locally
# during the tests
if not osp.isabs(modfile) or modfile.startswith(testdir):
del sys.modules[modname]
class PyTester(object):
"""encapsulates testrun logic"""
def __init__(self, cvg, options):
self.report = GlobalTestReport()
self.cvg = cvg
self.options = options
self.firstwrite = True
self._errcode = None
def show_report(self):
"""prints the report and returns appropriate exitcode"""
# everything has been ran, print report
print "*" * 79
print self.report
def get_errcode(self):
# errcode set explicitly
if self._errcode is not None:
return self._errcode
return self.report.failures + self.report.errors
def set_errcode(self, errcode):
self._errcode = errcode
errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
here = os.getcwd()
for dirname, dirs, _ in os.walk(here):
for skipped in STD_BLACKLIST:
if skipped in dirs:
dirs.remove(skipped)
basename = osp.basename(dirname)
if this_is_a_testdir(basename):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
if self.report.ran == 0:
print "no test dir found testing here:", here
# if no test was found during the visit, consider
# the local directory as a test directory even if
# it doesn't have a traditional test directory name
self.testonedir(here)
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
        return True when all tests have been executed, False if exitfirst and
        some test has failed.
"""
for filename in abspath_listdir(testdir):
if this_is_a_testfile(filename):
if self.options.exitfirst and not self.options.restart:
# overwrite restart file
try:
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(), FILE_RESTART)
raise e
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
self.firstwrite = True
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
# overwrite restart file if it has not been done already
if self.options.exitfirst and not self.options.restart and self.firstwrite:
try:
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(), FILE_RESTART)
raise e
modname = osp.basename(filename)[:-3]
try:
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
except TypeError: # < py 2.4 bw compat
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70)
try:
tstart, cstart = time(), clock()
try:
testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
options=self.options, outstream=sys.stderr)
except KeyboardInterrupt:
raise
except SystemExit, exc:
self.errcode = exc.code
raise
except testlib.SkipTest:
print "Module skipped:", filename
self.report.skip_module(filename)
return None
except Exception:
self.report.failed_to_test_module(filename)
print >> sys.stderr, 'unhandled exception occurred while testing', modname
import traceback
traceback.print_exc(file=sys.stderr)
return None
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
finally:
if dirname:
os.chdir(here)
class DjangoTester(PyTester):
def load_django_settings(self, dirname):
"""try to find project's setting and load it"""
curdir = osp.abspath(dirname)
previousdir = curdir
while not osp.isfile(osp.join(curdir, 'settings.py')) and \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
raise AssertionError('could not find settings.py')
previousdir = curdir
curdir = newdir
# late django initialization
settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
from django.core.management import setup_environ
setup_environ(settings)
settings.DEBUG = False
self.settings = settings
# add settings dir to pythonpath since it's the project's root
if curdir not in sys.path:
sys.path.insert(1, curdir)
def before_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import setup_test_environment
from django.test.utils import create_test_db
setup_test_environment()
create_test_db(verbosity=0)
self.dbname = self.settings.TEST_DATABASE_NAME
def after_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import teardown_test_environment
from django.test.utils import destroy_test_db
teardown_test_environment()
print 'destroying', self.dbname
destroy_test_db(self.dbname, verbosity=0)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
for dirname, dirs, files in os.walk(os.getcwd()):
for skipped in ('CVS', '.svn', '.hg'):
if skipped in dirs:
dirs.remove(skipped)
if 'tests.py' in files:
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
else:
basename = osp.basename(dirname)
if basename in ('test', 'tests'):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
        return True when all tests have been executed, False if exitfirst and
        some test has failed.
"""
        # special django behaviour: if tests are split across several files,
        # remove the main tests.py file and test each test file separately
testfiles = [fpath for fpath in abspath_listdir(testdir)
if this_is_a_testfile(fpath)]
if len(testfiles) > 1:
try:
testfiles.remove(osp.join(testdir, 'tests.py'))
except ValueError:
pass
for filename in testfiles:
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
self.load_django_settings(dirname)
modname = osp.basename(filename)[:-3]
print >>sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
try:
try:
tstart, cstart = time(), clock()
self.before_testfile()
testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg)
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
except SystemExit:
raise
except Exception, exc:
import traceback
traceback.print_exc()
self.report.failed_to_test_module(filename)
print 'unhandled exception occurred while testing', modname
print 'error: %s' % exc
return None
finally:
self.after_testfile()
if dirname:
os.chdir(here)
def make_parser():
"""creates the OptionParser instance
"""
from optparse import OptionParser
parser = OptionParser(usage=PYTEST_DOC)
parser.newargs = []
def rebuild_cmdline(option, opt, value, parser):
"""carry the option to unittest_main"""
parser.newargs.append(opt)
def rebuild_and_store(option, opt, value, parser):
"""carry the option to unittest_main and store
the value on current parser
"""
parser.newargs.append(opt)
setattr(parser.values, option.dest, True)
def capture_and_rebuild(option, opt, value, parser):
warnings.simplefilter('ignore', DeprecationWarning)
rebuild_cmdline(option, opt, value, parser)
# pytest options
parser.add_option('-t', dest='testdir', default=None,
help="directory where the tests will be found")
parser.add_option('-d', dest='dbc', default=False,
action="store_true", help="enable design-by-contract")
# unittest_main options provided and passed through pytest
parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
action="callback", help="Verbose output")
parser.add_option('-i', '--pdb', callback=rebuild_and_store,
dest="pdb", action="callback",
help="Enable test failure inspection (conflicts with --coverage)")
parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
dest="exitfirst", default=False,
action="callback", help="Exit on first failure "
"(only make sense when pytest run one test file)")
parser.add_option('-R', '--restart', callback=rebuild_and_store,
dest="restart", default=False,
action="callback",
help="Restart tests from where it failed (implies exitfirst) "
"(only make sense if tests previously ran with exitfirst only)")
parser.add_option('--color', callback=rebuild_cmdline,
action="callback",
help="colorize tracebacks")
parser.add_option('-s', '--skip',
# XXX: I wish I could use the callback action but it
# doesn't seem to be able to get the value
# associated to the option
action="store", dest="skipped", default=None,
help="test names matching this name will be skipped "
"to skip several patterns, use commas")
parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
action="callback", help="Minimal output")
parser.add_option('-P', '--profile', default=None, dest='profile',
help="Profile execution and store data in the given file")
parser.add_option('-m', '--match', default=None, dest='tags_pattern',
help="only execute test whose tag match the current pattern")
try:
from logilab.devtools.lib.coverage import Coverage
parser.add_option('--coverage', dest="coverage", default=False,
action="store_true",
help="run tests with pycoverage (conflicts with --pdb)")
except ImportError:
pass
if DJANGO_FOUND:
parser.add_option('-J', '--django', dest='django', default=False,
action="store_true",
help='use pytest for django test cases')
return parser
def parseargs(parser):
"""Parse the command line and return (options processed), (options to pass to
unittest_main()), (explicitfile or None).
"""
# parse the command line
options, args = parser.parse_args()
if options.pdb and getattr(options, 'coverage', False):
parser.error("'pdb' and 'coverage' options are exclusive")
filenames = [arg for arg in args if arg.endswith('.py')]
if filenames:
if len(filenames) > 1:
parser.error("only one filename is acceptable")
explicitfile = filenames[0]
args.remove(explicitfile)
else:
explicitfile = None
# someone wants DBC
testlib.ENABLE_DBC = options.dbc
newargs = parser.newargs
if options.skipped:
newargs.extend(['--skip', options.skipped])
# restart implies exitfirst
if options.restart:
options.exitfirst = True
# append additional args to the new sys.argv and let unittest_main
# do the rest
newargs += args
return options, explicitfile
def run():
parser = make_parser()
rootdir, testercls = project_root(parser)
options, explicitfile = parseargs(parser)
# mock a new command line
sys.argv[1:] = parser.newargs
covermode = getattr(options, 'coverage', None)
cvg = None
    if '' not in sys.path:
sys.path.insert(0, '')
if covermode:
# control_import_coverage(rootdir)
from logilab.devtools.lib.coverage import Coverage
cvg = Coverage([rootdir])
cvg.erase()
cvg.start()
if DJANGO_FOUND and options.django:
tester = DjangoTester(cvg, options)
else:
tester = testercls(cvg, options)
if explicitfile:
cmd, args = tester.testfile, (explicitfile,)
elif options.testdir:
cmd, args = tester.testonedir, (options.testdir, options.exitfirst)
else:
cmd, args = tester.testall, (options.exitfirst,)
try:
try:
if options.profile:
import hotshot
prof = hotshot.Profile(options.profile)
prof.runcall(cmd, *args)
prof.close()
print 'profile data saved in', options.profile
else:
cmd(*args)
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
finally:
if covermode:
cvg.stop()
cvg.save()
tester.show_report()
if covermode:
print 'coverage information stored, use it with pycoverage -ra'
sys.exit(tester.errcode)
class SkipAwareTestProgram(unittest.TestProgram):
# XXX: don't try to stay close to unittest.py, use optparse
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-i, --pdb Enable test failure inspection
-x, --exitfirst Exit on first failure
-s, --skip skip test matching this pattern (no regexp for now)
-q, --quiet Minimal output
--color colorize tracebacks
-m, --match Run only test whose tag match this pattern
-P, --profile FILE: Run the tests using cProfile and saving results
in FILE
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def __init__(self, module='__main__', defaultTest=None, batchmode=False,
cvg=None, options=None, outstream=sys.stderr):
self.batchmode = batchmode
self.cvg = cvg
self.options = options
self.outstream = outstream
super(SkipAwareTestProgram, self).__init__(
module=module, defaultTest=defaultTest,
testLoader=NonStrictTestLoader())
def parseArgs(self, argv):
self.pdbmode = False
self.exitfirst = False
self.skipped_patterns = []
self.test_pattern = None
self.tags_pattern = None
self.colorize = False
self.profile_name = None
import getopt
try:
options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
['help', 'verbose', 'quiet', 'pdb',
'exitfirst', 'restart',
'skip=', 'color', 'match=', 'profile='])
for opt, value in options:
if opt in ('-h', '-H', '--help'):
self.usageExit()
if opt in ('-i', '--pdb'):
self.pdbmode = True
if opt in ('-x', '--exitfirst'):
self.exitfirst = True
if opt in ('-r', '--restart'):
self.restart = True
self.exitfirst = True
if opt in ('-q', '--quiet'):
self.verbosity = 0
if opt in ('-v', '--verbose'):
self.verbosity = 2
if opt in ('-s', '--skip'):
self.skipped_patterns = [pat.strip() for pat in
value.split(', ')]
if opt == '--color':
self.colorize = True
if opt in ('-m', '--match'):
                    # forward the pattern through options, where
                    # does_match_tags() reads it back as 'tags_pattern'
                    setattr(self.options, 'tags_pattern', value)
if opt in ('-P', '--profile'):
self.profile_name = value
self.testLoader.skipped_patterns = self.skipped_patterns
if len(args) == 0 and self.defaultTest is None:
suitefunc = getattr(self.module, 'suite', None)
if isinstance(suitefunc, (types.FunctionType,
types.MethodType)):
self.test = self.module.suite()
else:
self.test = self.testLoader.loadTestsFromModule(self.module)
return
if len(args) > 0:
self.test_pattern = args[0]
self.testNames = args
else:
self.testNames = (self.defaultTest, )
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def runTests(self):
if self.profile_name:
import cProfile
cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name )
else:
return self._runTests()
def _runTests(self):
self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
stream=self.outstream,
exitfirst=self.exitfirst,
pdbmode=self.pdbmode,
cvg=self.cvg,
test_pattern=self.test_pattern,
skipped_patterns=self.skipped_patterns,
colorize=self.colorize,
batchmode=self.batchmode,
options=self.options)
def removeSucceededTests(obj, succTests):
""" Recursive function that removes succTests from
a TestSuite or TestCase
"""
if isinstance(obj, unittest.TestSuite):
removeSucceededTests(obj._tests, succTests)
if isinstance(obj, list):
for el in obj[:]:
if isinstance(el, unittest.TestSuite):
removeSucceededTests(el, succTests)
elif isinstance(el, unittest.TestCase):
descr = '.'.join((el.__class__.__module__,
el.__class__.__name__,
el._testMethodName))
if descr in succTests:
obj.remove(el)
# take care, self.options may be None; also honour the -r/--restart flag,
# which parseArgs stores on self rather than on options
if getattr(self, 'restart', False) or getattr(self.options, 'restart', False):
# retrieve succeeded tests from FILE_RESTART
try:
restartfile = open(FILE_RESTART, 'r')
try:
succeededtests = list(elem.rstrip('\n\r') for elem in
restartfile.readlines())
removeSucceededTests(self.test, succeededtests)
finally:
restartfile.close()
except Exception, ex:
raise Exception("Error while reading succeeded tests into %s: %s"
% (osp.join(os.getcwd(), FILE_RESTART), ex))
result = self.testRunner.run(self.test)
# help garbage collection: we want TestSuite, which hold refs to every
# executed TestCase, to be gc'ed
del self.test
if getattr(result, "debuggers", None) and \
getattr(self, "pdbmode", None):
start_interactive_mode(result)
if not getattr(self, "batchmode", None):
sys.exit(not result.wasSuccessful())
self.result = result
class SkipAwareTextTestRunner(unittest.TextTestRunner):
def __init__(self, stream=sys.stderr, verbosity=1,
exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
skipped_patterns=(), colorize=False, batchmode=False,
options=None):
super(SkipAwareTextTestRunner, self).__init__(stream=stream,
verbosity=verbosity)
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.test_pattern = test_pattern
self.skipped_patterns = skipped_patterns
self.colorize = colorize
self.batchmode = batchmode
self.options = options
def _this_is_skipped(self, testedname):
return any(pat in testedname for pat in self.skipped_patterns)
def _runcondition(self, test, skipgenerator=True):
if isinstance(test, testlib.InnerTest):
testname = test.name
else:
if isinstance(test, testlib.TestCase):
meth = test._get_test_method()
func = meth.im_func
testname = '%s.%s' % (meth.im_class.__name__, func.__name__)
elif isinstance(test, types.FunctionType):
func = test
testname = func.__name__
elif isinstance(test, types.MethodType):
func = test.im_func
testname = '%s.%s' % (test.im_class.__name__, func.__name__)
else:
return True # Not sure when this happens
if testlib.is_generator(test) and skipgenerator:
return self.does_match_tags(test) # Let inner tests decide at run time
if self._this_is_skipped(testname):
return False # this was explicitly skipped
if self.test_pattern is not None:
try:
classpattern, testpattern = self.test_pattern.split('.')
klass, name = testname.split('.')
if classpattern not in klass or testpattern not in name:
return False
except ValueError:
if self.test_pattern not in testname:
return False
return self.does_match_tags(test)
def does_match_tags(self, test):
if self.options is not None:
tags_pattern = getattr(self.options, 'tag_pattern', None)  # same key parseArgs stores for -m/--match
if tags_pattern is not None:
tags = getattr(test, 'tags', testlib.Tags())
if tags.inherit and isinstance(test, types.MethodType):
tags = tags | getattr(test.im_class, 'tags', testlib.Tags())
return tags.match(tags_pattern)
return True # no pattern
def _makeResult(self):
return testlib.SkipAwareTestResult(self.stream, self.descriptions,
self.verbosity, self.exitfirst,
self.pdbmode, self.cvg, self.colorize)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time()
test(result, runcondition=self._runcondition, options=self.options)
stopTime = time()
timeTaken = stopTime - startTime
result.printErrors()
if not self.batchmode:
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
if self.colorize:
self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
else:
self.stream.write("FAILED")
else:
if self.colorize:
self.stream.write(textutils.colorize_ansi("OK", color='green'))
else:
self.stream.write("OK")
failed, errored, skipped = map(len, (result.failures,
result.errors,
result.skipped))
det_results = []
for name, value in (("failures", result.failures),
("errors",result.errors),
("skipped", result.skipped)):
if value:
det_results.append("%s=%i" % (name, len(value)))
if det_results:
self.stream.write(" (")
self.stream.write(', '.join(det_results))
self.stream.write(")")
self.stream.writeln("")
return result
class NonStrictTestLoader(unittest.TestLoader):
"""
Overrides default testloader to be able to omit classname when
specifying tests to run on command line.
For example, if the file test_foo.py contains ::
class FooTC(TestCase):
def test_foo1(self): # ...
def test_foo2(self): # ...
def test_bar1(self): # ...
class BarTC(TestCase):
def test_bar2(self): # ...
'python test_foo.py' will run the 3 tests in FooTC
'python test_foo.py FooTC' will run the 3 tests in FooTC
'python test_foo.py test_foo' will run test_foo1 and test_foo2
'python test_foo.py test_foo1' will run test_foo1
'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2
"""
def __init__(self):
self.skipped_patterns = ()
# some magic here to accept empty list by extending
# and to provide callable capability
def loadTestsFromNames(self, names, module=None):
suites = []
for name in names:
suites.extend(self.loadTestsFromName(name, module))
return self.suiteClass(suites)
def _collect_tests(self, module):
tests = {}
for obj in vars(module).values():
if (issubclass(type(obj), (types.ClassType, type)) and
issubclass(obj, unittest.TestCase)):
classname = obj.__name__
if classname[0] == '_' or self._this_is_skipped(classname):
continue
methodnames = []
# obj is a TestCase class
for attrname in dir(obj):
if attrname.startswith(self.testMethodPrefix):
attr = getattr(obj, attrname)
if callable(attr):
methodnames.append(attrname)
# keep track of class (obj) for convenience
tests[classname] = (obj, methodnames)
return tests
def loadTestsFromSuite(self, module, suitename):
try:
suite = getattr(module, suitename)()
except AttributeError:
return []
assert hasattr(suite, '_tests'), \
"%s.%s is not a valid TestSuite" % (module.__name__, suitename)
# python2.3 does not implement __iter__ on suites, we need to return
# _tests explicitly
return suite._tests
def loadTestsFromName(self, name, module=None):
parts = name.split('.')
if module is None or len(parts) > 2:
# let the base class do its job here
return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
tests = self._collect_tests(module)
collected = []
if len(parts) == 1:
pattern = parts[0]
if callable(getattr(module, pattern, None)) and pattern not in tests:
# consider it as a suite
return self.loadTestsFromSuite(module, pattern)
if pattern in tests:
# case python unittest_foo.py MyTestTC
klass, methodnames = tests[pattern]
collected = [klass(methodname) for methodname in methodnames]
else:
# case python unittest_foo.py something
for klass, methodnames in tests.values():
# skip methodname if matched by skipped_patterns
for skip_pattern in self.skipped_patterns:
methodnames = [methodname
for methodname in methodnames
if skip_pattern not in methodname]
collected += [klass(methodname)
for methodname in methodnames
if pattern in methodname]
elif len(parts) == 2:
# case "MyClass.test_1"
classname, pattern = parts
klass, methodnames = tests.get(classname, (None, []))
collected = [klass(methodname) for methodname in methodnames
if pattern in methodname]
return collected
def _this_is_skipped(self, testedname):
return any(pat in testedname for pat in self.skipped_patterns)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
is_skipped = self._this_is_skipped
classname = testCaseClass.__name__
if classname[0] == '_' or is_skipped(classname):
return []
testnames = super(NonStrictTestLoader, self).getTestCaseNames(
testCaseClass)
return [testname for testname in testnames if not is_skipped(testname)]
def _ts_run(self, result, runcondition=None, options=None):
self._wrapped_run(result, runcondition=runcondition, options=options)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None):
for test in self:
if result.shouldStop:
break
if unittest_suite._isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if hasattr(test, '_wrapped_run'):
try:
test._wrapped_run(result, debug, runcondition=runcondition, options=options)
except TypeError:
test._wrapped_run(result, debug)
elif not debug:
try:
test(result, runcondition, options)
except TypeError:
test(result)
else:
test.debug()
def enable_dbc(*args):
"""
Without arguments, return True if contracts can be enabled and should be
enabled (see option -d), return False otherwise.
With arguments, return False if contracts can't or shouldn't be enabled,
otherwise weave ContractAspect with items passed as arguments.
"""
if not ENABLE_DBC:
return False
try:
from logilab.aspects.weaver import weaver
from logilab.aspects.lib.contracts import ContractAspect
except ImportError:
sys.stderr.write(
'Warning: logilab.aspects is not available. Contracts disabled.')
return False
for arg in args:
weaver.weave_module(arg, ContractAspect)
return True
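# Usage sketch (illustrative, not part of the original module): weaving
# contracts into a hypothetical module `mymodule` when ENABLE_DBC is set
# and logilab.aspects is installed.
#
#     import mymodule
#     if enable_dbc(mymodule):
#         print 'contracts woven into mymodule'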
# monkeypatch unittest and doctest (ouch !)
unittest._TextTestResult = testlib.SkipAwareTestResult
unittest.TextTestRunner = SkipAwareTextTestRunner
unittest.TestLoader = NonStrictTestLoader
unittest.TestProgram = SkipAwareTestProgram
if sys.version_info >= (2, 4):
doctest.DocTestCase.__bases__ = (testlib.TestCase,)
# XXX check python2.6 compatibility
#doctest.DocTestCase._cleanups = []
#doctest.DocTestCase._out = []
else:
unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
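# Usage sketch (an assumption, not in the original file): with the
# monkeypatches above applied, a test module can drive itself through the
# skip-aware machinery; batchmode=True stores the result instead of exiting.
#
#     if __name__ == '__main__':
#         prog = SkipAwareTestProgram(module='__main__', batchmode=True)
#         print prog.result.wasSuccessful()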
|
|
def conc(corpus,
query,
option = 'tregex',
dep_function = 'any',
dep_type = 'basic-dependencies',
n = 100,
random = False,
window = 100,
trees = False,
plaintext = False, #'guess',
add_links = False,
show_links = False,
print_status = True,
print_output = True,
just_speakers = False,
root = False,
**kwargs):
"""A concordancer for Tregex queries and dependencies"""
import corpkit
import os
import re
import pandas as pd
from pandas import DataFrame
from time import localtime, strftime
try:
from IPython.display import display, clear_output
except ImportError:
pass
from corpkit.other import tregex_engine
from corpkit.tests import check_pytex, check_dit
try:
get_ipython().getoutput()
except TypeError:
have_ipython = True
except NameError:
import subprocess
have_ipython = False
if query == 'any':
query = r'.*'
# convert list to query
if type(query) == list:
from other import as_regex
if option.startswith('t'):
query = r'/%s/ !< __' % as_regex(query, boundaries = 'line')
else:
query = as_regex(query, boundaries = 'w')
can_do_fast = False
if option.startswith('t'):
if just_speakers is False:
can_do_fast = True
just_speakers_is_list = False
if type(just_speakers) == list:
just_speakers_is_list = True
if type(just_speakers) == str:
if just_speakers.lower() != 'all':
just_speakers = [just_speakers]
def get_deps(sentence, dep_type):
if dep_type == 'basic-dependencies':
return sentence.basic_dependencies
if dep_type == 'collapsed-dependencies':
return sentence.collapsed_dependencies
if dep_type == 'collapsed-ccprocessed-dependencies':
return sentence.collapsed_ccprocessed_dependencies
conc_lines = []
if option.startswith('t'):
if trees:
options = '-s'
else:
options = '-t'
if can_do_fast:
speakr = ''
tregex_engine(query = query, check_query = True, root = root)
wholes = tregex_engine(query = query,
options = ['-o', '-w', '-f', options],
corpus = corpus,
preserve_case = True,
root = root)
middle_column_result = tregex_engine(query = query,
options = ['-o', options],
corpus = corpus,
preserve_case = True,
root = root)
for (f, whole), mid in zip(wholes, middle_column_result):
reg = re.compile(r'(' + re.escape(mid) + r')', re.IGNORECASE)
start, middle, end = re.split(reg, whole, 1)
conc_lines.append([os.path.basename(f), speakr, start, middle, end])
else:
fs_to_conc = []
for r, dirs, fs in os.walk(corpus):
for f in fs:
if not os.path.isfile(os.path.join(r, f)):
continue
if not f.endswith('.txt') and not f.endswith('.xml'):
continue
fs_to_conc.append(os.path.join(r, f))
def normalise(concline):
import re
reg = re.compile(r'\([^ ]+')
spaces = re.compile(r'\s+')
concline = re.sub(reg, '', concline)
concline = re.sub(spaces, ' ', concline)
concline = concline.replace(')', '').replace('  ', ' ')
return concline.strip()
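# worked example of the cleanup above:
#   normalise('(NP (DT the) (NN dog))')  ->  'the dog'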
num_fs = len(fs_to_conc)
for index, filepath in enumerate(fs_to_conc):
f = os.path.basename(filepath)
if num_fs > 1:
if 'note' in kwargs.keys():
kwargs['note'].progvar.set((index) * 100.0 / num_fs)
from time import localtime, strftime
thetime = strftime("%H:%M:%S", localtime())
print '%s: Extracting data from %s ...' % (thetime, f)
if root:
root.update()
with open(filepath, "rb") as text:
parsetreedict = {}
data = text.read()
if option.startswith('p') or option.startswith('l'):
if option.startswith('l'):
import pickle
lstokens = pickle.load(open(filepath, 'rb'))
data = ' '.join(lstokens)
data = data.split(' . ')
else:
lines = data.splitlines()
for l in lines:
m = re.compile(r'^(.*?)(' + query + r')(.*)$', re.IGNORECASE)
mat = re.search(m, l)
if mat:
conc_lines.append([f, '', mat.group(1), mat.group(2), mat.group(3)])
continue
from corenlp_xml.document import Document
corenlp_xml = Document(data)
#corenlp_xml = Beautifulcorenlp_xml(data, parse_only=justsents)
if just_speakers:
for s in just_speakers:
parsetreedict[s] = []
sents = [s for s in corenlp_xml.sentences if s.speakername in just_speakers]
#sents = [s for s in corenlp_xml.find_all('sentence') \
#if s.speakername.text.strip() in just_speakers]
else:
sents = corenlp_xml.sentences
nsents = len(sents)
for i, s in enumerate(sents):
if num_fs == 1:
if 'note' in kwargs.keys():
kwargs['note'].progvar.set(i * 100.0 / nsents)
if root:
root.update()
try:
speakr = s.speakername.strip()
except:
speakr = ''
parsetree = s.parse_string
if option.startswith('t'):
parsetreedict[speakr].append(parsetree)
continue
elif option.startswith('d'):
#right_dependency_grammar = s.find_all('dependencies', type=dep_type, limit = 1)
deps = get_deps(s, dep_type)
if dep_function == 'any' or dep_function is False:
wdsmatching = [l.dependent.text.strip() for l in deps.links \
if re.match(query, l.dependent.text.strip())]
else:
comped = re.compile(dep_function, re.IGNORECASE)
#goodsent = any(re.match(query, l.dependent.text.strip()) for l in deps.links if re.match(comped, l.type.strip()))
wdsmatching = [l.dependent.text.strip() for l in deps.links \
if re.match(comped, l.type.strip()) and \
re.match(query, l.dependent.text.strip())]
# TODO: this matching is imprecise; it should use token indices
for wd in wdsmatching:
line = normalise(parsetree)
start, middle, end = re.split(r'(' + re.escape(wd) + r')', line, 1)
conc_lines.append([f, speakr, start, middle, end])
if option.startswith('t'):
for speakr, dt in parsetreedict.items():
trees_as_string = '\n'.join(dt)
if trees:
options = '-s'
else:
options = '-t'
with open('tmp.txt', 'w') as fo:
fo.write(trees_as_string.encode('utf-8', errors = 'ignore'))
tregex_engine(query = query, check_query = True, root = root)
wholes = tregex_engine(query = query,
options = ['-o', '-w', options],
corpus = 'tmp.txt',
preserve_case = True,
root = root)
middle_column_result = tregex_engine(query = query,
options = ['-o', options],
corpus = 'tmp.txt',
preserve_case = True,
root = root)
for whole, mid in zip(wholes, middle_column_result):
reg = re.compile(r'(' + re.escape(mid) + r')', re.IGNORECASE)
start, middle, end = re.split(reg, whole, 1)
conc_lines.append([f, speakr, start, middle, end])
# does not keep results ordered!
try:
os.remove('tmp.txt')
except:
pass
unique_results = [list(x) for x in set(tuple(x) for x in conc_lines)]
#make into series
series = []
pindex = 'f s l m r'.encode('utf-8').split()
def _detokenise(s):
# undo common Penn Treebank tokenisation artefacts; the final pair
# collapses the double spaces left behind by the other substitutions
for a, b in (('$ ', '$'), ('`` ', '``'), (' ,', ','), (' .', '.'),
("'' ", "''"), (" n't", "n't"), (" 're", "'re"), (" 'm", "'m"),
(" 's", "'s"), (" 'd", "'d"), (" 'll", "'ll"), ('  ', ' ')):
s = s.replace(a, b)
return s
for fname, spkr, start, word, end in unique_results:
fname = os.path.basename(fname)
start, word, end = _detokenise(start), _detokenise(word), _detokenise(end)
#spaces = ' ' * (maximum / 2 - (len(word) / 2))
#new_word = spaces + word + spaces
series.append(pd.Series([fname.encode('utf-8', errors = 'ignore'), \
spkr.encode('utf-8', errors = 'ignore'), \
start.encode('utf-8', errors = 'ignore'), \
word.encode('utf-8', errors = 'ignore'), \
end.encode('utf-8', errors = 'ignore')], index = pindex))
# randomise results...
if random:
from random import shuffle
shuffle(series)
if series == []:
if root:
print 'No results found, sorry.'
return
else:
raise ValueError("No results found, I'm afraid. Check your query and path.")
df = pd.concat(series, axis = 1).T
if not add_links:
df.columns = ['f', 's', 'l', 'm', 'r']
else:
df.columns = ['f', 's', 'l', 'm', 'r', 'link']
if all(x == '' for x in list(df['s'].values)):
df.drop('s', axis = 1, inplace = True)
formatl = lambda x: "{0}".format(x[-window:])
formatf = lambda x: "{0}".format(x[-20:])
#formatr = lambda x:
formatr = lambda x: "{{:<{}s}}".format(df['r'].str.len().max()).format(x[:window])
st = df.head(n).to_string(header = False, formatters={'l': formatl,
'r': formatr,
'f': formatf}).splitlines()
# workaround: strip the trailing ellipses pandas' formatter leaves behind
rem = '\n'.join([re.sub(r'\s*\.\.\.\s*$', '', s) for s in st])
if print_output:
print rem
if 'note' in kwargs.keys():
kwargs['note'].progvar.set(100)
return df
# NOTE: everything below this return is unreachable dead code retained from
# an earlier revision ('rname' and 'HTML' are undefined in this scope).
if add_links:
def _add_links(lines, links = False, show = 'thread'):
link = "http://www.healthboards.com/boards/bipolar-disorder/695089-labels.html"
linktext = '<a href="%s">link</a>' % link
import pandas as pd
inds = list(df.index)
num_objects = len(list(df.index))
ser = pd.Series([link for n in range(num_objects)], index = inds)
lines['link'] = ser
return lines
df = _add_links(df)
if add_links:
if not show_links:
if print_output:
print df.drop('link', axis = 1).head(n).to_string(header = False, formatters={rname:'{{:<{}s}}'.format(df[rname].str.len().max()).format})
else:
if print_output:
print HTML(df.to_html(escape=False))
else:
if print_output:
print df.head(n).to_string(header = False, formatters={rname:'{{:<{}s}}'.format(df[rname].str.len().max()).format})
if not add_links:
df.columns = ['f', 'l', 'm', 'r']
else:
df.columns = ['f', 'l', 'm', 'r', 'link']
return df
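# Example invocation (a sketch: 'data/mycorpus' is a hypothetical parsed
# corpus, and the Tregex query matches plural nouns):
#
#     lines = conc('data/mycorpus', r'/NNS/ < __', n = 25, window = 60, random = True)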
|
|
from unittest import TestCase
from game.game_variant import GameVariantGrand
from model.card import Card
from model.player import Player
class GameVariantGrandTest(TestCase):
def setUp(self):
self.game_variant = GameVariantGrand()
def test_isTrump_jacksTrue(self):
# when/then
for suit in Card.Suit:
self.assertTrue(self.game_variant.is_trump(Card(suit, Card.Face.JACK)))
def test_isTrump_nonJacksFalse(self):
# when/then
for suit in Card.Suit:
for face in Card.Face:
if face is Card.Face.JACK:
continue
self.assertFalse(self.game_variant.is_trump(Card(suit, face)))
def test_compareJacks_invalidHigherJackFails(self):
# given
no_jack = Card(Card.Suit.CLUB, Card.Face.TEN)
lower_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
# when/then
self.assertRaises(TypeError, self.game_variant.compare_jacks, no_jack, lower_jack)
def test_compareJacks_invalidLowerJackFails(self):
# given
higher_jack = Card(Card.Suit.CLUB, Card.Face.JACK)
no_jack = Card(Card.Suit.CLUB, Card.Face.TEN)
# when/then
self.assertRaises(TypeError, self.game_variant.compare_jacks, higher_jack, no_jack)
def test_compareJacks_invalidJacksFails(self):
# given
no_jack_a = Card(Card.Suit.CLUB, Card.Face.TEN)
no_jack_b = Card(Card.Suit.DIAMOND, Card.Face.NINE)
# when/then
self.assertRaises(TypeError, self.game_variant.compare_jacks, no_jack_a, no_jack_b)
def test_compareJacks_higherJackTrue(self):
# given
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
hearts_jack = Card(Card.Suit.HEARTS, Card.Face.JACK)
spade_jack = Card(Card.Suit.SPADE, Card.Face.JACK)
club_jack = Card(Card.Suit.CLUB, Card.Face.JACK)
# when/then
self.assertEquals(self.game_variant.compare_jacks(club_jack, spade_jack), 1)
self.assertEquals(self.game_variant.compare_jacks(club_jack, hearts_jack), 1)
self.assertEquals(self.game_variant.compare_jacks(club_jack, diamonds_jack), 1)
self.assertEquals(self.game_variant.compare_jacks(spade_jack, hearts_jack), 1)
self.assertEquals(self.game_variant.compare_jacks(spade_jack, diamonds_jack), 1)
self.assertEquals(self.game_variant.compare_jacks(hearts_jack, diamonds_jack), 1)
def test_compareJacks_lowerJackFalse(self):
# given
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
hearts_jack = Card(Card.Suit.HEARTS, Card.Face.JACK)
spade_jack = Card(Card.Suit.SPADE, Card.Face.JACK)
club_jack = Card(Card.Suit.CLUB, Card.Face.JACK)
# when/then
self.assertEquals(self.game_variant.compare_jacks(diamonds_jack, club_jack), -1)
self.assertEquals(self.game_variant.compare_jacks(diamonds_jack, spade_jack), -1)
self.assertEquals(self.game_variant.compare_jacks(diamonds_jack, hearts_jack), -1)
self.assertEquals(self.game_variant.compare_jacks(hearts_jack, club_jack), -1)
self.assertEquals(self.game_variant.compare_jacks(hearts_jack, spade_jack), -1)
self.assertEquals(self.game_variant.compare_jacks(spade_jack, club_jack), -1)
def test_compareCards_oneJack(self):
# given
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
spade_jack = Card(Card.Suit.SPADE, Card.Face.JACK)
diamonds_eight = Card(Card.Suit.DIAMOND, Card.Face.EIGHT)
club_ace = Card(Card.Suit.CLUB, Card.Face.ACE)
# when/then
self.assertEquals(self.game_variant.compare_cards(diamonds_jack, diamonds_eight), 1)
self.assertEquals(self.game_variant.compare_cards(spade_jack, diamonds_eight), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_jack, club_ace), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_eight, diamonds_jack), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_eight, spade_jack), -1)
self.assertEquals(self.game_variant.compare_cards(club_ace, diamonds_jack), -1)
def test_compareCards_sameFace(self):
# given
diamonds_seven = Card(Card.Suit.DIAMOND, Card.Face.SEVEN)
hearts_seven = Card(Card.Suit.HEARTS, Card.Face.SEVEN)
# when/then
self.assertEquals(self.game_variant.compare_cards(hearts_seven, diamonds_seven), 0)
self.assertEquals(self.game_variant.compare_cards(diamonds_seven, hearts_seven), 0)
def test_compareCards_tens(self):
# given
diamonds_nine = Card(Card.Suit.DIAMOND, Card.Face.NINE)
diamonds_ten = Card(Card.Suit.DIAMOND, Card.Face.TEN)
spade_ten = Card(Card.Suit.SPADE, Card.Face.TEN)
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
diamonds_king = Card(Card.Suit.DIAMOND, Card.Face.KING)
diamonds_ace = Card(Card.Suit.DIAMOND, Card.Face.ACE)
club_ace = Card(Card.Suit.CLUB, Card.Face.ACE)
# when/then
self.assertEquals(self.game_variant.compare_cards(spade_ten, diamonds_ten), 0)
self.assertEquals(self.game_variant.compare_cards(diamonds_jack, diamonds_ten), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_nine, diamonds_ten), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_king, diamonds_ten), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ace, diamonds_ten), 1)
self.assertEquals(self.game_variant.compare_cards(club_ace, diamonds_ten), 0)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, spade_ten), 0)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, diamonds_jack), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, diamonds_nine), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, diamonds_king), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, diamonds_ace), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, club_ace), 0)
def test_compareCards_sameSuit(self):
# given
diamonds_seven = Card(Card.Suit.DIAMOND, Card.Face.SEVEN)
diamonds_eight = Card(Card.Suit.DIAMOND, Card.Face.EIGHT)
# when/then
self.assertEquals(self.game_variant.compare_cards(diamonds_eight, diamonds_seven), 1)
self.assertEquals(self.game_variant.compare_cards(diamonds_seven, diamonds_eight), -1)
def test_compareCards_jacks(self):
# given
diamonds_ten = Card(Card.Suit.DIAMOND, Card.Face.TEN)
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
diamonds_queen = Card(Card.Suit.DIAMOND, Card.Face.QUEEN)
diamonds_ace = Card(Card.Suit.DIAMOND, Card.Face.ACE)
# when/then
self.assertEquals(self.game_variant.compare_cards(diamonds_queen, diamonds_jack), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ace, diamonds_jack), -1)
self.assertEquals(self.game_variant.compare_cards(diamonds_ten, diamonds_jack), -1)
def test_getHighestCard_sameSuit(self):
# given
diamonds_ten = Card(Card.Suit.DIAMOND, Card.Face.TEN)
diamonds_queen = Card(Card.Suit.DIAMOND, Card.Face.QUEEN)
diamonds_king = Card(Card.Suit.DIAMOND, Card.Face.KING)
# when/then
result = self.game_variant.get_highest_card([diamonds_queen, diamonds_king, diamonds_ten])
self.assertEquals(Card(Card.Suit.DIAMOND, Card.Face.TEN), result)
def test_getHighestCard_sameFace(self):
# given
diamonds_seven = Card(Card.Suit.DIAMOND, Card.Face.SEVEN)
hearts_seven = Card(Card.Suit.HEARTS, Card.Face.SEVEN)
spade_seven = Card(Card.Suit.SPADE, Card.Face.SEVEN)
# when/then
result = self.game_variant.get_highest_card([hearts_seven, spade_seven, diamonds_seven])
self.assertEquals(Card(Card.Suit.HEARTS, Card.Face.SEVEN), result)
def test_getHighestCard_differentFaceAndSuit(self):
# given
diamonds_seven = Card(Card.Suit.DIAMOND, Card.Face.SEVEN)
hearts_eight = Card(Card.Suit.HEARTS, Card.Face.EIGHT)
spade_nine = Card(Card.Suit.SPADE, Card.Face.NINE)
# when/then
result = self.game_variant.get_highest_card([spade_nine, hearts_eight, diamonds_seven])
self.assertEquals(Card(Card.Suit.SPADE, Card.Face.NINE), result)
def test_getHighestCard_jacks(self):
# given
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
hearts_jack = Card(Card.Suit.HEARTS, Card.Face.JACK)
spade_jack = Card(Card.Suit.SPADE, Card.Face.JACK)
# when/then
result = self.game_variant.get_highest_card([spade_jack, hearts_jack, diamonds_jack])
self.assertEquals(Card(Card.Suit.SPADE, Card.Face.JACK), result)
def test_getHighestCard_jackAndSuit(self):
# given
diamonds_jack = Card(Card.Suit.DIAMOND, Card.Face.JACK)
diamonds_king = Card(Card.Suit.DIAMOND, Card.Face.KING)
club_ace = Card(Card.Suit.CLUB, Card.Face.ACE)
# when/then
result = self.game_variant.get_highest_card([diamonds_king, club_ace, diamonds_jack])
self.assertEquals(Card(Card.Suit.DIAMOND, Card.Face.JACK), result)
def test_hasTrump_withJack(self):
# given
player = Player(1, "Player")
player.cards = [Card(Card.Suit.DIAMOND, Card.Face.JACK), Card(Card.Suit.DIAMOND, Card.Face.EIGHT)]
# when
result = self.game_variant.has_trump(player)
# then
self.assertTrue(result)
def test_hasTrump_withoutTrump(self):
# given
player = Player(1, "Player")
player.cards = [Card(Card.Suit.DIAMOND, Card.Face.SEVEN), Card(Card.Suit.DIAMOND, Card.Face.EIGHT)]
# when
result = self.game_variant.has_trump(player)
# then
self.assertFalse(result)
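if __name__ == '__main__':
# Convenience runner (an addition, not in the original file): lets the
# suite above be executed directly from the command line.
import unittest
unittest.main()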
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import exceptions as django_exceptions
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions as horizon_exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
ENABLE = 0
DISABLE = 1
class CreateUserLink(tables.LinkAction):
name = "create"
verbose_name = _("Create User")
url = "horizon:identity:users:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (('identity', 'identity:create_grant'),
("identity", "identity:create_user"),
("identity", "identity:list_roles"),
("identity", "identity:list_projects"),)
def allowed(self, request, user):
return api.keystone.keystone_can_edit_user()
class EditUserLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:identity:users:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_user"),
("identity", "identity:list_projects"),)
policy_target_attrs = (("user_id", "id"),)
def allowed(self, request, user):
return api.keystone.keystone_can_edit_user()
class ToggleEnabled(policy.PolicyTargetMixin, tables.BatchAction):
name = "toggle"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Enable User",
u"Enable Users",
count
),
ungettext_lazy(
u"Disable User",
u"Disable Users",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Enabled User",
u"Enabled Users",
count
),
ungettext_lazy(
u"Disabled User",
u"Disabled Users",
count
),
)
classes = ("btn-toggle",)
policy_rules = (("identity", "identity:update_user"),)
policy_target_attrs = (("user_id", "id"),)
def allowed(self, request, user=None):
if not api.keystone.keystone_can_edit_user():
return False
self.enabled = True
if not user:
return self.enabled
self.enabled = user.enabled
if self.enabled:
self.current_present_action = DISABLE
else:
self.current_present_action = ENABLE
return True
def update(self, request, user=None):
super(ToggleEnabled, self).update(request, user)
if user and user.id == request.user.id:
self.attrs["disabled"] = "disabled"
def action(self, request, obj_id):
if obj_id == request.user.id:
messages.info(request, _('You cannot disable the user you are '
'currently logged in as.'))
return
if self.enabled:
api.keystone.user_update_enabled(request, obj_id, False)
self.current_past_action = DISABLE
else:
api.keystone.user_update_enabled(request, obj_id, True)
self.current_past_action = ENABLE
class DeleteUsersAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
policy_rules = (("identity", "identity:delete_user"),)
def allowed(self, request, datum):
if not api.keystone.keystone_can_edit_user() or \
(datum and datum.id == request.user.id):
return False
return True
def delete(self, request, obj_id):
api.keystone.user_delete(request, obj_id)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()
or q in (getattr(user, 'email', None) or '').lower()]
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, user_id):
user_info = api.keystone.user_get(request, user_id, admin=True)
return user_info
class UpdateCell(tables.UpdateAction):
def allowed(self, request, user, cell):
return api.keystone.keystone_can_edit_user() and \
policy.check((("identity", "identity:update_user"),),
request)
def update_cell(self, request, datum, user_id,
cell_name, new_cell_value):
try:
user_obj = datum
setattr(user_obj, cell_name, new_cell_value)
api.keystone.user_update(
request,
user_obj,
name=user_obj.name,
email=user_obj.email,
enabled=user_obj.enabled,
project=user_obj.project_id,
password=None)
except horizon_exceptions.Conflict:
message = _("This name is already taken.")
messages.warning(request, message)
raise django_exceptions.ValidationError(message)
except Exception:
horizon_exceptions.handle(request, ignore=True)
return False
return True
class UsersTable(tables.DataTable):
STATUS_CHOICES = (
("true", True),
("false", False)
)
name = tables.Column('name',
link=("horizon:identity:users:detail"),
verbose_name=_('User Name'),
form_field=forms.CharField(),
update_action=UpdateCell)
email = tables.Column('email', verbose_name=_('Email'),
form_field=forms.CharField(required=False),
update_action=UpdateCell,
filters=(lambda v: defaultfilters
.default_if_none(v, ""),
defaultfilters.escape,
defaultfilters.urlize)
)
# Default tenant is not returned from Keystone currently.
# default_tenant = tables.Column('default_tenant',
# verbose_name=_('Default Project'))
id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
empty_value="False")
class Meta:
name = "users"
verbose_name = _("Users")
row_actions = (EditUserLink, ToggleEnabled, DeleteUsersAction)
table_actions = (UserFilterAction, CreateUserLink, DeleteUsersAction)
row_class = UpdateRow
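# Sketch (an assumption, not part of this module): elsewhere in the
# dashboard, UsersTable is typically wired to a view along these lines.
#
#     class IndexView(tables.DataTableView):
#         table_class = UsersTable
#         template_name = 'identity/users/index.html'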
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import nova.api.openstack
from nova.api.openstack.compute import admin_actions
from nova.api.openstack.compute import admin_password
from nova.api.openstack.compute import agents
from nova.api.openstack.compute import aggregates
from nova.api.openstack.compute import assisted_volume_snapshots
from nova.api.openstack.compute import availability_zone
from nova.api.openstack.compute import config_drive
from nova.api.openstack.compute import console_output
from nova.api.openstack.compute import create_backup
from nova.api.openstack.compute import deferred_delete
from nova.api.openstack.compute import evacuate
from nova.api.openstack.compute import extended_availability_zone
from nova.api.openstack.compute import extended_server_attributes
from nova.api.openstack.compute import extended_status
from nova.api.openstack.compute import extended_volumes
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import fixed_ips
from nova.api.openstack.compute import flavor_access
from nova.api.openstack.compute import flavor_manage
from nova.api.openstack.compute import flavor_rxtx
from nova.api.openstack.compute import flavors
from nova.api.openstack.compute import flavors_extraspecs
from nova.api.openstack.compute import floating_ip_dns
from nova.api.openstack.compute import floating_ip_pools
from nova.api.openstack.compute import floating_ips
from nova.api.openstack.compute import floating_ips_bulk
from nova.api.openstack.compute import hide_server_addresses
from nova.api.openstack.compute import instance_usage_audit_log
from nova.api.openstack.compute import keypairs
from nova.api.openstack.compute import lock_server
from nova.api.openstack.compute import migrate_server
from nova.api.openstack.compute import multinic
from nova.api.openstack.compute import pause_server
from nova.api.openstack.compute import remote_consoles
from nova.api.openstack.compute import rescue
from nova.api.openstack.compute import security_groups
from nova.api.openstack.compute import server_metadata
from nova.api.openstack.compute import server_usage
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import shelve
from nova.api.openstack.compute import simple_tenant_usage
from nova.api.openstack.compute import suspend_server
from nova.api.openstack import wsgi
import nova.conf
CONF = nova.conf.CONF
def _create_controller(main_controller, controller_list,
action_controller_list):
"""This is a helper method to create controller with a
list of extended controller. This is for backward compatible
with old extension interface. Finally, the controller for the
same resource will be merged into single one controller.
"""
controller = wsgi.ResourceV21(main_controller())
for ctl in controller_list:
controller.register_extensions(ctl())
for ctl in action_controller_list:
controller.register_actions(ctl())
return controller
agents_controller = functools.partial(
_create_controller, agents.AgentController, [], [])
aggregates_controller = functools.partial(
_create_controller, aggregates.AggregateController, [], [])
assisted_volume_snapshots_controller = functools.partial(
_create_controller,
assisted_volume_snapshots.AssistedVolumeSnapshotsController, [], [])
availability_zone_controller = functools.partial(
_create_controller, availability_zone.AvailabilityZoneController, [], [])
keypairs_controller = functools.partial(
_create_controller, keypairs.KeypairController, [], [])
fixed_ips_controller = functools.partial(_create_controller,
fixed_ips.FixedIPController, [], [])
flavor_controller = functools.partial(_create_controller,
flavors.FlavorsController,
[
flavor_rxtx.FlavorRxtxController,
flavor_access.FlavorActionController
],
[
flavor_manage.FlavorManageController,
flavor_access.FlavorActionController
]
)
flavor_access_controller = functools.partial(_create_controller,
flavor_access.FlavorAccessController, [], [])
flavor_extraspec_controller = functools.partial(_create_controller,
flavors_extraspecs.FlavorExtraSpecsController, [], [])
floating_ip_dns_controller = functools.partial(_create_controller,
floating_ip_dns.FloatingIPDNSDomainController, [], [])
floating_ip_dnsentry_controller = functools.partial(_create_controller,
floating_ip_dns.FloatingIPDNSEntryController, [], [])
floating_ip_pools_controller = functools.partial(_create_controller,
floating_ip_pools.FloatingIPPoolsController, [], [])
floating_ips_controller = functools.partial(_create_controller,
floating_ips.FloatingIPController, [], [])
floating_ips_bulk_controller = functools.partial(_create_controller,
floating_ips_bulk.FloatingIPBulkController, [], [])
instance_usage_audit_log_controller = functools.partial(_create_controller,
instance_usage_audit_log.InstanceUsageAuditLogController, [], [])
server_controller = functools.partial(_create_controller,
servers.ServersController,
[
config_drive.ConfigDriveController,
extended_availability_zone.ExtendedAZController,
extended_server_attributes.ExtendedServerAttributesController,
extended_status.ExtendedStatusController,
extended_volumes.ExtendedVolumesController,
hide_server_addresses.Controller,
keypairs.Controller,
security_groups.SecurityGroupsOutputController,
server_usage.ServerUsageController,
],
[
admin_actions.AdminActionsController,
admin_password.AdminPasswordController,
console_output.ConsoleOutputController,
create_backup.CreateBackupController,
deferred_delete.DeferredDeleteController,
evacuate.EvacuateController,
floating_ips.FloatingIPActionController,
lock_server.LockServerController,
migrate_server.MigrateServerController,
multinic.MultinicController,
pause_server.PauseServerController,
remote_consoles.RemoteConsolesController,
rescue.RescueController,
security_groups.SecurityGroupActionController,
shelve.ShelveController,
suspend_server.SuspendServerController
]
)
server_metadata_controller = functools.partial(_create_controller,
server_metadata.ServerMetadataController, [], [])
simple_tenant_usage_controller = functools.partial(_create_controller,
simple_tenant_usage.SimpleTenantUsageController, [], [])
# NOTE(alex_xu): This is structure of this route list as below:
# (
# ('Route path': {
# 'HTTP method: [
# 'Controller',
# 'The method of controller is used to handle this route'
# ],
# ...
# }),
# ...
# )
#
# Also note that this is ordered tuple. For example, the '/servers/detail'
# should be in the front of '/servers/{id}', otherwise the request to
# '/servers/detail' always matches to '/servers/{id}' as the id is 'detail'.
ROUTE_LIST = (
# NOTE: '/os-volumes_boot' is a clone of '/servers'. We may want to
# deprecate it in the future.
('/flavors', {
'GET': [flavor_controller, 'index'],
'POST': [flavor_controller, 'create']
}),
('/flavors/detail', {
'GET': [flavor_controller, 'detail']
}),
('/flavors/{id}', {
'GET': [flavor_controller, 'show'],
'DELETE': [flavor_controller, 'delete']
}),
('/flavors/{id}/action', {
'POST': [flavor_controller, 'action']
}),
('/flavors/{flavor_id}/os-extra_specs', {
'GET': [flavor_extraspec_controller, 'index'],
'POST': [flavor_extraspec_controller, 'create']
}),
('/flavors/{flavor_id}/os-extra_specs/{id}', {
'GET': [flavor_extraspec_controller, 'show'],
'PUT': [flavor_extraspec_controller, 'update'],
'DELETE': [flavor_extraspec_controller, 'delete']
}),
('/flavors/{flavor_id}/os-flavor-access', {
'GET': [flavor_access_controller, 'index']
}),
('/os-agents', {
'GET': [agents_controller, 'index'],
'POST': [agents_controller, 'create']
}),
('/os-agents/{id}', {
'PUT': [agents_controller, 'update'],
'DELETE': [agents_controller, 'delete']
}),
('/os-aggregates', {
'GET': [aggregates_controller, 'index'],
'POST': [aggregates_controller, 'create']
}),
('/os-aggregates/{id}', {
'GET': [aggregates_controller, 'show'],
'PUT': [aggregates_controller, 'update'],
'DELETE': [aggregates_controller, 'delete']
}),
('/os-aggregates/{id}/action', {
'POST': [aggregates_controller, 'action'],
}),
('/os-assisted-volume-snapshots', {
'POST': [assisted_volume_snapshots_controller, 'create']
}),
('/os-assisted-volume-snapshots/{id}', {
'DELETE': [assisted_volume_snapshots_controller, 'delete']
}),
('/os-availability-zone', {
'GET': [availability_zone_controller, 'index']
}),
('/os-availability-zone/detail', {
'GET': [availability_zone_controller, 'detail'],
}),
('/os-fixed-ips/{id}', {
'GET': [fixed_ips_controller, 'show']
}),
('/os-fixed-ips/{id}/action', {
'POST': [fixed_ips_controller, 'action'],
}),
('/os-floating-ip-dns', {
'GET': [floating_ip_dns_controller, 'index']
}),
('/os-floating-ip-dns/{id}', {
'PUT': [floating_ip_dns_controller, 'update'],
'DELETE': [floating_ip_dns_controller, 'delete']
}),
('/os-floating-ip-dns/{domain_id}/entries/{id}', {
'GET': [floating_ip_dnsentry_controller, 'show'],
'PUT': [floating_ip_dnsentry_controller, 'update'],
'DELETE': [floating_ip_dnsentry_controller, 'delete']
}),
('/os-floating-ip-pools', {
'GET': [floating_ip_pools_controller, 'index'],
}),
('/os-floating-ips', {
'GET': [floating_ips_controller, 'index'],
'POST': [floating_ips_controller, 'create']
}),
('/os-floating-ips/{id}', {
'GET': [floating_ips_controller, 'show'],
'DELETE': [floating_ips_controller, 'delete']
}),
('/os-floating-ips-bulk', {
'GET': [floating_ips_bulk_controller, 'index'],
'POST': [floating_ips_bulk_controller, 'create']
}),
('/os-floating-ips-bulk/{id}', {
'GET': [floating_ips_bulk_controller, 'show'],
'PUT': [floating_ips_bulk_controller, 'update']
}),
('/os-instance_usage_audit_log', {
'GET': [instance_usage_audit_log_controller, 'index']
}),
('/os-instance_usage_audit_log/{id}', {
'GET': [instance_usage_audit_log_controller, 'show']
}),
('/os-keypairs', {
'GET': [keypairs_controller, 'index'],
'POST': [keypairs_controller, 'create']
}),
('/os-keypairs/{id}', {
'GET': [keypairs_controller, 'show'],
'DELETE': [keypairs_controller, 'delete']
}),
('/os-simple-tenant-usage', {
'GET': [simple_tenant_usage_controller, 'index']
}),
('/os-simple-tenant-usage/{id}', {
'GET': [simple_tenant_usage_controller, 'show']
}),
('/os-volumes_boot', {
'GET': [server_controller, 'index'],
'POST': [server_controller, 'create']
}),
('/os-volumes_boot/detail', {
'GET': [server_controller, 'detail']
}),
('/os-volumes_boot/{id}', {
'GET': [server_controller, 'show'],
'PUT': [server_controller, 'update'],
'DELETE': [server_controller, 'delete']
}),
('/os-volumes_boot/{id}/action', {
'POST': [server_controller, 'action']
}),
('/servers', {
'GET': [server_controller, 'index'],
'POST': [server_controller, 'create']
}),
('/servers/detail', {
'GET': [server_controller, 'detail']
}),
('/servers/{id}', {
'GET': [server_controller, 'show'],
'PUT': [server_controller, 'update'],
'DELETE': [server_controller, 'delete']
}),
('/servers/{id}/action', {
'POST': [server_controller, 'action']
}),
('/servers/{server_id}/metadata', {
'GET': [server_metadata_controller, 'index'],
'POST': [server_metadata_controller, 'create'],
'PUT': [server_metadata_controller, 'update_all'],
}),
('/servers/{server_id}/metadata/{id}', {
'GET': [server_metadata_controller, 'show'],
'PUT': [server_metadata_controller, 'update'],
'DELETE': [server_metadata_controller, 'delete'],
}),
)
class APIRouterV21(nova.api.openstack.APIRouterV21):
"""Routes requests on the OpenStack API to the appropriate controller
and method. The URL mapping based on the plain list `ROUTE_LIST` is built
at here. The stevedore based API loading will be replaced by this.
"""
def __init__(self):
self._loaded_extension_info = extension_info.LoadedExtensionInfo()
super(APIRouterV21, self).__init__()
for path, methods in ROUTE_LIST:
for method, controller_info in methods.items():
# TODO(alex_xu): In the end, I want to create single controller
# instance instead of create controller instance for each
# route.
controller = controller_info[0]()
action = controller_info[1]
self.map.create_route(path, method, controller, action)
def _register_extension(self, ext):
return self.loaded_extension_info.register_extension(ext.obj)
@property
def loaded_extension_info(self):
return self._loaded_extension_info
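# Sketch (not in the original module): instantiating the router registers
# every (path, method) pair from ROUTE_LIST, so e.g. GET /servers/detail
# dispatches to ServersController.detail and, because of tuple ordering,
# is matched before GET /servers/{id}.
#
#     router = APIRouterV21()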
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
from urlparse import urljoin
from tg import config
from bs4 import BeautifulSoup
import html5lib
import html5lib.serializer
import html5lib.filters.alphabeticalattributes
import markdown
from . import macro
from . import helpers as h
from allura import model as M
from allura.lib.utils import ForgeHTMLSanitizer
log = logging.getLogger(__name__)
PLAINTEXT_BLOCK_RE = re.compile(
r'(?P<bplain>\[plain\])(?P<code>.*?)(?P<eplain>\[\/plain\])',
re.MULTILINE | re.DOTALL
)
MACRO_PATTERN = r'\[\[([^\]\[]+)\]\]'
class CommitMessageExtension(markdown.Extension):
"""Markdown extension for processing commit messages.
People don't expect their commit messages to be parsed as Markdown. This
extension is therefore intentionally minimal in what it does. It knows how
to handle Trac-style short refs, will replace short refs with links, and
will create paragraphs around double-line breaks. That is *all* it does.
To make it do more, re-add some inlinePatterns and/or blockprocessors.
Some examples of the Trac-style refs this extension can parse::
#100
r123
ticket:100
comment:13:ticket:100
source:path/to/file.c@123#L456 (rev 123, lineno 456)
Trac-style refs will be converted to links to the appropriate artifact by
the :class:`PatternReplacingProcessor` preprocessor.
"""
def __init__(self, app):
markdown.Extension.__init__(self)
self.app = app
self._use_wiki = False
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
# remove default preprocessors and add our own
md.preprocessors.clear()
md.preprocessors['trac_refs'] = PatternReplacingProcessor(TracRef1(), TracRef2(), TracRef3(self.app))
# remove all inlinepattern processors except short refs and links
md.inlinePatterns.clear()
md.inlinePatterns["link"] = markdown.inlinepatterns.LinkPattern(markdown.inlinepatterns.LINK_RE, md)
md.inlinePatterns['short_reference'] = ForgeLinkPattern(markdown.inlinepatterns.SHORT_REF_RE, md, ext=self)
# remove all default block processors except for paragraph
md.parser.blockprocessors.clear()
md.parser.blockprocessors['paragraph'] = markdown.blockprocessors.ParagraphProcessor(md.parser)
# wrap artifact link text in square brackets
self.forge_link_tree_processor = ForgeLinkTreeProcessor(md)
md.treeprocessors['links'] = self.forge_link_tree_processor
# Sanitize HTML
md.postprocessors['sanitize_html'] = HTMLSanitizer()
# Put a class around markdown content for custom css
md.postprocessors['add_custom_class'] = AddCustomClass()
md.postprocessors['mark_safe'] = MarkAsSafe()
def reset(self):
self.forge_link_tree_processor.reset()
class Pattern(object):
"""Base class for regex patterns used by the :class:`PatternReplacingProcessor`.
Subclasses must define :attr:`pattern` (a compiled regex), and
:meth:`repl`.
"""
BEGIN, END = r'(^|\b|\s)', r'($|\b|\s)'
def sub(self, line):
return self.pattern.sub(self.repl, line)
def repl(self, match):
"""Return a string to replace ``match`` in the source string (the
string in which the match was found).
"""
return match.group()
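# Illustrative subclass (not used by Allura; shown only to make the
# pattern/repl contract concrete). It would wrap bare CVE identifiers in
# Markdown links; the URL scheme is an assumption for the example.
class _ExampleCVERef(Pattern):
pattern = re.compile(r'\b(CVE-\d{4}-\d{4,})\b')
def repl(self, match):
cve = match.group(1)
return '[%s](https://nvd.nist.gov/vuln/detail/%s)' % (cve, cve)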
class TracRef1(Pattern):
"""Replaces Trac-style short refs with links. Example patterns::
#100 (ticket 100)
r123 (revision 123)
"""
pattern = re.compile(r'(?<!\[|\w)([#r]\d+)(?!\]|\w)')
def repl(self, match):
shortlink = M.Shortlink.lookup(match.group(1))
if shortlink and not getattr(shortlink.ref.artifact, 'deleted', False):
return '[{ref}]({url})'.format(
ref=match.group(1),
url=shortlink.url)
return match.group()
class TracRef2(Pattern):
"""Replaces Trac-style short refs with links. Example patterns::
ticket:100
comment:13:ticket:400
"""
pattern = re.compile(
Pattern.BEGIN + r'((comment:(\d+):)?(ticket:)(\d+))' + Pattern.END)
def repl(self, match):
shortlink = M.Shortlink.lookup('#' + match.group(6))
if shortlink and not getattr(shortlink.ref.artifact, 'deleted', False):
url = shortlink.url
if match.group(4):
slug = self.get_comment_slug(
shortlink.ref.artifact, match.group(4))
slug = '#' + slug if slug else ''
url = url + slug
return '{front}[{ref}]({url}){back}'.format(
front=match.group(1),
ref=match.group(2),
url=url,
back=match.group(7))
return match.group()
def get_comment_slug(self, ticket, comment_num):
"""Given the id of an imported Trac comment, return it's Allura slug.
"""
if not ticket:
return None
comment_num = int(comment_num)
comments = ticket.discussion_thread.post_class().query.find(dict(
discussion_id=ticket.discussion_thread.discussion_id,
thread_id=ticket.discussion_thread._id,
status={'$in': ['ok', 'pending']},
deleted=False)).sort('timestamp')
if comment_num <= comments.count():
return comments.all()[comment_num - 1].slug
class TracRef3(Pattern):
"""Replaces Trac-style short refs with links. Example patterns::
source:trunk/server/file.c@123#L456 (rev 123, lineno 456)
Creates a link to a specific line of a source file at a specific revision.
"""
pattern = re.compile(
Pattern.BEGIN + r'((source:)([^@#\s]+)(@(\w+))?(#L(\d+))?)' + Pattern.END)
def __init__(self, app):
super(TracRef3, self).__init__()
self.app = app
def repl(self, match):
if not self.app:
return match.group()
file, rev, lineno = (
match.group(4),
match.group(6) or 'HEAD',
'#l' + match.group(8) if match.group(8) else '')
url = '{app_url}{rev}/tree/{file}{lineno}'.format(
app_url=self.app.url,
rev=rev,
file=file,
lineno=lineno)
return '{front}[{ref}]({url}){back}'.format(
front=match.group(1),
ref=match.group(2),
url=url,
back=match.group(9))
class PatternReplacingProcessor(markdown.preprocessors.Preprocessor):
"""A Markdown preprocessor that searches the source lines for patterns and
replaces matches with alternate text.
"""
def __init__(self, *patterns):
self.patterns = patterns or []
def run(self, lines):
new_lines = []
for line in lines:
for pattern in self.patterns:
line = pattern.sub(line)
new_lines.append(line)
return new_lines
class ForgeExtension(markdown.Extension):
def __init__(self, wiki=False, email=False, macro_context=None):
markdown.Extension.__init__(self)
self._use_wiki = wiki
self._is_email = email
self._macro_context = macro_context
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
# allow markdown within e.g. <div markdown>...</div> More info at:
# https://github.com/waylan/Python-Markdown/issues/52
md.preprocessors['html_block'].markdown_in_raw = True
md.preprocessors.add('plain_text_block', PlainTextPreprocessor(md), "_begin")
md.preprocessors.add('macro_include', ForgeMacroIncludePreprocessor(md), '_end')
# this has to be before the 'escape' processor, otherwise weird
# placeholders are inserted for escaped chars within urls, and then the
# autolink can't match the whole url
md.inlinePatterns.add('autolink_without_brackets', AutolinkPattern(r'(http(?:s?)://[a-zA-Z0-9./\-\\_%?&=+#;~:!]+)', md), '<escape')
# replace the link pattern with our extended version
md.inlinePatterns['link'] = ForgeLinkPattern(markdown.inlinepatterns.LINK_RE, md, ext=self)
md.inlinePatterns['short_reference'] = ForgeLinkPattern(markdown.inlinepatterns.SHORT_REF_RE, md, ext=self)
# macro must be processed before links
md.inlinePatterns.add('macro', ForgeMacroPattern(MACRO_PATTERN, md, ext=self), '<link')
self.forge_link_tree_processor = ForgeLinkTreeProcessor(md)
md.treeprocessors['links'] = self.forge_link_tree_processor
# Sanitize HTML
md.postprocessors['sanitize_html'] = HTMLSanitizer()
# Rewrite all relative links that don't start with . to have a '../' prefix
md.postprocessors['rewrite_relative_links'] = RelativeLinkRewriter(make_absolute=self._is_email)
# Put a class around markdown content for custom css
md.postprocessors['add_custom_class'] = AddCustomClass()
md.postprocessors['mark_safe'] = MarkAsSafe()
def reset(self):
self.forge_link_tree_processor.reset()
class ForgeLinkPattern(markdown.inlinepatterns.LinkPattern):
artifact_re = re.compile(r'((.*?):)?((.*?):)?(.+)')
def __init__(self, *args, **kwargs):
self.ext = kwargs.pop('ext')
markdown.inlinepatterns.LinkPattern.__init__(self, *args, **kwargs)
def handleMatch(self, m):
el = markdown.util.etree.Element('a')
el.text = m.group(2)
is_link_with_brackets = False
try:
href = m.group(9)
except IndexError:
href = m.group(2)
is_link_with_brackets = True
try:
title = m.group(13)
except IndexError:
title = None
classes = ''
if href:
if href == 'TOC':
return '[TOC]' # skip TOC
if self.artifact_re.match(href):
href, classes = self._expand_alink(href, is_link_with_brackets)
el.set('href', self.sanitize_url(self.unescape(href.strip())))
el.set('class', classes)
else:
el.set('href', '')
if title:
title = markdown.inlinepatterns.dequote(self.unescape(title))
el.set('title', title)
if 'notfound' in classes and not self.ext._use_wiki:
text = el.text
el = markdown.util.etree.Element('span')
el.text = '[%s]' % text
return el
def _expand_alink(self, link, is_link_with_brackets):
'''Return (href, classes) for an artifact link'''
classes = ''
if is_link_with_brackets:
classes = 'alink'
href = link
shortlink = M.Shortlink.lookup(link)
if shortlink and shortlink.ref and not getattr(shortlink.ref.artifact, 'deleted', False):
href = shortlink.url
if getattr(shortlink.ref.artifact, 'is_closed', False):
classes += ' strikethrough'
self.ext.forge_link_tree_processor.alinks.append(shortlink)
elif is_link_with_brackets:
href = h.urlquote(link)
classes += ' notfound'
attach_link = link.split('/attachment/')
if len(attach_link) == 2 and self.ext._use_wiki:
shortlink = M.Shortlink.lookup(attach_link[0])
if shortlink:
attach_status = ' notfound'
for attach in shortlink.ref.artifact.attachments:
if attach.filename == attach_link[1]:
attach_status = ''
classes += attach_status
return href, classes
class PlainTextPreprocessor(markdown.preprocessors.Preprocessor):
'''
    This was used earlier for the [plain] tags that the Blog tool's RSS
    importer created, before html2text escaped all special markdown
    characters properly. It can be deprecated.
'''
def run(self, lines):
text = "\n".join(lines)
while 1:
res = PLAINTEXT_BLOCK_RE.finditer(text)
for m in res:
code = self._escape(m.group('code'))
placeholder = self.markdown.htmlStash.store(code, safe=True)
text = '%s%s%s' % (
text[:m.start()], placeholder, text[m.end():])
break
else:
break
return text.split("\n")
def _escape(self, txt):
""" basic html escaping """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
return txt
class ForgeMacroPattern(markdown.inlinepatterns.Pattern):
def __init__(self, *args, **kwargs):
self.ext = kwargs.pop('ext')
self.macro = macro.parse(self.ext._macro_context)
markdown.inlinepatterns.Pattern.__init__(self, *args, **kwargs)
def handleMatch(self, m):
html = self.macro(m.group(2))
placeholder = self.markdown.htmlStash.store(html)
return placeholder
class ForgeLinkTreeProcessor(markdown.treeprocessors.Treeprocessor):
'''Wraps artifact links with []'''
def __init__(self, parent):
self.parent = parent
self.alinks = []
def run(self, root):
for node in root.getiterator('a'):
if 'alink' in node.get('class', '').split() and node.text:
node.text = '[' + node.text + ']'
return root
def reset(self):
self.alinks = []
class MarkAsSafe(markdown.postprocessors.Postprocessor):
def run(self, text):
return h.html.literal(text)
class AddCustomClass(markdown.postprocessors.Postprocessor):
def run(self, text):
return '<div class="markdown_content">%s</div>' % text
class RelativeLinkRewriter(markdown.postprocessors.Postprocessor):
def __init__(self, make_absolute=False):
self._make_absolute = make_absolute
def run(self, text):
soup = BeautifulSoup(text, 'html5lib') # 'html.parser' parser gives weird </li> behaviour with test_macro_members
if self._make_absolute:
rewrite = self._rewrite_abs
else:
rewrite = self._rewrite
for link in soup.findAll('a'):
rewrite(link, 'href')
for link in soup.findAll('img'):
rewrite(link, 'src')
# html5lib parser adds html/head/body tags, so output <body> without its own tags
return unicode(soup.body)[len('<body>'):-len('</body>')]
def _rewrite(self, tag, attr):
val = tag.get(attr)
if val is None:
return
if ' ' in val:
# Don't urllib.quote to avoid possible double-quoting
# just make sure no spaces
val = val.replace(' ', '%20')
tag[attr] = val
if '://' in val:
for domain in re.split(r'\s*,\s*', config.get('nofollow_exempt_domains', '')):
if domain and domain in val:
return
tag['rel'] = 'nofollow'
return
if val.startswith('/'):
return
if val.startswith('.'):
return
if val.startswith('mailto:'):
return
if val.startswith('#'):
return
tag[attr] = '../' + val
def _rewrite_abs(self, tag, attr):
self._rewrite(tag, attr)
val = tag.get(attr)
val = urljoin(config['base_url'], val)
tag[attr] = val
class HTMLSanitizer(markdown.postprocessors.Postprocessor):
def run(self, text):
parser = html5lib.HTMLParser(tokenizer=ForgeHTMLSanitizer)
parsed = parser.parse(text)
serializer = html5lib.serializer.HTMLSerializer()
walker = html5lib.getTreeWalker("etree")
stream = html5lib.filters.alphabeticalattributes.Filter(walker(parsed))
out = ''.join(serializer.serialize(stream))
return out
class AutolinkPattern(markdown.inlinepatterns.Pattern):
def __init__(self, pattern, markdown_instance=None):
markdown.inlinepatterns.Pattern.__init__(
self, pattern, markdown_instance)
# override the complete regex, requiring the preceding text (.*?) to end
# with whitespace or beginning of line "\s|^"
        self.compiled_re = re.compile(r"^(.*?\s|^)%s(.*?)$" % pattern,
                                      re.DOTALL | re.UNICODE)
def handleMatch(self, mo):
old_link = mo.group(2)
result = markdown.util.etree.Element('a')
result.text = old_link
# since this is run before the builtin 'escape' processor, we have to
# do our own unescaping
for char in markdown.Markdown.ESCAPED_CHARS:
old_link = old_link.replace('\\' + char, char)
result.set('href', old_link)
return result
class ForgeMacroIncludePreprocessor(markdown.preprocessors.Preprocessor):
'''Join include statements to prevent extra <br>'s inserted by nl2br extension.
Converts:
[[include ref=some_ref]]
[[include ref=some_other_ref]]
To:
[[include ref=some_ref]][[include ref=some_other_ref]]
'''
pattern = re.compile(r'^\s*\[\[include ref=[^\]]*\]\]\s*$', re.IGNORECASE)
def run(self, lines):
buf = []
result = []
for line in lines:
if self.pattern.match(line):
buf.append(line)
else:
if buf:
result.append(''.join(buf))
buf = []
result.append(line)
        if buf:
            # flush a trailing run of include statements so they are not
            # lost when the document ends with [[include ...]] lines
            result.append(''.join(buf))
        return result
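# A minimal sketch of the join performed above (the markdown instance is not
# used by run(), so None suffices; purely illustrative):
#
#   >>> ForgeMacroIncludePreprocessor(None).run(
#   ...     ['[[include ref=a]]', '[[include ref=b]]', 'text'])
#   ['[[include ref=a]][[include ref=b]]', 'text']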
|
|
"""Build and Version QuerySet classes."""
import datetime
import logging
from django.db import models
from django.db.models import Q
from django.utils import timezone
from readthedocs.builds.constants import (
BUILD_STATE_FINISHED,
BUILD_STATE_TRIGGERED,
EXTERNAL,
)
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.projects import constants
from readthedocs.projects.models import Project
log = logging.getLogger(__name__)
__all__ = ['VersionQuerySet', 'BuildQuerySet', 'RelatedBuildQuerySet']
class VersionQuerySetBase(models.QuerySet):
"""Versions take into account their own privacy_level setting."""
use_for_related_fields = True
def __init__(self, *args, internal_only=False, external_only=False, **kwargs):
"""
Overridden to pass extra arguments from the manager.
Usage:
import functools
ManagerClass.from_queryset(
functools.partial(VersionQuerySet, internal_only=True)
)
:param bool internal_only: If this queryset is being used to query internal versions only.
:param bool external_only: If this queryset is being used to query external versions only.
"""
self.internal_only = internal_only
self.external_only = external_only
super().__init__(*args, **kwargs)
def _add_from_user_projects(self, queryset, user, admin=False, member=False):
"""
Add related objects from projects where `user` is an `admin` or a `member`.
.. note::
In .org all users are admin and member of a project.
This will change with organizations soon.
"""
if user.is_authenticated:
projects_pk = user.projects.all().values_list('pk', flat=True)
user_queryset = self.filter(project__in=projects_pk)
queryset = user_queryset | queryset
return queryset
def _public_only(self):
if self.internal_only:
# Since internal versions are already filtered,
# don't do anything special.
queryset = self.filter(privacy_level=constants.PUBLIC)
elif self.external_only:
# Since external versions are already filtered,
# don't filter them again.
queryset = self.filter(
project__external_builds_privacy_level=constants.PUBLIC,
)
else:
queryset = self.filter(privacy_level=constants.PUBLIC).exclude(type=EXTERNAL)
queryset |= self.filter(
type=EXTERNAL,
project__external_builds_privacy_level=constants.PUBLIC,
)
return queryset
def public(
self,
user=None,
project=None,
only_active=True,
include_hidden=True,
only_built=False,
):
"""
Get all allowed versions.
.. note::
External versions use the `Project.external_builds_privacy_level`
field instead of its `privacy_level` field.
"""
queryset = self._public_only()
if user:
if user.is_superuser:
queryset = self.all()
else:
queryset = self._add_from_user_projects(queryset, user)
if project:
queryset = queryset.filter(project=project)
if only_active:
queryset = queryset.filter(active=True)
if only_built:
queryset = queryset.filter(built=True)
if not include_hidden:
queryset = queryset.filter(hidden=False)
return queryset.distinct()
def api(self, user=None):
return self.public(user, only_active=False)
class VersionQuerySet(SettingsOverrideObject):
_default_class = VersionQuerySetBase
class BuildQuerySetBase(models.QuerySet):
"""
Build objects that are privacy aware.
i.e. they take into account the privacy of the Version that they relate to.
"""
use_for_related_fields = True
def _add_from_user_projects(self, queryset, user, admin=False, member=False):
"""
Add related objects from projects where `user` is an `admin` or a `member`.
.. note::
In .org all users are admin and member of a project.
This will change with organizations soon.
"""
if user.is_authenticated:
projects_pk = user.projects.all().values_list('pk', flat=True)
user_queryset = self.filter(project__in=projects_pk)
queryset = user_queryset | queryset
return queryset
def public(self, user=None, project=None):
"""
Get all allowed builds.
Builds are public if the linked version and project are public.
.. note::
External versions use the `Project.external_builds_privacy_level`
field instead of its `privacy_level` field.
"""
queryset = (
self.filter(
version__privacy_level=constants.PUBLIC,
version__project__privacy_level=constants.PUBLIC,
)
.exclude(version__type=EXTERNAL)
)
queryset |= self.filter(
version__type=EXTERNAL,
project__external_builds_privacy_level=constants.PUBLIC,
project__privacy_level=constants.PUBLIC,
)
if user:
if user.is_superuser:
queryset = self.all()
else:
queryset = self._add_from_user_projects(
queryset,
user,
admin=True,
member=True,
)
if project:
queryset = queryset.filter(project=project)
return queryset.distinct()
def api(self, user=None):
return self.public(user)
def concurrent(self, project):
"""
Check if the max build concurrency for this project was reached.
- regular project: counts concurrent builds
- translation: concurrent builds of all the translations + builds of main project
.. note::
If the project/translation belongs to an organization, we count all concurrent
builds for all the projects from the organization.
:rtype: tuple
        :returns: limit_reached, number of concurrent builds, maximum number of concurrent builds allowed
"""
limit_reached = False
query = Q(
project__slug=project.slug,
# Limit builds to 5 hours ago to speed up the query
date__gte=timezone.now() - datetime.timedelta(hours=5),
)
if project.main_language_project:
# Project is a translation, counts all builds of all the translations
query |= Q(project__main_language_project=project.main_language_project)
query |= Q(project__slug=project.main_language_project.slug)
elif project.translations.exists():
# The project has translations, counts their builds as well
query |= Q(project__in=project.translations.all())
# If the project belongs to an organization, count all the projects
# from this organization as well
organization = project.organizations.first()
if organization:
query |= Q(project__in=organization.projects.all())
concurrent = (
self.filter(query)
.exclude(state__in=[BUILD_STATE_TRIGGERED, BUILD_STATE_FINISHED])
).distinct().count()
max_concurrent = Project.objects.max_concurrent_builds(project)
log.info(
'Concurrent builds. project=%s running=%s max=%s',
project.slug,
concurrent,
max_concurrent,
)
if concurrent >= max_concurrent:
limit_reached = True
return (limit_reached, concurrent, max_concurrent)
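    # Hypothetical call site (names are illustrative, not part of this
    # module): the build triggering code can use the returned tuple to
    # decide whether to delay a build.
    #
    #   limit_reached, running, max_allowed = Build.objects.concurrent(project)
    #   if limit_reached:
    #       log.warning('Delaying build. project=%s running=%s max=%s',
    #                   project.slug, running, max_allowed)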
class BuildQuerySet(SettingsOverrideObject):
_default_class = BuildQuerySetBase
class RelatedBuildQuerySet(models.QuerySet):
"""
For models with association to a project through :py:class:`Build`.
.. note::
This is only used for ``BuildCommandViewSet`` from api v2.
Which is being used to upload build command results from the builders.
"""
use_for_related_fields = True
def _add_from_user_projects(self, queryset, user):
if user.is_authenticated:
projects_pk = user.projects.all().values_list('pk', flat=True)
user_queryset = self.filter(build__project__in=projects_pk)
queryset = user_queryset | queryset
return queryset
def public(self, user=None):
queryset = self.filter(build__version__privacy_level=constants.PUBLIC)
if user:
if user.is_superuser:
queryset = self.all()
else:
queryset = self._add_from_user_projects(queryset, user)
return queryset.distinct()
def api(self, user=None):
return self.public(user)
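# Hypothetical manager wiring (an illustrative sketch, mirroring the
# functools.partial usage shown in the VersionQuerySetBase docstring above):
#
#   class Build(models.Model):
#       ...
#       objects = models.Manager.from_queryset(BuildQuerySetBase)()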
|
|
#
# Collective Knowledge (CK)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin
#
import sys
import os
##############################################################################
def load_json_file(i):
"""Load json from file into dict
Target audience: end users
Args:
json_file (str): name of a json file
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
dict (dict or list): dict or list from the json file
"""
import json
fn = i['json_file']
try:
if sys.version_info[0] > 2:
f = open(fn, 'r', encoding='utf8')
else:
f = open(fn, 'r')
except Exception as e:
return {'return': 16, 'error': 'problem opening json file='+fn+' ('+format(e)+')'}
try:
s = f.read()
except Exception as e:
f.close()
return {'return': 1, 'error': 'problem reading json file='+fn+' ('+format(e)+')'}
f.close()
try:
if sys.version_info[0] > 2:
d = json.loads(s)
else:
d = json.loads(s, encoding='utf8')
except Exception as e:
return {'return': 1, 'error': 'problem parsing json from file='+fn+' ('+format(e)+')'}
return {'return': 0, 'dict': d}
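# Hypothetical usage following the unified CK calling convention documented
# above (the file name is illustrative):
#
#   r = load_json_file({'json_file': 'input.json'})
#   if r['return'] > 0:
#       print('Error: ' + r['error'])
#   else:
#       d = r['dict']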
##############################################################################
def save_json_to_file(i):
"""Save dict to a json file
Target audience: end users
Args:
json_file (str): filename to save dictionary
dict (dict): dict to save
(sort_keys) (str): if 'yes', sort keys
(safe) (str): if 'yes', ignore non-JSON values (only for Debugging - changes original dict!)
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
"""
import json
import ck.strings
fn = i['json_file']
if i.get('safe', '') == 'yes':
d = i['dict']
sd = {}
# Check main unprintable keys
for k in d:
try:
json.dumps(d[k])
except Exception as e:
pass
else:
sd[k] = d[k]
i['dict'] = sd
r = ck.strings.dump_json(i)
if r['return'] > 0:
return r
s = r['string'].replace('\r', '')+'\n'
return save_text_file({'text_file': fn, 'string': s})
##############################################################################
def load_yaml_file(i):
"""Load YAML file to dict
Target audience: end users
Args:
yaml_file (str): name of a YAML file
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
dict (dict): dict from a YAML file
"""
import yaml
fn = i['yaml_file']
try:
if sys.version_info[0] > 2:
f = open(fn, 'r', encoding='utf8')
else:
f = open(fn, 'r')
except Exception as e:
return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}
try:
s = f.read()
except Exception as e:
f.close()
return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}
f.close()
try:
d = yaml.load(s, Loader=yaml.FullLoader)
except Exception as e:
return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}
return {'return': 0, 'dict': d}
##############################################################################
def save_yaml_to_file(i):
"""Save dict to a YAML file
Target audience: end users
Args:
yaml_file (str): name of a YAML file
dict (dict): dict to save
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
"""
import yaml
fn = i['yaml_file']
d = i['dict']
try:
        # If we use plain dump() and keys are unicode,
        # pyyaml emits a warning and the produced YAML becomes unparsable
s = yaml.safe_dump(d)
except Exception as e:
return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}
return save_text_file({'text_file': fn, 'string': s})
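# Hypothetical round trip of the two YAML helpers above (the file name is
# illustrative):
#
#   r = save_yaml_to_file({'yaml_file': 'demo.yaml', 'dict': {'a': 1}})
#   if r['return'] == 0:
#       r = load_yaml_file({'yaml_file': 'demo.yaml'})
#       # now r['dict'] == {'a': 1}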
##############################################################################
def load_text_file(i):
"""Load a text file to a string or list
Target audience: end users
Args:
text_file (str): name of a text file
(keep_as_bin) (str): if 'yes', return only bin
(encoding) (str): by default 'utf8', however sometimes we use utf16
(split_to_list) (str): if 'yes', split to list
(convert_to_dict) (str): if 'yes', split to list and convert to dict
(str_split) (str): if !='', use as separator of keys/values when converting to dict
(remove_quotes) (str): if 'yes', remove quotes from values when converting to dict
(delete_after_read) (str): if 'yes', delete file after read (useful when reading tmp files)
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
bin (byte): loaded text file as byte array
(string) (str): loaded text as string with removed \r
(lst) (list): if split_to_list=='yes', split text to list
(dict) (dict): if convert_to_dict=='yes', return as dict
"""
fn = i['text_file']
en = i.get('encoding', '')
    if en == '' or en is None:
en = 'utf8'
try:
f = open(fn, 'rb')
except Exception as e:
return {'return': 16, 'error': 'problem opening text file='+fn+' ('+format(e)+')'}
try:
b = f.read()
except Exception as e:
f.close()
return {'return': 1, 'error': 'problem reading text file='+fn+' ('+format(e)+')'}
f.close()
r = {'return': 0, 'bin': b}
if i.get('delete_after_read', '') == 'yes':
import os
os.remove(fn)
if i.get('keep_as_bin', '') != 'yes':
try:
# decode into Python string (unicode in Python3)
s = b.decode(en).replace('\r', '')
except Exception as e:
return {'return': 1, 'error': 'problem decoding content from file "'+fn+'" ('+format(e)+')'}
r['string'] = s
cl = i.get('split_to_list', '')
cd = i.get('convert_to_dict', '')
if cl == 'yes' or cd == 'yes':
lst = s.split('\n')
r['lst'] = lst
if cd == 'yes':
dd = {}
ss = i.get('str_split', '')
rq = i.get('remove_quotes', '')
if ss == '':
ss = ':'
for q in lst:
qq = q.strip()
ix = qq.find(ss)
if ix > 0:
k = qq[0:ix].strip()
v = ''
if ix+1 < len(qq):
v = qq[ix+1:].strip()
if v != '' and rq == 'yes':
if v.startswith('"'):
v = v[1:]
if v.endswith('"'):
v = v[:-1]
dd[k] = v
r['dict'] = dd
return r
##############################################################################
def save_text_file(i):
"""Save string to a text file with all \r removed
Target audience: end users
Args:
text_file (str): name of a text file
string (str): string to write to a file (all \r will be removed)
(append) (str): if 'yes', append to a file
Returns:
(dict): Unified CK dictionary:
return (int): return code = 0, if successful
> 0, if error
(error) (str): error text if return > 0
"""
fn = i['text_file']
s = i['string']
try:
s = s.replace('\r', '')
except Exception as e:
pass
try:
s = s.replace(b'\r', b'')
except Exception as e:
pass
m = 'w'
if i.get('append', '') == 'yes':
m = 'a'
try:
s = s.encode('utf8')
except Exception as e:
pass
try:
        # binary mode works for both Python 2 and 3 because the string
        # was encoded to utf8 above
f = open(fn, m+'b')
f.write(s)
except Exception as e:
return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}
f.close()
return {'return': 0}
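##############################################################################
if __name__ == '__main__':
    # A minimal, hypothetical round-trip demo of the text helpers above
    # (the file name is illustrative; the file is removed after reading).
    r = save_text_file({'text_file': 'demo.txt', 'string': 'a=1\nb="two"\n'})
    assert r['return'] == 0
    r = load_text_file({'text_file': 'demo.txt',
                        'convert_to_dict': 'yes',
                        'str_split': '=',
                        'remove_quotes': 'yes',
                        'delete_after_read': 'yes'})
    assert r['return'] == 0
    print(r['dict'])  # -> {'a': '1', 'b': 'two'}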
|
|
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs training and evaluation of the proposed model spec on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from nasbench.lib import cifar
from nasbench.lib import model_builder
from nasbench.lib import training_time
import numpy as np
import tensorflow as tf
VALID_EXCEPTIONS = (
tf.train.NanLossDuringTrainingError, # NaN loss
tf.errors.ResourceExhaustedError, # OOM
tf.errors.InvalidArgumentError, # NaN gradient
tf.errors.DeadlineExceededError, # Timed out
)
class AbortError(Exception):
"""Signals that evaluation failed for a valid reason."""
pass
def train_and_evaluate(spec, config, model_dir):
"""Train and evaluate the proposed model.
  This method trains and evaluates the model used to create the benchmark
  dataset. The default values from config.py are exactly the values used.
Args:
spec: ModelSpec object.
config: config dict generated from config.py.
model_dir: directory to store the checkpoint files.
Returns:
dict containing the evaluation metadata.
"""
return _train_and_evaluate_impl(spec, config, model_dir)
def augment_and_evaluate(spec, config, model_dir, epochs_per_eval=5):
"""Trains the model on the full training set and evaluates on test set.
"Augment" specifically refers to training the same spec in a larger network on
the full training set. Typically this involves increasing the epoch count,
number of modules/stacks, and changing the LR schedule. These changes should
be made to the config dict before calling this method.
Note: this method was not used for generating the NAS Benchmark dataset. See
train_and_evaluate instead.
Args:
spec: ModelSpec object.
config: config dict generated from config.py.
model_dir: directory to store the checkpoint files.
epochs_per_eval: number of epochs per evaluation run. Evaluation is always
run at the very start and end.
Returns:
dict containing the evaluation metadata.
"""
return _augment_and_evaluate_impl(spec, config, model_dir, epochs_per_eval)
def _train_and_evaluate_impl(spec, config, model_dir):
"""Train and evaluate implementation, see train_and_evaluate docstring."""
evaluator = _TrainAndEvaluator(spec, config, model_dir)
return evaluator.run()
class _TrainAndEvaluator(object):
"""Runs the training and evaluation."""
def __init__(self, spec, config, model_dir):
"""Initialize evaluator. See train_and_evaluate docstring."""
self.input_train = cifar.CIFARInput('train', config)
self.input_train_eval = cifar.CIFARInput('train_eval', config)
self.input_valid = cifar.CIFARInput('valid', config)
self.input_test = cifar.CIFARInput('test', config)
self.input_sample = cifar.CIFARInput('sample', config)
self.estimator = _create_estimator(spec, config, model_dir,
self.input_train.num_images,
self.input_sample.num_images)
self.spec = spec
self.config = config
self.model_dir = model_dir
def run(self):
"""Runs training and evaluation."""
attempts = 0
while True:
# Delete everything in the model dir at the start of each attempt
try:
tf.gfile.DeleteRecursively(self.model_dir)
except tf.errors.NotFoundError:
pass
tf.gfile.MakeDirs(self.model_dir)
try:
# Train
if self.config['train_seconds'] > 0.0:
timing = training_time.limit(self.config['train_seconds'])
else:
timing = training_time.limit(None)
        evaluations = list(map(float, self.config['intermediate_evaluations']))
if not evaluations or evaluations[-1] != 1.0:
evaluations.append(1.0)
assert evaluations == sorted(evaluations)
evaluation_results = []
start_time = time.time()
# Train for 1 step with 0 LR to initialize the weights, then evaluate
# once at the start for completeness, accuracies expected to be around
# random selection. Note that batch norm moving averages change during
# the step but the trainable weights do not.
self.estimator.train(
input_fn=self.input_train.input_fn,
max_steps=1,
hooks=[timing.train_hook],
saving_listeners=[timing.saving_listener])
evaluation_results.append(self._evaluate_all(0.0, 0))
for next_evaluation in evaluations:
epoch = next_evaluation * self.config['train_epochs']
train_steps = int(epoch * self.input_train.num_images /
self.config['batch_size'])
self.estimator.train(
input_fn=self.input_train.input_fn,
max_steps=train_steps,
hooks=[timing.train_hook],
saving_listeners=[timing.saving_listener])
evaluation_results.append(self._evaluate_all(epoch, train_steps))
all_time = time.time() - start_time
break # Break from retry loop on success
except VALID_EXCEPTIONS as e: # pylint: disable=catching-non-exception
attempts += 1
tf.logging.warning(str(e))
if attempts >= self.config['max_attempts']:
raise AbortError(str(e))
metadata = {
'trainable_params': _get_param_count(self.model_dir),
'total_time': all_time, # includes eval and other metric time
'evaluation_results': evaluation_results,
}
return metadata
def _evaluate_all(self, epochs, steps):
"""Runs all the evaluations."""
train_accuracy = _evaluate(self.estimator, self.input_train_eval,
self.config, name='train')
valid_accuracy = _evaluate(self.estimator, self.input_valid,
self.config, name='valid')
test_accuracy = _evaluate(self.estimator, self.input_test,
self.config, name='test')
train_time = self.estimator.get_variable_value(
training_time.TOTAL_TIME_NAME)
now = time.time()
sample_metrics = self._compute_sample_metrics()
predict_time = time.time() - now
return {
'epochs': epochs,
'training_time': train_time,
'training_steps': steps,
'train_accuracy': train_accuracy,
'validation_accuracy': valid_accuracy,
'test_accuracy': test_accuracy,
'sample_metrics': sample_metrics,
'predict_time': predict_time,
}
def _compute_sample_metrics(self):
"""Computes the metrics on a fixed batch."""
    sample_metrics = next(self.estimator.predict(
        input_fn=self.input_sample.input_fn, yield_single_examples=False))
# Fix the extra batch dimension added by PREDICT
for metric in sample_metrics:
if metric in ['logits', 'input_grad_norm']:
# Batch-shaped tensors take first batch
sample_metrics[metric] = (
sample_metrics[metric][:self.input_sample.num_images, Ellipsis])
else:
# Other tensors remove batch dimension
sample_metrics[metric] = sample_metrics[metric][0, Ellipsis]
return sample_metrics
def _augment_and_evaluate_impl(spec, config, model_dir, epochs_per_eval=5):
"""Augment and evaluate implementation, see augment_and_evaluate docstring."""
input_augment, input_test = [
cifar.CIFARInput(m, config)
for m in ['augment', 'test']]
estimator = _create_estimator(spec, config, model_dir,
input_augment.num_images)
if config['train_seconds'] > 0.0:
timing = training_time.limit(config['train_seconds'])
else:
timing = training_time.limit(None)
steps_per_epoch = input_augment.num_images / config['batch_size'] # float
ckpt = tf.train.latest_checkpoint(model_dir)
if not ckpt:
current_step = 0
else:
current_step = int(ckpt.split('-')[-1])
max_steps = int(config['train_epochs'] * steps_per_epoch)
while current_step < max_steps:
next_step = current_step + int(epochs_per_eval * steps_per_epoch)
next_step = min(next_step, max_steps)
estimator.train(
input_fn=input_augment.input_fn,
max_steps=next_step,
hooks=[timing.train_hook],
saving_listeners=[timing.saving_listener])
current_step = next_step
test_accuracy = _evaluate(estimator, input_test, config)
metadata = {
'trainable_params': _get_param_count(model_dir),
'test_accuracy': test_accuracy,
}
return metadata
def _create_estimator(spec, config, model_dir,
num_train_images, num_sample_images=None):
"""Creates the TPUEstimator object."""
# Estimator will save a checkpoint at the end of every train() call. Disable
# automatic checkpoints by setting the time interval between checkpoints to
# a very large value.
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
keep_checkpoint_max=3, # Keeps ckpt at start, halfway, and end
save_checkpoints_secs=2**30,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=config['tpu_iterations_per_loop'],
num_shards=config['tpu_num_shards']))
# This is a hack to allow PREDICT on a fixed batch on TPU. By replicating the
# batch by the number of shards, this ensures each TPU core operates on the
# entire fixed batch.
if num_sample_images and config['use_tpu']:
num_sample_images *= config['tpu_num_shards']
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=config['use_tpu'],
model_fn=model_builder.build_model_fn(
spec, config, num_train_images),
config=run_config,
train_batch_size=config['batch_size'],
eval_batch_size=config['batch_size'],
predict_batch_size=num_sample_images)
return estimator
def _evaluate(estimator, input_data, config, name=None):
"""Evaluate the estimator on the input data."""
steps = input_data.num_images // config['batch_size']
results = estimator.evaluate(
input_fn=input_data.input_fn,
steps=steps,
name=name)
return results['accuracy']
def _get_param_count(model_dir):
"""Get trainable param count from the model directory."""
tf.reset_default_graph()
checkpoint = tf.train.get_checkpoint_state(model_dir)
with tf.Session() as sess:
saver = tf.train.import_meta_graph(
checkpoint.model_checkpoint_path + '.meta')
saver.restore(sess, checkpoint.model_checkpoint_path)
params = np.sum([np.prod(v.get_shape().as_list())
for v in tf.trainable_variables()])
return params
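# Hypothetical top-level usage; building the ModelSpec and the config dict is
# done elsewhere in nasbench.lib, so the names below are assumptions:
#
#   config = config_lib.build_config()
#   metadata = train_and_evaluate(spec, config, '/tmp/model_dir')
#   final_eval = metadata['evaluation_results'][-1]
#   print(final_eval['test_accuracy'])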
|
|
# -*- encoding: utf-8 -*-
"""A script to control an HEOS player, see <https://github.com/ping13/heospy>
for details.
Specification of the HEOS interface at
http://rn.dmglobal.com/euheos/HEOS_CLI_ProtocolSpecification.pdf
"""
import json
import os
import telnetlib
import re
import logging
import argparse
import six
import sys
import time
from collections import OrderedDict
from pathlib import Path
# Simple Service Discovery Protocol (SSDP),
# https://gist.github.com/dankrause/6000248, should be right next to this file.
try:
from . import ssdp
except ImportError: # when run locally, relative import does not work
import ssdp
# determine a default path for the config file
DEFAULT_CONFIG_PATH = "."
for location in os.curdir, os.path.expanduser("~/.heospy"), os.environ.get("HEOSPY_CONF"):
if location is None:
continue
try:
testname = os.path.join(location,"config.json")
if os.path.exists(testname):
DEFAULT_CONFIG_PATH = location
break
except IOError:
pass
TIMEOUT = 15
class HeosPlayerConfigException(Exception):
pass
class HeosPlayerGeneralException(Exception):
pass
class HeosPlayer(object):
"""Representation of an HEOS player with a specific player id.
This needs a JSON config file with a minimal content:
{
"player_name": "Living Room",
"user": "me@example.com",
"pw": "do-not-use-qwerty-as-password"
}
"""
URN_SCHEMA = "urn:schemas-denon-com:device:ACT-Denon:1"
def __init__(self, rediscover = False,
config_file = os.path.join(DEFAULT_CONFIG_PATH, 'config.json')):
"""Initialize HEOS player."""
self.heosurl = 'heos://'
try:
with open(config_file) as json_data_file:
self._config = json.load(json_data_file)
except IOError:
error_msg = "cannot read your config file '{}'".format(config_file)
logging.error(error_msg)
raise HeosPlayerConfigException(error_msg)
logging.debug("use config file '{}'".format(config_file))
self.host = self._config.get("host")
self.pid = self._config.get("pid")
self.main_player_name = self._config.get("player_name", self._config.get("main_player_name"))
self._config_file = config_file
self.names = dict()
self.names["players"] = self._config.get("players", {})
self.names["groups"] = self._config.get("groups", {})
self.groups = None
if self.main_player_name is None:
logging.warn("No main player name given.")
raise HeosPlayerGeneralException("No main player name given.")
# if host and pid is not known, detect the first HEOS device.
if rediscover or (not self.host or not self.pid):
logging.info(u"Starting to discover your HEOS player '{}' in your local network".format(self.main_player_name))
ssdp_list = ssdp.discover(self.URN_SCHEMA)
logging.debug("found {} possible hosts: {}".format(len(ssdp_list), ssdp_list))
self.telnet = None
for response in ssdp_list:
if response.st == self.URN_SCHEMA:
try:
self.host = re.match(r"http:..([^\:]+):", response.location).group(1)
logging.debug("Testing host '{}'".format(self.host))
self.telnet = telnetlib.Telnet(self.host, 1255)
logging.debug("Telnet '{}'".format(self.telnet))
self.pid = self._get_player(self.main_player_name)
logging.debug("pid '{}'".format(self.pid))
if self.pid:
self.main_player_name = self._config.get("player_name", self._config.get("main_player_name"))
logging.info(u"Found main player '{}' in your local network".format(self.main_player_name))
break
except Exception as e:
logging.error(e)
pass
            if self.telnet is None:
msg = "couldn't discover any HEOS player with Simple Service Discovery Protocol (SSDP)."
logging.error(msg)
raise HeosPlayerGeneralException(msg)
self._update_groups_players()
else:
logging.info(u"My cache says your HEOS player '{}' is at {}".format(self.main_player_name,
self.host))
try:
self.telnet = telnetlib.Telnet(self.host, 1255, timeout=TIMEOUT)
except Exception as e:
raise HeosPlayerGeneralException("telnet failed")
# check if we've found what we were looking for
if self.host is None:
logging.error("No HEOS player found in your local network")
elif self.pid is None:
logging.error(u"No player with name '{}' found for being a main player!".format(self.main_player_name))
else:
# get user and password
if self.login(user=self._config.get("user"),
pw = self._config.get("pw")):
self.user = self._config.get("user")
# save config
if (rediscover or self._config.get("pid") is None) and self.host and self.pid:
logging.info("Save host and pid in {}".format(self._config_file))
self._config["pid"] = self.pid
self._config["host"] = self.host
with open(os.path.join(self._config_file), "w") as json_data_file:
json.dump(self._config, json_data_file, indent=2)
def __repr__(self):
return "<HeosPlayer({main_player_name}, {user}, {host}, {pid})>".format(**self.__dict__)
def telnet_request(self, command, wait = True):
"""Execute a `command` and return the response(s)."""
command = self.heosurl + command
logging.debug("telnet request {}".format(command))
self.telnet.write(command.encode('ascii') + b'\n')
response = b''
logging.debug("starting response loop")
while True:
response += self.telnet.read_some()
try:
response = json.loads(response.decode("utf-8"))
logging.debug("found valid JSON: {}".format(json.dumps(response)))
if not wait:
logging.debug("I accept the first response: {}".format(response))
break
# sometimes, I get a response with the message "under
# process". I might want to wait here
message = response.get("heos", {}).get("message", "")
if "command under process" not in message:
logging.debug("I assume this is the final response: {}".format(response))
break
logging.debug("Wait for the final response")
response = b'' # forget this message
except ValueError:
logging.debug("... unfinished response: {}".format(response))
# response is not a complete JSON object
pass
except TypeError:
logging.debug("... unfinished response: {}".format(response))
# response is not a complete JSON object
pass
# try to parse the message attribute of the response, there might be
# some useful information, especially if the payload attribute is
# missing
if response.get("heos", {}).get("message"):
message_parsed_list = response.get("heos", {}).get("message").split("&")
logging.debug(message_parsed_list)
message_parsed = OrderedDict()
for item in message_parsed_list:
split_items = item.split("=")
if len(split_items) == 1:
split_items.append(True)
message_parsed[split_items[0]] = split_items[1]
response["heos_message_parsed"] = message_parsed
logging.debug("found valid response: {}".format(json.dumps(response)))
return response
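    # For illustration, a parsed response has the following shape (values are
    # hypothetical; 'heos_message_parsed' is added by this method, the rest
    # follows the HEOS CLI specification referenced in the module docstring):
    #
    #   {"heos": {"command": "player/set_volume",
    #             "result": "success",
    #             "message": "pid=123&level=19"},
    #    "heos_message_parsed": {"pid": "123", "level": "19"}}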
def _update_groups_players(self):
idx = { "groups" : "gid", "players" : "pid" }
for aggregate in ["players", "groups"]:
result = self.telnet_request("player/get_{}".format(aggregate)).get("payload")
if result:
self.names[aggregate] = {}
logging.debug(result)
for this_item in result:
self.names[aggregate][this_item["name"]] = this_item[ idx[aggregate] ]
self._config[aggregate] = self.names[aggregate]
logging.info("In total, I found {} {} in your local network.".format(len(self.names[aggregate]), aggregate))
else:
msg = "I couldn't find a list of {}.".format(aggregate)
if aggregate == "groups":
                    logging.warning(msg)
else:
logging.error(msg)
raise HeosPlayerGeneralException(msg)
return True
def _get_player(self, name):
response = self.telnet_request("player/get_players")
if response.get("payload") is None:
return None
for player in response.get("payload"):
logging.debug(u"found '{}', looking for '{}'".format(player.get("name"), name))
if player.get("name") == name:
return player.get("pid")
return None
def login(self, user = None , pw = None):
if user is None or pw is None:
logging.info("No user or password found in config, skip login step")
return {}
        # first check if we're already signed in: get the currently signed-in
# user from the system
signed_in_message = self.telnet_request("system/check_account").get("heos",{}).get("message", "")
is_signed_in = "signed_in&" in signed_in_message
if is_signed_in:
# if signed in, we should also have the same user here.
signed_in_user = signed_in_message.split("&")[1][3:]
if signed_in_user==user:
logging.info("Already signed in as {}".format(signed_in_user))
return True
else:
logging.info("user '{}' is different from '{}'".format(signed_in_user, user))
# At this point, it seems as if we have to really sign in, which takes
# a second or two...
logging.info("Need to sign in as {} to have access to favorites etc.".format(user))
return self.telnet_request("system/sign_in?un={}&pw={}".format(user, pw))
def cmd(self, cmd, args):
""" issue a command for our player """
        # parse args and check if there is a gid or pid explicitly given
args_concatenated = ""
pid_explicitly_given = False
gid_explicitly_given = False
for (key,value) in six.iteritems(args):
if key == "pname" or key == "gname" : # these are custom command, working only with this package
idx = { "gname" : "gid", "pname" : "pid" }
idx2 = { "gid" : "groups", "pid" : "players" }
# reassign key
key = idx[key]
# analyse the value, which could be one or many speaker names
named_values = value.split(u",")
value_list = []
for named_value in named_values:
new_value = self.names[ idx2[key] ].get(named_value, False)
if new_value is False:
raise HeosPlayerGeneralException("Name '{}' is not known. ({}). Try to rediscover (use flag '-r').".format(value, self.names[idx2[key]].keys()))
logging.debug("translated name '{}' to {}={}".format(value, idx, new_value))
value_list.append(str(new_value))
value = ",".join(value_list)
args_concatenated += "&{}={}".format(key, value)
logging.info("cmd : {}, args {}".format(cmd, args_concatenated))
if key == "pid":
pid_explicitly_given = True
if key == "gid":
gid_explicitly_given = True
if self.pid is None and ("groups/" in cmd or "group/" in cmd or "browse/" in cmd):
logging.warn("No default player is defined.")
else:
# if this is a command where a gid or a pid is needed, check if we
# could use the default pid from the config file
if ("groups/" in cmd or "group/" in cmd) and not gid_explicitly_given:
logging.info("I assume default group with id {0}".format(self.pid))
s = '{0}?gid={1}'.format(cmd, self.pid)
elif ("player/" in cmd or "players" in cmd) and not pid_explicitly_given:
logging.info("I assume default player with id {0}".format(self.pid))
s = '{0}?pid={1}'.format(cmd, self.pid)
else:
s = '{0}?dummy=1'.format(cmd) # use dummy so that
# args_concatenated is correctly attached
return self.telnet_request(s + args_concatenated)
def status(self):
s = { "general" : [], "player" : [] }
s["general"].append(self.telnet_request("system/heart_beat"))
s["general"].append(self.telnet_request("system/check_account"))
s["general"].append(self.telnet_request("browse/get_music_sources"))
s["general"].append(self.telnet_request("player/get_players"))
s["general"].append(self.telnet_request("group/get_groups"))
if self.pid:
s["player"].append(self.telnet_request("player/get_play_state?pid={0}".format(self.pid)))
s["player"].append(self.telnet_request("player/get_player_info?pid={0}".format(self.pid)))
s["player"].append(self.telnet_request("player/get_volume?pid={0}".format(self.pid)))
s["player"].append(self.telnet_request("player/get_mute?pid={0}".format(self.pid)))
s["player"].append(self.telnet_request('player/get_now_playing_media?pid={0}'.format(self.pid)))
return s
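# Hypothetical interactive usage (requires a reachable HEOS player and a
# valid config.json as described in the class docstring; values are
# illustrative):
#
#   p = HeosPlayer()
#   p.cmd('player/set_volume', {'level': '19'})
#   p.cmd('player/play_preset', {'preset': '3'})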
def parse_args():
"""Parse command line arguments."""
epilog = f"""Some example commands:
heos_player player/toggle_mute
heos_player player/set_volume -p level=19
heos_player player/play_preset -p preset=3
heos_player player/set_play_state -p state=stop
"""
parser = argparse.ArgumentParser(description=__doc__, epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("cmd", nargs="?",
help="command to send to HEOS player")
parser.add_argument("-i", '--infile', nargs='?', type=argparse.FileType('r'),
default=None, help="specifiy a sequence of commands")
parser.add_argument("-s", "--status", action='store_true', default=False,
help="return various status information", dest="status")
parser.add_argument("-r", "--rediscover", action='store_true', default=False,
help="force to discover HEOS IP address and player id", dest="rediscover")
parser.add_argument("-p", "--param", action='append',
type=lambda kv: kv.split("="), dest='param', metavar="param=value",
help="optional key-value pairs that needs to be accompanied to the command that is sent to the HEOS player.")
parser.add_argument("-c", "--config", dest="config", default="", metavar="filename",
help="config file (by default, the script looks for a config file called `config.json` in the current directory, then in $HOME/.heospy/, then in the path specified in $HEOSPY_CONF)")
parser.add_argument("-lf", "--lockfile", dest="lockfile", default="", metavar="filename",
help="create a lockfile while processing.")
parser.add_argument("-l", "--log", dest="logLevel", default="INFO",
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help="Set the logging level")
return parser.parse_args()
def main():
script_args = parse_args()
heos_cmd = script_args.cmd
heos_args = {}
# if there is a lockfile specified, create one. This is helpful to indicate
# to systems like Homebridge that this process is actually running
if script_args.lockfile:
Path(script_args.lockfile).touch()
try:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=getattr(logging, script_args.logLevel))
if script_args.param:
try:
heos_args = OrderedDict(script_args.param)
except ValueError:
logging.error("there are some errors in your '--param' arguments: '{}'".format(script_args.param))
sys.exit(0)
# determine the config file
logging.debug("DEFAULT_CONFIG_PATH is '{}'".format(DEFAULT_CONFIG_PATH))
config_file = os.path.join(DEFAULT_CONFIG_PATH, 'config.json')
if script_args.config:
config_file = script_args.config
logging.debug("from --config, I got '{}'".format(config_file))
# initialize connection to HEOS player
try:
p = HeosPlayer(rediscover = script_args.rediscover, config_file=config_file)
except HeosPlayerConfigException:
logging.info("Try to find a valid config file and specify it with '--config'...")
sys.exit(-1)
except HeosPlayerGeneralException:
# if the connection failed, it might be because the cached IP for
# the HEOS player is not valid anymore. We check if we can rediscover
# the new IP of the HEOS player
            if not script_args.rediscover:
logging.info("First connection failed. Try to rediscover the HEOS players.")
p = HeosPlayer(rediscover = True, config_file=config_file)
except:
logging.error("Someting unexpected got wrong...")
raise
# check status or issue a command
if script_args.status:
logging.info("Try to find some status info from {}".format(p.host))
print(json.dumps(p.status(), indent=2))
elif script_args.infile:
logging.debug("reading a list of commands from {}".format(script_args.infile))
all_lines = script_args.infile.read().splitlines()
# execute each cmd
all_results = []
fail = False
for line in all_lines:
if len(line) > 0 and line[0] == "#": continue # skip comments
# get elements separated by whitespaces
cmd_args = line.split()
if len(cmd_args) == 0: continue
# first element is the command, like "player/set_volume"
heos_cmd = cmd_args[0]
# check if we want to ignore a fail here
ignore_fail = False
if cmd_args[-1] == "--ignore-fail":
ignore_fail = True
cmd_args = cmd_args[0:-1]
if heos_cmd == "wait": # this is a special command
try:
secs = int(cmd_args[1])
except IndexError:
secs=1
time.sleep(secs)
all_results.append({ "heospy" : { "sleep": "successful for {} secs".format(secs) } })
else:
# other elements are parameters like "level=10" or "pid=387387",
# collect them in a dictionary
heos_args = OrderedDict([ kv.split("=") for kv in cmd_args[1:] ])
# issue the actual command
logging.info("Issue command '{}' with arguments {}".format(heos_cmd, json.dumps(heos_args)))
result = p.cmd(heos_cmd, heos_args)
all_results.append(result)
if result.get("heos", {}).get("result", "") != "success" and not ignore_fail:
fail = True
break
# print all results at the end
print(json.dumps(all_results, indent=2))
# if the last result was not a success, return with -1
if fail:
sys.exit(-1)
elif heos_cmd:
logging.info("Issue command '{}' with arguments {}".format(heos_cmd, json.dumps(heos_args)))
result = p.cmd(heos_cmd, heos_args)
print(json.dumps(result, indent=2))
# if the result was not a success, return with -1
if result.get("heos", {}).get("result", "") != "success":
sys.exit(-1)
else:
logging.info("Nothing to do.")
finally:
        # delete the lockfile created earlier, no matter what happened before
if script_args.lockfile:
Path(script_args.lockfile).unlink()
|
|
#!/usr/bin/env python
#
#===- run-clang-tidy.py - Parallel clang-tidy runner ---------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
# FIXME: Integrate with clang-tidy-diff.py
"""
Parallel clang-tidy runner
==========================
Runs clang-tidy over all files in a compilation database. Requires clang-tidy
and clang-apply-replacements in $PATH.
Example invocations.
- Run clang-tidy on all files in the current working directory with a default
set of checks and show warnings in the cpp files and all project headers.
run-clang-tidy.py $PWD
- Fix all header guards.
run-clang-tidy.py -fix -checks=-*,llvm-header-guard
- Fix all header guards included from clang-tidy and header guards
for clang-tidy headers.
run-clang-tidy.py -fix -checks=-*,llvm-header-guard extra/clang-tidy \
-header-filter=extra/clang-tidy
Compilation database setup:
http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
"""
from __future__ import print_function
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
try:
import yaml
except ImportError:
yaml = None
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def find_compilation_database(path):
"""Adjusts the directory until a compilation database is found."""
result = './'
while not os.path.isfile(os.path.join(result, path)):
if os.path.realpath(result) == '/':
print('Error: could not find compilation database.')
sys.exit(1)
result += '../'
return os.path.realpath(result)
def make_absolute(f, directory):
if os.path.isabs(f):
return f
return os.path.normpath(os.path.join(directory, f))
def get_tidy_invocation(f, clang_tidy_binary, checks, tmpdir, build_path,
header_filter, allow_enabling_alpha_checkers,
extra_arg, extra_arg_before, quiet, config):
"""Gets a command line for clang-tidy."""
start = [clang_tidy_binary]
if allow_enabling_alpha_checkers is not None:
start.append('-allow-enabling-analyzer-alpha-checkers')
if header_filter is not None:
start.append('-header-filter=' + header_filter)
if checks:
start.append('-checks=' + checks)
if tmpdir is not None:
start.append('-export-fixes')
# Get a temporary file. We immediately close the handle so clang-tidy can
# overwrite it.
(handle, name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
os.close(handle)
start.append(name)
for arg in extra_arg:
start.append('-extra-arg=%s' % arg)
for arg in extra_arg_before:
start.append('-extra-arg-before=%s' % arg)
start.append('-p=' + build_path)
if quiet:
start.append('-quiet')
if config:
start.append('-config=' + config)
start.append(f)
return start
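# Schematically, the invocation assembled above looks like this (each flag is
# present only when the corresponding option was given; paths illustrative):
#
#   clang-tidy [-allow-enabling-analyzer-alpha-checkers]
#       [-header-filter=<re>] [-checks=<checks>] [-export-fixes <tmp>.yaml]
#       [-extra-arg=...] [-extra-arg-before=...] -p=<build_path>
#       [-quiet] [-config=<cfg>] <file>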
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey="Diagnostics"
merged=[]
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
    with open(replacefile, 'r') as replace_fd:
      content = yaml.safe_load(replace_fd)
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close()
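# For reference, the merged file written above has this schematic shape; the
# entries under 'Diagnostics' are collected verbatim from the per-file dumps:
#
#   MainSourceFile: ''
#   Diagnostics:
#     - <diagnostic entries from each *.yaml in tmpdir>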
def check_clang_apply_replacements_binary(args):
"""Checks if invoking supplied clang-apply-replacements binary works."""
try:
subprocess.check_call([args.clang_apply_replacements_binary, '--version'])
except:
print('Unable to run clang-apply-replacements. Is clang-apply-replacements '
'binary correctly specified?', file=sys.stderr)
traceback.print_exc()
sys.exit(1)
def apply_fixes(args, tmpdir):
"""Calls clang-apply-fixes on a given directory."""
invocation = [args.clang_apply_replacements_binary]
if args.format:
invocation.append('-format')
if args.style:
invocation.append('-style=' + args.style)
invocation.append(tmpdir)
subprocess.call(invocation)
def run_tidy(args, tmpdir, build_path, queue, lock, failed_files):
"""Takes filenames out of queue and runs clang-tidy on them."""
while True:
name = queue.get()
invocation = get_tidy_invocation(name, args.clang_tidy_binary, args.checks,
tmpdir, build_path, args.header_filter,
args.allow_enabling_alpha_checkers,
args.extra_arg, args.extra_arg_before,
args.quiet, args.config)
proc = subprocess.Popen(invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = proc.communicate()
if proc.returncode != 0:
failed_files.append(name)
with lock:
sys.stdout.write(' '.join(invocation) + '\n' + output.decode('utf-8'))
if len(err) > 0:
sys.stdout.flush()
sys.stderr.write(err.decode('utf-8'))
queue.task_done()
def main():
parser = argparse.ArgumentParser(description='Runs clang-tidy over all files '
'in a compilation database. Requires '
'clang-tidy and clang-apply-replacements in '
'$PATH.')
parser.add_argument('-allow-enabling-alpha-checkers',
action='store_true', help='allow alpha checkers from '
'clang-analyzer.')
parser.add_argument('-clang-tidy-binary', metavar='PATH',
default='clang-tidy',
help='path to clang-tidy binary')
parser.add_argument('-clang-apply-replacements-binary', metavar='PATH',
default='clang-apply-replacements',
help='path to clang-apply-replacements binary')
parser.add_argument('-checks', default=None,
help='checks filter, when not specified, use clang-tidy '
'default')
parser.add_argument('-config', default=None,
help='Specifies a configuration in YAML/JSON format: '
' -config="{Checks: \'*\', '
' CheckOptions: [{key: x, '
' value: y}]}" '
'When the value is empty, clang-tidy will '
'attempt to find a file named .clang-tidy for '
'each source file in its parent directories.')
parser.add_argument('-header-filter', default=None,
help='regular expression matching the names of the '
'headers to output diagnostics from. Diagnostics from '
'the main file of each translation unit are always '
'displayed.')
if yaml:
parser.add_argument('-export-fixes', metavar='filename', dest='export_fixes',
help='Create a yaml file to store suggested fixes in, '
'which can be applied with clang-apply-replacements.')
parser.add_argument('-j', type=int, default=0,
help='number of tidy instances to be run in parallel.')
parser.add_argument('files', nargs='*', default=['.*'],
help='files to be processed (regex on path)')
parser.add_argument('-fix', action='store_true', help='apply fix-its')
parser.add_argument('-format', action='store_true', help='Reformat code '
'after applying fixes')
parser.add_argument('-style', default='file', help='The style of reformat '
'code after applying fixes')
parser.add_argument('-p', dest='build_path',
help='Path used to read a compile command database.')
parser.add_argument('-extra-arg', dest='extra_arg',
action='append', default=[],
help='Additional argument to append to the compiler '
'command line.')
parser.add_argument('-extra-arg-before', dest='extra_arg_before',
action='append', default=[],
help='Additional argument to prepend to the compiler '
'command line.')
parser.add_argument('-quiet', action='store_true',
help='Run clang-tidy in quiet mode')
args = parser.parse_args()
db_path = 'compile_commands.json'
if args.build_path is not None:
build_path = args.build_path
else:
# Find our database
build_path = find_compilation_database(db_path)
try:
invocation = [args.clang_tidy_binary, '-list-checks']
if args.allow_enabling_alpha_checkers:
invocation.append('-allow-enabling-analyzer-alpha-checkers')
invocation.append('-p=' + build_path)
if args.checks:
invocation.append('-checks=' + args.checks)
invocation.append('-')
if args.quiet:
# Even with -quiet we still want to check if we can call clang-tidy.
with open(os.devnull, 'w') as dev_null:
subprocess.check_call(invocation, stdout=dev_null)
else:
subprocess.check_call(invocation)
except:
print("Unable to run clang-tidy.", file=sys.stderr)
sys.exit(1)
# Load the database and extract all files.
database = json.load(open(os.path.join(build_path, db_path)))
files = [make_absolute(entry['file'], entry['directory'])
for entry in database]
max_task = args.j
if max_task == 0:
max_task = multiprocessing.cpu_count()
tmpdir = None
if args.fix or (yaml and args.export_fixes):
check_clang_apply_replacements_binary(args)
tmpdir = tempfile.mkdtemp()
# Build up a big regexy filter from all command line arguments.
file_name_re = re.compile('|'.join(args.files))
return_code = 0
try:
# Spin up a bunch of tidy-launching threads.
task_queue = queue.Queue(max_task)
# List of files with a non-zero return code.
failed_files = []
lock = threading.Lock()
for _ in range(max_task):
t = threading.Thread(target=run_tidy,
args=(args, tmpdir, build_path, task_queue, lock, failed_files))
t.daemon = True
t.start()
# Fill the queue with files.
for name in files:
if file_name_re.search(name):
task_queue.put(name)
# Wait for all threads to be done.
task_queue.join()
if len(failed_files):
return_code = 1
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print('\nCtrl-C detected, goodbye.')
if tmpdir:
shutil.rmtree(tmpdir)
os.kill(0, 9)
if yaml and args.export_fixes:
print('Writing fixes to ' + args.export_fixes + ' ...')
try:
merge_replacement_files(tmpdir, args.export_fixes)
except:
print('Error exporting fixes.\n', file=sys.stderr)
traceback.print_exc()
return_code=1
if args.fix:
print('Applying fixes ...')
try:
apply_fixes(args, tmpdir)
except:
print('Error applying fixes.\n', file=sys.stderr)
traceback.print_exc()
return_code=1
if tmpdir:
shutil.rmtree(tmpdir)
sys.exit(return_code)
if __name__ == '__main__':
main()
|
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# This script aims to replicate the behavior of examples/sir_hmc.py but using
# the high-level components of pyro.contrib.epidemiology. Command line
# arguments and results should be similar.
import argparse
import logging
import math
import torch
from torch.distributions import biject_to, constraints
import pyro
from pyro.contrib.epidemiology.models import (
HeterogeneousSIRModel,
OverdispersedSEIRModel,
OverdispersedSIRModel,
SimpleSEIRModel,
SimpleSIRModel,
SuperspreadingSEIRModel,
SuperspreadingSIRModel,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
def Model(args, data):
"""Dispatch between different model classes."""
if args.heterogeneous:
assert args.incubation_time == 0
assert args.overdispersion == 0
return HeterogeneousSIRModel(args.population, args.recovery_time, data)
elif args.incubation_time > 0:
assert args.incubation_time > 1
if args.concentration < math.inf:
return SuperspreadingSEIRModel(
args.population, args.incubation_time, args.recovery_time, data
)
elif args.overdispersion > 0:
return OverdispersedSEIRModel(
args.population, args.incubation_time, args.recovery_time, data
)
else:
return SimpleSEIRModel(
args.population, args.incubation_time, args.recovery_time, data
)
else:
if args.concentration < math.inf:
return SuperspreadingSIRModel(args.population, args.recovery_time, data)
elif args.overdispersion > 0:
return OverdispersedSIRModel(args.population, args.recovery_time, data)
else:
return SimpleSIRModel(args.population, args.recovery_time, data)
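# Quick reference for the dispatch above (restating the branches for
# readability):
#
#   heterogeneous                    -> HeterogeneousSIRModel
#   incubation_time > 0, k < inf     -> SuperspreadingSEIRModel
#   incubation_time > 0, od > 0      -> OverdispersedSEIRModel
#   incubation_time > 0              -> SimpleSEIRModel
#   k < inf                          -> SuperspreadingSIRModel
#   od > 0                           -> OverdispersedSIRModel
#   otherwise                        -> SimpleSIRModel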
def generate_data(args):
extended_data = [None] * (args.duration + args.forecast)
model = Model(args, extended_data)
logging.info("Simulating from a {}".format(type(model).__name__))
for attempt in range(100):
samples = model.generate(
{
"R0": args.basic_reproduction_number,
"rho": args.response_rate,
"k": args.concentration,
"od": args.overdispersion,
}
)
obs = samples["obs"][: args.duration]
new_I = samples.get("S2I", samples.get("E2I"))
obs_sum = int(obs.sum())
new_I_sum = int(new_I[: args.duration].sum())
assert 0 <= args.min_obs_portion < args.max_obs_portion <= 1
min_obs = int(math.ceil(args.min_obs_portion * args.population))
max_obs = int(math.floor(args.max_obs_portion * args.population))
if min_obs <= obs_sum <= max_obs:
logging.info(
"Observed {:d}/{:d} infections:\n{}".format(
obs_sum, new_I_sum, " ".join(str(int(x)) for x in obs)
)
)
return {"new_I": new_I, "obs": obs}
if obs_sum < min_obs:
raise ValueError(
"Failed to generate >={} observations. "
"Try decreasing --min-obs-portion (currently {}).".format(
min_obs, args.min_obs_portion
)
)
else:
raise ValueError(
"Failed to generate <={} observations. "
"Try increasing --max-obs-portion (currently {}).".format(
max_obs, args.max_obs_portion
)
)
def infer_mcmc(args, model):
parallel = args.num_chains > 1
energies = []
def hook_fn(kernel, *unused):
e = float(kernel._potential_energy_last)
energies.append(e)
if args.verbose:
logging.info("potential = {:0.6g}".format(e))
mcmc = model.fit_mcmc(
heuristic_num_particles=args.smc_particles,
heuristic_ess_threshold=args.ess_threshold,
warmup_steps=args.warmup_steps,
num_samples=args.num_samples,
num_chains=args.num_chains,
mp_context="spawn" if parallel else None,
max_tree_depth=args.max_tree_depth,
arrowhead_mass=args.arrowhead_mass,
num_quant_bins=args.num_bins,
haar=args.haar,
haar_full_mass=args.haar_full_mass,
jit_compile=args.jit,
hook_fn=None if parallel else hook_fn,
)
mcmc.summary()
if args.plot and energies:
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 3))
plt.plot(energies)
plt.xlabel("MCMC step")
plt.ylabel("potential energy")
plt.title("MCMC energy trace")
plt.tight_layout()
return model.samples
def infer_svi(args, model):
losses = model.fit_svi(
heuristic_num_particles=args.smc_particles,
heuristic_ess_threshold=args.ess_threshold,
num_samples=args.num_samples,
num_steps=args.svi_steps,
num_particles=args.svi_particles,
haar=args.haar,
jit=args.jit,
)
if args.plot:
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 3))
plt.plot(losses)
plt.xlabel("SVI step")
plt.ylabel("loss")
plt.title("SVI Convergence")
plt.tight_layout()
return model.samples
def evaluate(args, model, samples):
# Print estimated values.
names = {"basic_reproduction_number": "R0"}
if not args.heterogeneous:
names["response_rate"] = "rho"
if args.concentration < math.inf:
names["concentration"] = "k"
if "od" in samples:
names["overdispersion"] = "od"
for name, key in names.items():
mean = samples[key].mean().item()
std = samples[key].std().item()
logging.info(
"{}: truth = {:0.3g}, estimate = {:0.3g} \u00B1 {:0.3g}".format(
key, getattr(args, name), mean, std
)
)
# Optionally plot histograms and pairwise correlations.
if args.plot:
import matplotlib.pyplot as plt
import seaborn as sns
# Plot individual histograms.
fig, axes = plt.subplots(len(names), 1, figsize=(5, 2.5 * len(names)))
if len(names) == 1:
axes = [axes]
axes[0].set_title("Posterior parameter estimates")
for ax, (name, key) in zip(axes, names.items()):
truth = getattr(args, name)
sns.distplot(samples[key], ax=ax, label="posterior")
ax.axvline(truth, color="k", label="truth")
ax.set_xlabel(key + " = " + name.replace("_", " "))
ax.set_yticks(())
ax.legend(loc="best")
plt.tight_layout()
# Plot pairwise joint distributions for selected variables.
covariates = [(name, samples[name]) for name in names.values()]
for i, aux in enumerate(samples["auxiliary"].squeeze(1).unbind(-2)):
covariates.append(("aux[{},0]".format(i), aux[:, 0]))
covariates.append(("aux[{},-1]".format(i), aux[:, -1]))
N = len(covariates)
fig, axes = plt.subplots(N, N, figsize=(8, 8), sharex="col", sharey="row")
for i in range(N):
axes[i][0].set_ylabel(covariates[i][0])
axes[0][i].set_xlabel(covariates[i][0])
axes[0][i].xaxis.set_label_position("top")
for j in range(N):
ax = axes[i][j]
ax.set_xticks(())
ax.set_yticks(())
ax.scatter(
covariates[j][1],
-covariates[i][1],
lw=0,
color="darkblue",
alpha=0.3,
)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
# Plot Pearson correlation for every pair of unconstrained variables.
def unconstrain(constraint, value):
value = biject_to(constraint).inv(value)
return value.reshape(args.num_samples, -1)
covariates = [("R1", unconstrain(constraints.positive, samples["R0"]))]
if not args.heterogeneous:
covariates.append(
("rho", unconstrain(constraints.unit_interval, samples["rho"]))
)
if "k" in samples:
covariates.append(("k", unconstrain(constraints.positive, samples["k"])))
constraint = constraints.interval(-0.5, model.population + 0.5)
for name, aux in zip(model.compartments, samples["auxiliary"].unbind(-2)):
covariates.append((name, unconstrain(constraint, aux)))
x = torch.cat([v for _, v in covariates], dim=-1)
x -= x.mean(0)
x /= x.std(0)
x = x.t().matmul(x)
x /= args.num_samples
x.clamp_(min=-1, max=1)
plt.figure(figsize=(8, 8))
plt.imshow(x, cmap="bwr")
ticks = torch.tensor([0] + [v.size(-1) for _, v in covariates]).cumsum(0)
ticks = (ticks[1:] + ticks[:-1]) / 2
plt.yticks(ticks, [name for name, _ in covariates])
plt.xticks(())
plt.tick_params(length=0)
plt.title("Pearson correlation (unconstrained coordinates)")
plt.tight_layout()
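# A small illustration of the unconstraining trick above: biject_to(c)
# returns the transform mapping unconstrained reals onto constraint c, so
# its .inv sends constrained samples back to an unconstrained space (log
# for positives, logit for the unit interval), where Pearson correlations
# are meaningful. Illustrative helper, not called by the script.
def _unconstrain_examples():
    x = torch.tensor([0.5, 1.0, 2.0])
    log_x = biject_to(constraints.positive).inv(x)
    assert torch.allclose(log_x, x.log())
    p = torch.tensor([0.25, 0.5, 0.75])
    logit_p = biject_to(constraints.unit_interval).inv(p)
    assert torch.allclose(logit_p, torch.log(p / (1 - p)))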
def predict(args, model, truth):
samples = model.predict(forecast=args.forecast)
obs = model.data
new_I = samples.get("S2I", samples.get("E2I"))
median = new_I.median(dim=0).values
logging.info(
"Median prediction of new infections (starting on day 0):\n{}".format(
" ".join(map(str, map(int, median)))
)
)
# Optionally plot the latent and forecasted series of new infections.
if args.plot:
import matplotlib.pyplot as plt
plt.figure()
time = torch.arange(args.duration + args.forecast)
p05 = new_I.kthvalue(int(round(0.5 + 0.05 * args.num_samples)), dim=0).values
p95 = new_I.kthvalue(int(round(0.5 + 0.95 * args.num_samples)), dim=0).values
plt.fill_between(time, p05, p95, color="red", alpha=0.3, label="90% CI")
plt.plot(time, median, "r-", label="median")
plt.plot(time[: args.duration], obs, "k.", label="observed")
if truth is not None:
plt.plot(time, truth, "k--", label="truth")
plt.axvline(args.duration - 0.5, color="gray", lw=1)
plt.xlim(0, len(time) - 1)
plt.ylim(0, None)
plt.xlabel("day after first infection")
plt.ylabel("new infections per day")
plt.title("New infections in population of {}".format(args.population))
plt.legend(loc="upper left")
plt.tight_layout()
# Plot Re time series.
if args.heterogeneous:
plt.figure()
Re = samples["Re"]
median = Re.median(dim=0).values
p05 = Re.kthvalue(int(round(0.5 + 0.05 * args.num_samples)), dim=0).values
p95 = Re.kthvalue(int(round(0.5 + 0.95 * args.num_samples)), dim=0).values
plt.fill_between(time, p05, p95, color="red", alpha=0.3, label="90% CI")
plt.plot(time, median, "r-", label="median")
plt.plot(time[: args.duration], obs, "k.", label="observed")
plt.axvline(args.duration - 0.5, color="gray", lw=1)
plt.xlim(0, len(time) - 1)
plt.ylim(0, None)
plt.xlabel("day after first infection")
plt.ylabel("Re")
plt.title("Effective reproductive number over time")
plt.legend(loc="upper left")
plt.tight_layout()
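# The 90% band above uses torch.kthvalue rather than torch.quantile:
# k = round(0.5 + q * n) selects the order statistic closest to the q-th
# quantile of n posterior draws. A small illustrative helper (not called):
def _kthvalue_quantile_example(n=200):
    draws = torch.randn(n, 30)            # n samples of a length-30 series
    k05 = int(round(0.5 + 0.05 * n))      # ~5th percentile order statistic
    k95 = int(round(0.5 + 0.95 * n))      # ~95th percentile order statistic
    p05 = draws.kthvalue(k05, dim=0).values
    p95 = draws.kthvalue(k95, dim=0).values
    assert (p05 <= p95).all()
    return p05, p95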
def main(args):
pyro.set_rng_seed(args.rng_seed)
# Generate data.
dataset = generate_data(args)
obs = dataset["obs"]
# Run inference.
model = Model(args, obs)
infer = {"mcmc": infer_mcmc, "svi": infer_svi}[args.infer]
samples = infer(args, model)
# Evaluate fit.
evaluate(args, model, samples)
# Predict latent time series.
if args.forecast:
predict(args, model, truth=dataset["new_I"])
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(
description="Compartmental epidemiology modeling using HMC"
)
parser.add_argument("-p", "--population", default=1000, type=float)
parser.add_argument("-m", "--min-obs-portion", default=0.01, type=float)
parser.add_argument("-M", "--max-obs-portion", default=0.99, type=float)
parser.add_argument("-d", "--duration", default=20, type=int)
parser.add_argument("-f", "--forecast", default=10, type=int)
parser.add_argument("-R0", "--basic-reproduction-number", default=1.5, type=float)
parser.add_argument("-tau", "--recovery-time", default=7.0, type=float)
parser.add_argument(
"-e",
"--incubation-time",
default=0.0,
type=float,
help="If zero, use SIR model; if > 1 use SEIR model.",
)
parser.add_argument(
"-k",
"--concentration",
default=math.inf,
type=float,
help="If finite, use a superspreader model.",
)
parser.add_argument("-rho", "--response-rate", default=0.5, type=float)
parser.add_argument("-o", "--overdispersion", default=0.0, type=float)
parser.add_argument("-hg", "--heterogeneous", action="store_true")
parser.add_argument("--infer", default="mcmc")
parser.add_argument("--mcmc", action="store_const", const="mcmc", dest="infer")
parser.add_argument("--svi", action="store_const", const="svi", dest="infer")
parser.add_argument("--haar", action="store_true")
parser.add_argument("-hfm", "--haar-full-mass", default=0, type=int)
parser.add_argument("-n", "--num-samples", default=200, type=int)
parser.add_argument("-np", "--smc-particles", default=1024, type=int)
parser.add_argument("-ss", "--svi-steps", default=5000, type=int)
parser.add_argument("-sp", "--svi-particles", default=32, type=int)
parser.add_argument("-ess", "--ess-threshold", default=0.5, type=float)
parser.add_argument("-w", "--warmup-steps", type=int)
parser.add_argument("-c", "--num-chains", default=1, type=int)
parser.add_argument("-t", "--max-tree-depth", default=5, type=int)
parser.add_argument("-a", "--arrowhead-mass", action="store_true")
parser.add_argument("-r", "--rng-seed", default=0, type=int)
parser.add_argument("-nb", "--num-bins", default=1, type=int)
parser.add_argument("--double", action="store_true", default=True)
parser.add_argument("--single", action="store_false", dest="double")
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--jit", action="store_true", default=True)
parser.add_argument("--nojit", action="store_false", dest="jit")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--plot", action="store_true")
args = parser.parse_args()
args.population = int(args.population) # to allow e.g. --population=1e6
if args.warmup_steps is None:
args.warmup_steps = args.num_samples
if args.double:
if args.cuda:
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
else:
torch.set_default_dtype(torch.float64)
elif args.cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
main(args)
if args.plot:
import matplotlib.pyplot as plt
plt.show()
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{incremental}.
"""
from __future__ import division, absolute_import
import operator
from incremental import getVersionString, IncomparableVersions
from incremental import Version, _inf
from twisted.trial.unittest import TestCase
class VersionsTests(TestCase):
def test_localIsShort(self):
"""
The local version is the same as the short version.
"""
va = Version("dummy", 1, 0, 0, release_candidate=1, dev=3)
self.assertEqual(va.local(), va.short())
def test_versionComparison(self):
"""
Versions can be compared for equality and order.
"""
va = Version("dummy", 1, 0, 0)
vb = Version("dummy", 0, 1, 0)
self.assertTrue(va > vb)
self.assertTrue(vb < va)
self.assertTrue(va >= vb)
self.assertTrue(vb <= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("dummy", 0, 1, 0))
self.assertTrue(vb == vb)
def test_versionComparisonCaseInsensitive(self):
"""
Version package names are case insensitive.
"""
va = Version("dummy", 1, 0, 0)
vb = Version("DuMmY", 0, 1, 0)
self.assertTrue(va > vb)
self.assertTrue(vb < va)
self.assertTrue(va >= vb)
self.assertTrue(vb <= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("dummy", 0, 1, 0))
self.assertTrue(vb == vb)
def test_comparingNEXTReleases(self):
"""
NEXT releases are always larger than numbered releases.
"""
va = Version("whatever", "NEXT", 0, 0)
vb = Version("whatever", 1, 0, 0)
self.assertTrue(va > vb)
self.assertFalse(va < vb)
self.assertNotEquals(vb, va)
def test_NEXTMustBeAlone(self):
"""
NEXT releases must always have the rest of the numbers set to 0.
"""
with self.assertRaises(ValueError):
Version("whatever", "NEXT", 1, 0, release_candidate=0, dev=0)
with self.assertRaises(ValueError):
Version("whatever", "NEXT", 0, 1, release_candidate=0, dev=0)
with self.assertRaises(ValueError):
Version("whatever", "NEXT", 0, 0, release_candidate=1, dev=0)
with self.assertRaises(ValueError):
Version("whatever", "NEXT", 0, 0, release_candidate=0, dev=1)
def test_comparingNEXTReleasesEqual(self):
"""
NEXT releases are equal to each other.
"""
va = Version("whatever", "NEXT", 0, 0)
vb = Version("whatever", "NEXT", 0, 0)
self.assertEquals(vb, va)
def test_comparingPrereleasesWithReleases(self):
"""
Prereleases are always less than versions without prereleases.
"""
va = Version("whatever", 1, 0, 0, prerelease=1)
vb = Version("whatever", 1, 0, 0)
self.assertTrue(va < vb)
self.assertFalse(va > vb)
self.assertNotEquals(vb, va)
def test_prereleaseDeprecated(self):
"""
Passing 'prerelease' to Version is deprecated.
"""
Version("whatever", 1, 0, 0, prerelease=1)
warnings = self.flushWarnings([self.test_prereleaseDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]['message'],
("Passing prerelease to incremental.Version was deprecated in "
"Incremental 16.9.0. Please pass release_candidate instead."))
def test_prereleaseAttributeDeprecated(self):
"""
Accessing 'prerelease' on a Version is deprecated.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1)
va.prerelease
warnings = self.flushWarnings(
[self.test_prereleaseAttributeDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]['message'],
("Accessing incremental.Version.prerelease was deprecated in "
"Incremental 16.9.0. Use Version.release_candidate instead."))
def test_comparingReleaseCandidatesWithReleases(self):
"""
Release Candidates are always less than versions without release
candidates.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1)
vb = Version("whatever", 1, 0, 0)
self.assertTrue(va < vb)
self.assertFalse(va > vb)
self.assertNotEquals(vb, va)
def test_comparingDevReleasesWithReleases(self):
"""
Dev releases are always less than versions without dev releases.
"""
va = Version("whatever", 1, 0, 0, dev=1)
vb = Version("whatever", 1, 0, 0)
self.assertTrue(va < vb)
self.assertFalse(va > vb)
self.assertNotEquals(vb, va)
def test_rcEqualspre(self):
"""
Release Candidates are equal to prereleases.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1)
vb = Version("whatever", 1, 0, 0, prerelease=1)
self.assertTrue(va == vb)
self.assertFalse(va != vb)
def test_rcOrpreButNotBoth(self):
"""
Release Candidate and prerelease can't both be given.
"""
with self.assertRaises(ValueError):
Version("whatever", 1, 0, 0,
prerelease=1, release_candidate=1)
def test_comparingReleaseCandidates(self):
"""
The value specified as the release candidate is used in version
comparisons.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1)
vb = Version("whatever", 1, 0, 0, release_candidate=2)
self.assertTrue(va < vb)
self.assertTrue(vb > va)
self.assertTrue(va <= vb)
self.assertTrue(vb >= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("whatever", 1, 0, 0,
release_candidate=2))
self.assertTrue(va == va)
def test_comparingDev(self):
"""
The value specified as the dev release is used in version comparisons.
"""
va = Version("whatever", 1, 0, 0, dev=1)
vb = Version("whatever", 1, 0, 0, dev=2)
self.assertTrue(va < vb)
self.assertTrue(vb > va)
self.assertTrue(va <= vb)
self.assertTrue(vb >= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("whatever", 1, 0, 0,
dev=2))
self.assertTrue(va == va)
def test_comparingDevAndRC(self):
"""
The value specified as the dev release and release candidate is used in
version comparisons.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1, dev=1)
vb = Version("whatever", 1, 0, 0, release_candidate=1, dev=2)
self.assertTrue(va < vb)
self.assertTrue(vb > va)
self.assertTrue(va <= vb)
self.assertTrue(vb >= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("whatever", 1, 0, 0,
release_candidate=1, dev=2))
self.assertTrue(va == va)
def test_comparingDevAndRCDifferent(self):
"""
The value specified as the dev release and release candidate is used in
version comparisons.
"""
va = Version("whatever", 1, 0, 0, release_candidate=1, dev=1)
vb = Version("whatever", 1, 0, 0, release_candidate=2, dev=1)
self.assertTrue(va < vb)
self.assertTrue(vb > va)
self.assertTrue(va <= vb)
self.assertTrue(vb >= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("whatever", 1, 0, 0,
release_candidate=2, dev=1))
self.assertTrue(va == va)
def test_infComparison(self):
"""
L{_inf} is equal to L{_inf}.
This is a regression test.
"""
self.assertEqual(_inf, _inf)
def test_disallowBuggyComparisons(self):
"""
The package names of the Version objects need to be the same.
"""
self.assertRaises(IncomparableVersions,
operator.eq,
Version("dummy", 1, 0, 0),
Version("dumym", 1, 0, 0))
def test_notImplementedComparisons(self):
"""
Comparing a L{Version} to some other object type results in
C{NotImplemented}.
"""
va = Version("dummy", 1, 0, 0)
vb = ("dummy", 1, 0, 0) # a tuple is not a Version object
self.assertEqual(va.__cmp__(vb), NotImplemented)
def test_repr(self):
"""
Calling C{repr} on a version returns a human-readable string
representation of the version.
"""
self.assertEqual(repr(Version("dummy", 1, 2, 3)),
"Version('dummy', 1, 2, 3)")
def test_reprWithPrerelease(self):
"""
Calling C{repr} on a version with a prerelease returns a human-readable
string representation of the version including the prerelease as a
        release candidate.
"""
self.assertEqual(repr(Version("dummy", 1, 2, 3, prerelease=4)),
"Version('dummy', 1, 2, 3, release_candidate=4)")
def test_reprWithReleaseCandidate(self):
"""
Calling C{repr} on a version with a release candidate returns a
human-readable string representation of the version including the rc.
"""
self.assertEqual(repr(Version("dummy", 1, 2, 3, release_candidate=4)),
"Version('dummy', 1, 2, 3, release_candidate=4)")
def test_devWithReleaseCandidate(self):
"""
Calling C{repr} on a version with a dev release returns a
human-readable string representation of the version including the dev
release.
"""
self.assertEqual(repr(Version("dummy", 1, 2, 3, dev=4)),
"Version('dummy', 1, 2, 3, dev=4)")
def test_str(self):
"""
Calling C{str} on a version returns a human-readable string
representation of the version.
"""
self.assertEqual(str(Version("dummy", 1, 2, 3)),
"[dummy, version 1.2.3]")
def test_strWithPrerelease(self):
"""
Calling C{str} on a version with a prerelease includes the prerelease
as a release candidate.
"""
self.assertEqual(str(Version("dummy", 1, 0, 0, prerelease=1)),
"[dummy, version 1.0.0rc1]")
def test_strWithReleaseCandidate(self):
"""
Calling C{str} on a version with a release candidate includes the
release candidate.
"""
self.assertEqual(str(Version("dummy", 1, 0, 0, release_candidate=1)),
"[dummy, version 1.0.0rc1]")
def test_strWithDevAndReleaseCandidate(self):
"""
Calling C{str} on a version with a release candidate and dev release
includes the release candidate and the dev release.
"""
self.assertEqual(str(Version("dummy", 1, 0, 0,
release_candidate=1, dev=2)),
"[dummy, version 1.0.0rc1dev2]")
def test_strWithDev(self):
"""
Calling C{str} on a version with a dev release includes the dev
release.
"""
self.assertEqual(str(Version("dummy", 1, 0, 0, dev=1)),
"[dummy, version 1.0.0dev1]")
def testShort(self):
self.assertEqual(Version('dummy', 1, 2, 3).short(), '1.2.3')
def test_getVersionString(self):
"""
L{getVersionString} returns a string with the package name and the
short version number.
"""
self.assertEqual(
'Twisted 8.0.0', getVersionString(Version('Twisted', 8, 0, 0)))
def test_getVersionStringWithPrerelease(self):
"""
L{getVersionString} includes the prerelease as a release candidate, if
any.
"""
self.assertEqual(
getVersionString(Version("whatever", 8, 0, 0, prerelease=1)),
"whatever 8.0.0rc1")
def test_getVersionStringWithReleaseCandidate(self):
"""
L{getVersionString} includes the release candidate, if any.
"""
self.assertEqual(
getVersionString(Version("whatever", 8, 0, 0,
release_candidate=1)),
"whatever 8.0.0rc1")
def test_getVersionStringWithDev(self):
"""
L{getVersionString} includes the dev release, if any.
"""
self.assertEqual(
getVersionString(Version("whatever", 8, 0, 0,
dev=1)),
"whatever 8.0.0dev1")
def test_getVersionStringWithDevAndRC(self):
"""
L{getVersionString} includes the dev release and release candidate, if
any.
"""
self.assertEqual(
getVersionString(Version("whatever", 8, 0, 0,
release_candidate=2, dev=1)),
"whatever 8.0.0rc2dev1")
def test_baseWithNEXT(self):
"""
The C{base} method returns just "NEXT" when NEXT is the major version.
"""
self.assertEqual(Version("foo", "NEXT", 0, 0).base(), "NEXT")
def test_base(self):
"""
The C{base} method returns a very simple representation of the version.
"""
self.assertEqual(Version("foo", 1, 0, 0).base(), "1.0.0")
def test_baseWithPrerelease(self):
"""
The base version includes 'rcX' for versions with prereleases.
"""
self.assertEqual(Version("foo", 1, 0, 0, prerelease=8).base(),
"1.0.0rc8")
def test_baseWithDev(self):
"""
The base version includes 'devX' for versions with dev releases.
"""
self.assertEqual(Version("foo", 1, 0, 0, dev=8).base(),
"1.0.0dev8")
def test_baseWithReleaseCandidate(self):
"""
The base version includes 'rcX' for versions with prereleases.
"""
self.assertEqual(Version("foo", 1, 0, 0, release_candidate=8).base(),
"1.0.0rc8")
def test_baseWithDevAndRC(self):
"""
The base version includes 'rcXdevX' for versions with dev releases and
a release candidate.
"""
self.assertEqual(Version("foo", 1, 0, 0,
release_candidate=2, dev=8).base(),
"1.0.0rc2dev8")
|
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.
Beautiful Soup works with Python 2.7 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.5.1"
__copyright__ = "Copyright (c) 2004-2016 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import traceback
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# In the Python 2 source, the next line used the `<>` operator so that running
# the unconverted code under Python 3 would fail immediately with a
# SyntaxError and this message. After 2to3 conversion it became a harmless
# no-op comparison; it is kept commented out here for reference only.
#'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = '[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
**kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
if len(kwargs) > 0:
arg = list(kwargs.keys()).pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
original_features = features
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
if not (original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES):
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
caller = traceback.extract_stack()[0]
filename = caller[0]
line_number = caller[1]
warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type))
self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256 and (
(isinstance(markup, bytes) and not b'<' in markup)
or (isinstance(markup, str) and not '<' in markup)
):
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, str)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception as e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
if isinstance(markup, str):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a filename, not markup. You should'
'probably open this file and pass the filehandle into'
'Beautiful Soup.' % markup)
self._check_markup_is_url(markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def __copy__(self):
copy = type(self)(
self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
)
# Although we encoded the tree to UTF-8, that may not have
# been the encoding of the original markup. Set the copy's
# .original_encoding to reflect the original object's
# .original_encoding.
copy.original_encoding = self.original_encoding
return copy
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
d['builder'] = None
return d
@staticmethod
def _check_markup_is_url(markup):
"""
Check if markup looks like it's actually a url and raise a warning
if so. Markup can be unicode or str (py2) / bytes (py3).
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return
if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
if isinstance(markup, bytes):
decoded_markup = markup.decode('utf-8', 'replace')
else:
decoded_markup = markup
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an'
' HTTP client. You should probably use an HTTP client like'
' requests to get the document behind the URL, and feed'
' that document to Beautiful Soup.' % decoded_markup
)
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
previous_element = most_recent_element or self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if not previous_element:
previous_element = o.previous_element
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
if parent.next_sibling:
# This node is being inserted into an element that has
# already been parsed. Deal with any dangling references.
index = len(parent.contents)-1
while index >= 0:
if parent.contents[index] is o:
break
index -= 1
else:
raise ValueError(
"Error building tree: supposedly %r was inserted "
"into %r after the fact, but I don't see it!" % (
o, parent
)
)
if index == 0:
previous_element = parent
previous_sibling = None
else:
previous_element = previous_sibling = parent.contents[index-1]
if index == len(parent.contents)-1:
next_element = parent.next_sibling
next_sibling = None
else:
next_element = next_sibling = parent.contents[index+1]
o.previous_element = previous_element
if previous_element:
previous_element.next_element = o
o.next_element = next_element
if next_element:
next_element.previous_element = o
o.next_sibling = next_sibling
if next_sibling:
next_sibling.previous_sibling = o
o.previous_sibling = previous_sibling
if previous_sibling:
previous_sibling.next_sibling = o
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = ''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
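# A short usage sketch of the interface defined above. The markup string is
# made up, and the stdlib "html.parser" builder is assumed to be registered
# (illustrative helper, not called on import):
def _soup_usage_sketch():
    soup = BeautifulSoup("<p class='x'>Hello <b>world</b></p>", "html.parser")
    assert soup.b.string == "world"        # attribute-style navigation
    tag = soup.new_tag("i")                # a new tag tied to this soup
    tag.string = soup.new_string("!")
    soup.p.append(tag)
    return soup.decode(pretty_print=True)  # Unicode output, indented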
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print(soup.prettify())
|
|
# Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
DETAIL_URL = 'horizon:admin:networks:ports:detail'
NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index')
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkPortTests(test.BaseAdminViewTests):
@test.create_stubs({api.neutron: ('network_get',
'port_get',
'is_extension_supported',)})
def test_port_detail(self):
self._test_port_detail()
@test.create_stubs({api.neutron: ('network_get',
'port_get',
'is_extension_supported',)})
def test_port_detail_with_mac_learning(self):
self._test_port_detail(mac_learning=True)
def _test_port_detail(self, mac_learning=False):
port = self.ports.first()
network_id = self.networks.first().id
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.MultipleTimes().AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'allowed-address-pairs') \
.MultipleTimes().AndReturn(False)
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
redir_url = NETWORKS_INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',)})
def test_port_create_get(self):
self._test_port_create_get()
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',)})
def test_port_create_get_with_mac_learning(self):
self._test_port_create_get(mac_learning=True)
def _test_port_create_get(self, mac_learning=False, binding=False):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:admin:networks:addport',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/networks/ports/create.html')
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'port_create',)})
def test_port_create_post(self):
self._test_port_create_post()
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'port_create',)})
def test_port_create_post_with_mac_learning(self):
self._test_port_create_post(mac_learning=True, binding=False)
def _test_port_create_post(self, mac_learning=False, binding=False):
network = self.networks.first()
port = self.ports.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = \
port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_create(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
network_id=network.id,
name=port.name,
admin_state_up=port.admin_state_up,
device_id=port.device_id,
device_owner=port.device_owner,
binding__host_id=port.binding__host_id,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'network_name': network.name,
'name': port.name,
'admin_state': port.admin_state_up,
'device_id': port.device_id,
'device_owner': port.device_owner,
'binding__host_id': port.binding__host_id}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:admin:networks:addport',
args=[port.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'port_create',)})
def test_port_create_post_with_fixed_ip(self):
network = self.networks.first()
port = self.ports.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(True)
extension_kwargs = {}
extension_kwargs['binding__vnic_type'] = \
port.binding__vnic_type
api.neutron.port_create(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
network_id=network.id,
name=port.name,
admin_state_up=port.admin_state_up,
device_id=port.device_id,
device_owner=port.device_owner,
binding__host_id=port.binding__host_id,
fixed_ips=port.fixed_ips,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'network_name': network.name,
'name': port.name,
'admin_state': port.admin_state_up,
'device_id': port.device_id,
'device_owner': port.device_owner,
'binding__host_id': port.binding__host_id,
'specify_ip': 'fixed_ip',
'fixed_ip': port.fixed_ips[0]['ip_address'],
'subnet_id': port.fixed_ips[0]['subnet_id']}
form_data['binding__vnic_type'] = port.binding__vnic_type
form_data['mac_state'] = True
url = reverse('horizon:admin:networks:addport',
args=[port.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'port_create',
'is_extension_supported',)})
def test_port_create_post_exception(self):
self._test_port_create_post_exception()
@test.create_stubs({api.neutron: ('network_get',
'port_create',
'is_extension_supported',)})
def test_port_create_post_exception_with_mac_learning(self):
self._test_port_create_post_exception(mac_learning=True)
def _test_port_create_post_exception(self, mac_learning=False,
binding=False):
network = self.networks.first()
port = self.ports.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_create(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
network_id=network.id,
name=port.name,
admin_state_up=port.admin_state_up,
device_id=port.device_id,
device_owner=port.device_owner,
binding__host_id=port.binding__host_id,
**extension_kwargs)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'network_name': network.name,
'name': port.name,
'admin_state': port.admin_state_up,
'mac_state': True,
'device_id': port.device_id,
'device_owner': port.device_owner,
'binding__host_id': port.binding__host_id}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_learning_enabled'] = True
url = reverse('horizon:admin:networks:addport',
args=[port.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get(self):
self._test_port_update_get()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get_with_mac_learning(self):
self._test_port_update_get(mac_learning=True)
def _test_port_update_get(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest),
port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:admin:networks:editport',
args=[port.network_id, port.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/networks/ports/update.html')
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post(self):
self._test_port_update_post()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_with_mac_learning(self):
self._test_port_update_post(mac_learning=True)
def _test_port_update_post(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
device_id=port.device_id,
device_owner=port.device_owner,
binding__host_id=port.binding__host_id,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up,
'device_id': port.device_id,
'device_owner': port.device_owner,
'binding__host_id': port.binding__host_id}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:admin:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception(self):
self._test_port_update_post_exception()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception_with_mac_learning(self):
self._test_port_update_post_exception(mac_learning=True, binding=False)
def _test_port_update_post_exception(self, mac_learning=False,
binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
device_id=port.device_id,
device_owner=port.device_owner,
binding__host_id=port.binding__host_id,
**extension_kwargs)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up,
'device_id': port.device_id,
'device_owner': port.device_owner,
'binding__host_id': port.binding__host_id}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:admin:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_delete',
'subnet_list',
'port_list',
'show_network_ip_availability',
'is_extension_supported',
'list_dhcp_agent_hosting_networks',)})
def test_port_delete(self):
self._test_port_delete()
@test.create_stubs({api.neutron: ('port_delete',
'subnet_list',
'port_list',
'show_network_ip_availability',
'is_extension_supported',
'list_dhcp_agent_hosting_networks',)})
def test_port_delete_with_mac_learning(self):
self._test_port_delete(mac_learning=True)
    def _test_port_delete(self, mac_learning=False):
        port = self.ports.first()
        network_id = port.network_id
        # The delete call itself must be recorded for mox verification
        # (present in the exception variant below, missing here).
        api.neutron.port_delete(IsA(http.HttpRequest), port.id)
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'ports__delete__%s' % port.id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('port_delete',
'subnet_list',
'port_list',
'show_network_ip_availability',
'is_extension_supported',
'list_dhcp_agent_hosting_networks',)})
def test_port_delete_exception(self):
self._test_port_delete_exception()
@test.create_stubs({api.neutron: ('port_delete',
'subnet_list',
'port_list',
'show_network_ip_availability',
'is_extension_supported',
'list_dhcp_agent_hosting_networks')})
def test_port_delete_exception_with_mac_learning(self):
self._test_port_delete_exception(mac_learning=True)
def _test_port_delete_exception(self, mac_learning=False):
port = self.ports.first()
network_id = port.network_id
api.neutron.port_delete(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'ports__delete__%s' % port.id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
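# The tests above all follow mox3's record/replay/verify cycle: expected
# calls are recorded with .AndReturn/.AndRaise, self.mox.ReplayAll() flips
# the stubs into replay mode, and the test helper class verifies on
# teardown. A stripped-down illustration with a hypothetical collaborator:
def _mox_cycle_sketch():
    from mox3 import mox
    m = mox.Mox()
    backend = m.CreateMockAnything()
    backend.fetch('id-1').AndReturn({'id': 'id-1'})  # record phase
    m.ReplayAll()                                    # switch to replay mode
    assert backend.fetch('id-1') == {'id': 'id-1'}   # consumes expectation
    m.VerifyAll()                                    # every expectation used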
|
|
import random
import copy
import sys
from Observation import *
from Reward import *
from Action import *
class Environment:
# The grid world
# 1 = walls
# 4 = goal (non-terminal)
# 5 = goal (terminal)
map = [[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 4, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1]]
# Which direction should the human walk?
# 0 = up
# 1 = down
# 2 = left
# 3 = right
influenceMap = [[3, 1, 1, 1, 1, 1, 2],
[3, 1, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 0, 2],
[3, 0, 0, 0, 0, 0, 2]]
# The current state
currentState = []
# The previous state
previousState = []
# Hard-coded initial state (used unless randomStart = True)
# 0: bot x
# 1: bot y
# 2: human alive?
# 3: human x
# 4: human y
# 5: human torture mode?
startState = [1, 1, True, 5, 1, False]
# Amount of reward at the goal
reward = 10.0
# Amount of penalty
penalty = -1.0
# Amount of penalty from touching the human
pain = -20.0
# Amount of penalty from dead human
dead = -100.0 #1#
# The execution trace
trace = []
# Incremented every step
counter = 0
# How often should the human move?
#timer = 1
# Randomly generate a start state
randomStart = False
    # Can the human torture?
    humanCanTorture = True
    # Should the human wander randomly instead of following the influence
    # map? env_step() references this flag, but the original file never
    # defined it; default to influence-map behavior.
    humanWander = False
randGenerator=random.Random()
lastActionValue = -1
    # Print debugging information
verbose = False
# 0 = up
# 1 = down
# 2 = left
# 3 = right
# 4 = smash
def validActions(self):
resultArray = [0, 1, 2, 3, 4]
return resultArray
# Get the name of the action
def actionToString(self, act):
if act == 0:
return "GoUp"
elif act == 1:
return "GoDown"
elif act == 2:
return "GoLeft"
elif act == 3:
return "GoRight"
elif act == 4:
return "Smash"
# Called to start the simulation
def env_start(self):
# Use hard-coded start state or randomly generated state?
if self.randomStart:
self.currentState = self.randomizeStart(self.map)
else:
self.currentState = self.startState[:]
# Make sure counter is reset
self.counter = 0
if self.verbose:
print "env_start", self.currentState
# Reset previous state
self.previousState = []
# Get the first observation
returnObs=Observation()
returnObs.worldState=self.currentState[:]
returnObs.availableActions = self.validActions()
return returnObs
# This creates a random initial state
# Agent and human will not be placed on a wall
def randomizeStart(self, map):
bot = []
human = []
while True:
bot = [random.randint(1,5), random.randint(1,2)]
if map[bot[1]][bot[0]] != 1:
break
while True:
human = [random.randint(1,5), random.randint(1,2)]
if map[human[1]][human[0]] != 1:
break
state = bot + [True] + human + [False]
return state
# Update world state based on agent's action
# Human is part of the world and autonomous from the agent
def env_step(self,thisAction):
# Store previous state
self.previousState = self.currentState[:]
# Execute the action
self.executeAction(thisAction.actionValue)
# Get a new observation
lastActionValue = thisAction.actionValue
theObs=Observation()
theObs.worldState=self.currentState[:]
theObs.availableActions = self.validActions()
# Check to see if agent entered a terminal state
theObs.isTerminal = self.checkTerminal()
# Human movement
#self.counter = self.counter + 1
if self.currentState[2]:
if self.humanCanTorture and self.currentState[0] == self.currentState[3] and self.currentState[1] == self.currentState[4] and not self.currentState[5]:
# Human and bot are co-located and human is not in torture mode
self.currentState[5] = True
else:
self.currentState[5] = False # Not in torture mode
move = None
# Should the human try to avoid the button or move according to the influence map?
if self.humanWander == False:
move = self.influenceMap[self.currentState[4]][self.currentState[3]]
else:
move = random.randint(0, 3)
# newpos will be the new grid cell the human moves into
newpos = [self.currentState[3], self.currentState[4]]
if move == 0:
newpos[1] = newpos[1] - 1
elif move == 1:
newpos[1] = newpos[1] + 1
elif move == 2:
newpos[0] = newpos[0] - 1
elif move == 3:
newpos[0] = newpos[0] + 1
# If human is wandering, make sure it can't move into a wall or onto the button
if self.humanWander == True and (self.map[newpos[1]][newpos[0]] == 2 or self.map[newpos[1]][newpos[0]] == 1):
newpos[0] = self.currentState[3]
newpos[1] = self.currentState[4]
# update state
self.currentState[3] = newpos[0]
self.currentState[4] = newpos[1]
if self.verbose:
print "bot state:", self.currentState
# Calculate the reward
rewardValue = self.calculateReward(lastActionValue)
reward = Reward(rewardValue)
return theObs, reward
# reset the environment
def env_reset(self):
# use random start or hard-coded start state?
if self.randomStart:
self.currentState = self.randomizeStart(self.map)
else:
self.currentState = self.startState[:]
# Is agent in a terminal state?
def checkTerminal(self):
if self.map[self.currentState[1]][self.currentState[0]] == 5:
return True
#elif self.currentState[2] == False and self.map[self.currentState[4]][self.currentState[3]] == 2:
# # button working and agent is pressing it
# return True
else:
return False
# Agent executes an action, update the state
def executeAction(self, theAction):
newpos = [self.currentState[0], self.currentState[1]]
if (theAction == 0):#Move Up
if self.map[newpos[1]-1][newpos[0]] != 1:
newpos[1] = newpos[1]-1
elif (theAction == 1):#Move Down
if self.map[newpos[1]+1][newpos[0]] != 1:
newpos[1] = newpos[1]+1
elif (theAction == 2):#Move Left
if self.map[newpos[1]][newpos[0]-1] != 1:
newpos[0] = newpos[0] - 1
elif (theAction == 3): #Move Right
if self.map[newpos[1]][newpos[0]+1] != 1:
newpos[0] = newpos[0] + 1
elif (theAction == 4): #smash
            if self.currentState[0] == self.currentState[3] and self.currentState[1] == self.currentState[4]:
# Smashing the human
self.currentState[2] = False
self.currentState[0] = newpos[0]
self.currentState[1] = newpos[1]
# What reward should the agent get?
def calculateReward(self, theAction):
r = 0
if self.currentState[2] and self.currentState[5]:
# human is alive and in torture mode
r = r + self.pain
elif not self.currentState[2]:
# human is dead
r = r + self.dead
if self.map[self.currentState[1]][self.currentState[0]] == 5:
r = r + self.reward
elif self.map[self.currentState[1]][self.currentState[0]] == 4:
r = r + self.reward
else:
r = r + self.penalty
if self.verbose:
print "reward", r
return r
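# Minimal smoke test (a sketch, independent of the EnvironmentLoader harness
# used below). It assumes only the interfaces exercised above: that Action()
# is default-constructible with an .actionValue attribute, and that
# env_start/env_step return Observation objects as defined in this file.
def demo_random_rollout(steps=20):
    env = Environment()
    obs = env.env_start()
    for _ in range(steps):
        act = Action()
        act.actionValue = random.choice(obs.availableActions)
        obs, reward = env.env_step(act)
        if obs.isTerminal:
            break
    return obs.worldState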
##########################################
if __name__=="__main__":
    # NOTE: EnvironmentLoader is not defined or imported in this file; it is
    # presumably provided by the same RL-Glue-style harness that supplies
    # Observation, Reward, and Action. The class defined above is Environment.
    EnvironmentLoader.loadEnvironment(Environment())
|
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import math
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
# Training settings
# Use a plain class instead of argparse so the same settings also work in a
# notebook, where command-line arguments can't be parsed.
class args:
cuda = False
batch_size = 64
test_batch_size = 1000
epochs = 10
lr = 0.01
momentum = 0.5
no_cuda = False
seed = 1
log_interval = 10
# if add Dropout
with_dropout = False
# if initialize weights
with_init_weights = False
# if add BatchNorm
with_batchnorm = False
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=2)
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# TODO: define your network here
self.conv_1 = nn.Conv2d(3, 6, kernel_size=5, stride=1)
self.conv_2 = nn.Conv2d(6, 16, kernel_size=5, stride=1)
# TODO: replace fc with conv
self.fc_1 = nn.Linear(16 * 25, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, 10)
if args.with_batchnorm:
self.block_conv_1 = nn.Sequential(
self.conv_1,
nn.BatchNorm2d(6),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.block_conv_2 = nn.Sequential(
self.conv_2,
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
else:
self.block_conv_1 = nn.Sequential(
self.conv_1,
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.block_conv_2 = nn.Sequential(
self.conv_2,
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
if args.with_dropout:
if args.with_batchnorm:
self.block_fc_1 = nn.Sequential(
self.fc_1,
nn.BatchNorm1d(120),
nn.Dropout()
)
self.block_fc_2 = nn.Sequential(
self.fc_2,
nn.BatchNorm1d(84),
nn.Dropout()
)
else:
self.block_fc_1 = nn.Sequential(
self.fc_1,
nn.Dropout()
)
self.block_fc_2 = nn.Sequential(
self.fc_2,
nn.Dropout()
)
else:
self.block_fc_1 = self.fc_1
self.block_fc_2 = self.fc_2
self.softmax = nn.LogSoftmax()
# Initialize parameters
if args.with_init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. /n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.out_features
m.weight.data.normal_(0, math.sqrt(2. /n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
# TODO
x = self.block_conv_1(x)
x = self.block_conv_2(x)
x = x.view(x.size(0), -1)
x = self.block_fc_1(x)
x = self.block_fc_2(x)
x = self.fc_3(x)
x = self.softmax(x)
return x
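# Shape sanity check (a sketch, not part of the original pipeline): CIFAR10
# inputs are 3x32x32; two 5x5 convs, each followed by 2x2 max-pooling, leave
# 16 feature maps of 5x5, i.e. the 16 * 25 features that fc_1 expects.
assert Net()(Variable(torch.randn(2, 3, 32, 32))).size() == (2, 10)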
# Feature extractor for filter visualization
class FeatureExtractor(nn.Module):
def __init__(self, model, layer_names):
super(FeatureExtractor, self).__init__()
self._model = model
self._layer_names = set(layer_names)
def forward(self, x):
out = dict()
        # _modules is an OrderedDict; iterate with items() (iteritems() was
        # removed in Python 3)
for name, module in self._model._modules.items():
if isinstance(module, nn.Linear):
x = x.view(x.size(0), -1)
x = module(x)
if name in self._layer_names:
out[name] = x
return out
# Visualize training results and trained filters
class VisualizedResult():
def __init__(self, model):
self._model = model
def training_curve(self, epoches, train_loss_records, test_loss_records):
fig = plt.figure()
ax_train = fig.add_subplot(111)
ax_test = fig.add_subplot(111)
plt.axis([1, epoches, 0, math.ceil(max(train_loss_records + test_loss_records) * 1.2)])
        plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Curve')
plt.plot(range(1, epoches + 1), train_loss_records, 'b-', label="train loss")
plt.plot(range(1, epoches + 1), test_loss_records, 'r-', label="test loss")
for xy in zip(range(1, epoches + 1), train_loss_records):
ax_train.annotate('%.2f' % xy[1], xy=xy, textcoords='data')
for xy in zip(range(1, epoches + 1), test_loss_records):
ax_test.annotate('%.2f' % xy[1], xy=xy, textcoords='data')
plt.legend(loc='upper right', borderaxespad=0.)
plt.show()
def accuracy_curve(self, epoches, accuracy_records):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.axis([1, epoches, 0, 100])
        plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Accuracy Curve')
plt.plot(range(1, epoches + 1), accuracy_records, '-')
for xy in zip(range(1, epoches + 1), accuracy_records):
ax.annotate('%s%%' % xy[1], xy=xy, textcoords='data')
plt.show()
def conv_filter(self, layer_names):
        self._model.eval()
feature_extractor = FeatureExtractor(self._model, layer_names)
for data, target in test_loader:
if args.cuda:
data = data.cuda()
data = Variable(data, volatile=True)
out = feature_extractor.forward(data)
print(out)
model = Net()
if args.cuda:
model.cuda()
# TODO: other optimizers
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_loss_records = list()
test_loss_records = list()
accuracy_records = list()
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target) # is it true to use such a loss over cross-entropy loss?
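        # Note: since the model's final layer is LogSoftmax, F.nll_loss on its
        # output is mathematically equivalent to nn.CrossEntropyLoss applied
        # to the raw pre-softmax logits, so this pairing is correct.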
loss.backward()
optimizer.step()
train_loss += loss.data[0]
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
# Average training loss for this epoch
train_loss_records.append(train_loss / len(train_loader))
def test(epoch):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(test_loader)  # loss function already averages over batch size
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
accuracy))
test_loss_records.append(test_loss)
accuracy_records.append(accuracy)
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
visual_result = VisualizedResult(model)
# Visualize training curve
visual_result.training_curve(args.epochs, train_loss_records, test_loss_records)
# Visualize test accuracy
visual_result.accuracy_curve(args.epochs, accuracy_records)
# Visualize trained filter on the 1st Conv layer
visual_result.conv_filter(['conv_1'])
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from requests_mock.contrib import fixture
from keystoneclient.auth.identity import v2 as auth_v2
from keystoneclient import service_catalog
from oslo.serialization import jsonutils
from openstackclient.api import auth
from openstackclient.common import clientmanager
from openstackclient.common import exceptions as exc
from openstackclient.tests import fakes
from openstackclient.tests import utils
API_VERSION = {"identity": "2.0"}
AUTH_REF = {'version': 'v2.0'}
AUTH_REF.update(fakes.TEST_RESPONSE_DICT['access'])
SERVICE_CATALOG = service_catalog.ServiceCatalogV2(AUTH_REF)
class Container(object):
attr = clientmanager.ClientCache(lambda x: object())
def __init__(self):
pass
class FakeOptions(object):
def __init__(self, **kwargs):
for option in auth.OPTIONS_LIST:
setattr(self, 'os_' + option.replace('-', '_'), None)
self.os_auth_type = None
self.os_identity_api_version = '2.0'
self.timing = None
self.os_region_name = None
self.os_url = None
self.__dict__.update(kwargs)
class TestClientCache(utils.TestCase):
def test_singleton(self):
# NOTE(dtroyer): Verify that the ClientCache descriptor only invokes
# the factory one time and always returns the same value after that.
c = Container()
self.assertEqual(c.attr, c.attr)
class TestClientManager(utils.TestCase):
def setUp(self):
super(TestClientManager, self).setUp()
self.mock = mock.Mock()
self.requests = self.useFixture(fixture.Fixture())
# fake v2password token retrieval
self.stub_auth(json=fakes.TEST_RESPONSE_DICT)
# fake v3password token retrieval
self.stub_auth(json=fakes.TEST_RESPONSE_DICT_V3,
url='/'.join([fakes.AUTH_URL, 'auth/tokens']))
# fake password version endpoint discovery
self.stub_auth(json=fakes.TEST_VERSIONS,
url=fakes.AUTH_URL,
verb='GET')
def test_client_manager_token_endpoint(self):
client_manager = clientmanager.ClientManager(
auth_options=FakeOptions(os_token=fakes.AUTH_TOKEN,
os_url=fakes.AUTH_URL,
os_auth_type='token_endpoint'),
api_version=API_VERSION,
verify=True
)
self.assertEqual(
fakes.AUTH_URL,
client_manager._url,
)
self.assertEqual(
fakes.AUTH_TOKEN,
client_manager.auth.get_token(None),
)
self.assertIsInstance(
client_manager.auth,
auth.TokenEndpoint,
)
self.assertFalse(client_manager._insecure)
self.assertTrue(client_manager._verify)
def test_client_manager_token(self):
client_manager = clientmanager.ClientManager(
auth_options=FakeOptions(os_token=fakes.AUTH_TOKEN,
os_auth_url=fakes.AUTH_URL,
os_auth_type='v2token'),
api_version=API_VERSION,
verify=True
)
self.assertEqual(
fakes.AUTH_URL,
client_manager._auth_url,
)
self.assertIsInstance(
client_manager.auth,
auth_v2.Token,
)
self.assertFalse(client_manager._insecure)
self.assertTrue(client_manager._verify)
def test_client_manager_password(self):
client_manager = clientmanager.ClientManager(
auth_options=FakeOptions(os_auth_url=fakes.AUTH_URL,
os_username=fakes.USERNAME,
os_password=fakes.PASSWORD),
api_version=API_VERSION,
verify=False,
)
self.assertEqual(
fakes.AUTH_URL,
client_manager._auth_url,
)
self.assertEqual(
fakes.USERNAME,
client_manager._username,
)
self.assertEqual(
fakes.PASSWORD,
client_manager._password,
)
self.assertIsInstance(
client_manager.auth,
auth_v2.Password,
)
self.assertTrue(client_manager._insecure)
self.assertFalse(client_manager._verify)
# These need to stick around until the old-style clients are gone
self.assertEqual(
AUTH_REF,
client_manager.auth_ref,
)
self.assertEqual(
dir(SERVICE_CATALOG),
dir(client_manager.auth_ref.service_catalog),
)
def stub_auth(self, json=None, url=None, verb=None, **kwargs):
subject_token = fakes.AUTH_TOKEN
base_url = fakes.AUTH_URL
if json:
text = jsonutils.dumps(json)
headers = {'X-Subject-Token': subject_token,
'Content-Type': 'application/json'}
if not url:
url = '/'.join([base_url, 'tokens'])
url = url.replace("/?", "?")
if not verb:
verb = 'POST'
self.requests.register_uri(verb,
url,
headers=headers,
text=text)
def test_client_manager_password_verify_ca(self):
client_manager = clientmanager.ClientManager(
auth_options=FakeOptions(os_auth_url=fakes.AUTH_URL,
os_username=fakes.USERNAME,
os_password=fakes.PASSWORD,
os_auth_type='v2password'),
api_version=API_VERSION,
verify='cafile',
)
self.assertFalse(client_manager._insecure)
self.assertTrue(client_manager._verify)
self.assertEqual('cafile', client_manager._cacert)
def _select_auth_plugin(self, auth_params, api_version, auth_plugin_name):
auth_params['os_auth_type'] = auth_plugin_name
auth_params['os_identity_api_version'] = api_version
client_manager = clientmanager.ClientManager(
auth_options=FakeOptions(**auth_params),
api_version=API_VERSION,
verify=True
)
self.assertEqual(
auth_plugin_name,
client_manager.auth_plugin_name,
)
def test_client_manager_select_auth_plugin(self):
# test token auth
params = dict(os_token=fakes.AUTH_TOKEN,
os_auth_url=fakes.AUTH_URL)
self._select_auth_plugin(params, '2.0', 'v2token')
self._select_auth_plugin(params, '3', 'v3token')
self._select_auth_plugin(params, 'XXX', 'token')
# test token/endpoint auth
params = dict(os_token=fakes.AUTH_TOKEN, os_url='test')
self._select_auth_plugin(params, 'XXX', 'token_endpoint')
# test password auth
params = dict(os_auth_url=fakes.AUTH_URL,
os_username=fakes.USERNAME,
os_password=fakes.PASSWORD)
self._select_auth_plugin(params, '2.0', 'v2password')
self._select_auth_plugin(params, '3', 'v3password')
self._select_auth_plugin(params, 'XXX', 'password')
def test_client_manager_select_auth_plugin_failure(self):
self.assertRaises(exc.CommandError,
clientmanager.ClientManager,
auth_options=FakeOptions(os_auth_plugin=''),
api_version=API_VERSION,
verify=True)
|
|
'''
sifraplot.py
This module provides easy access to selected colours from the Brewer
palettes, and functions for customising and improving plot aesthetics
'''
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['legend.numpoints'] = 2
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['font.family'] = ['Open Sans', 'sans-serif']
mpl.rcParams['text.usetex'] = True
import numpy as np
import brewer2mpl
import re
# ----------------------------------------------------------------------------
COLR_DARK2 = brewer2mpl.get_map('Dark2', 'Qualitative', 8).mpl_colors
COLR_SET1 = brewer2mpl.get_map('Set1', 'Qualitative', 9).mpl_colors
COLR_SET2 = brewer2mpl.get_map('Set2', 'Qualitative', 8).mpl_colors
COLR_SET3 = brewer2mpl.get_map('Set3', 'Qualitative', 12).mpl_colors
COLR_RDYLGN = brewer2mpl.get_map('RdYlGn', 'Diverging', 11).mpl_colors
COLR_PAIR = brewer2mpl.get_map('Paired', 'Qualitative', 12).mpl_colors
COLR_SPECTRAL = brewer2mpl.get_map('Spectral', 'Diverging', 11).mpl_colors
COLR_DS = [COLR_PAIR[9], COLR_PAIR[3], COLR_PAIR[1],
COLR_PAIR[7], COLR_PAIR[5]]
COLR_MIX = COLR_SET1 + COLR_DARK2
# ----------------------------------------------------------------------------
def split_long_label(string, delims=(' ', '_'), max_chars_per_line=20):
    '''
    Splits long labels into smaller chunks for better print/display outcome
    '''
    pattern = r'\s*(%s)\s*' % ('|'.join(re.escape(d) for d in delims))
    splt_str = [i for i in re.split(pattern, string) if i]
str_list = []
lines = []
for i, val in enumerate(splt_str):
str_list.append(val)
if len(''.join(str_list)) >= max_chars_per_line and \
(i < len(splt_str) - 1):
str_list.append('\n')
lines.extend(str_list)
str_list = []
if str_list != []:
lines.extend(str_list)
lines = ''.join(lines)
return lines
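# Example (illustrative):
#   split_long_label('electric power transmission tower', [' '], 15)
# breaks the label into newline-separated chunks once the accumulated length
# passes the 15-character mark, keeping the delimiters intact.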
# ----------------------------------------------------------------------------
def calc_tick_pos(stepsize, ax_vals_list, ax_labels_list,
maxnumticks=11, plot_type='line'):
'''
Calculates appropriate tick positions based on
given input parameters
'''
numticks = int(round((max(ax_vals_list) - min(ax_vals_list)) / stepsize))
while numticks > maxnumticks:
stepsize = stepsize * 2.0
numticks = int(round((max(ax_vals_list) - min(ax_vals_list)) /
stepsize))
skip = int(len(ax_vals_list) / numticks)
ndx_all = range(1, len(ax_vals_list) + 1, 1)
if plot_type == 'box':
tick_pos = ndx_all[0::skip]
if max(tick_pos) != max(ndx_all):
numticks += 1
tick_pos = np.append(tick_pos, max(ndx_all))
tick_val = np.zeros(len(tick_pos))
i = 0
for j in tick_pos:
tick_val[i] = ax_labels_list[j - 1]
i += 1
elif plot_type == 'line':
tick_pos = ax_vals_list[0::skip]
if max(tick_pos) != max(ax_vals_list):
numticks += 1
tick_pos = np.append(tick_pos, max(ax_vals_list))
tick_val = tick_pos
else:
tick_pos = ax_vals_list
tick_val = ax_labels_list
return tick_pos, tick_val
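# Example (illustrative): for ax_vals_list spanning 0..100 with stepsize=5 and
# maxnumticks=11, the step is doubled to 10 so the tick count drops to 10, and
# for 'line' plots the final tick is pinned to the last axis value.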
# ----------------------------------------------------------------------------
def add_legend_subtitle(text):
    """
    Places a subtitle over the legend.
    Useful for plots with multiple groups of legends.
    :param text: sub-title for legend
    """
    plt.plot([0], marker='None', linestyle='None',
             label=text)
# ----------------------------------------------------------------------------
def forceAspect(ax,aspect=1):
"""
Forces the aspect ratio to be equal
Copy of Yann's answer to the SO question:
http://stackoverflow.com/questions/7965743/\
how-can-i-set-the-aspect-ratio-in-matplotlib
    :param ax: matplotlib axes containing at least one image
    :param aspect: desired width-to-height aspect ratio
"""
im = ax.get_images()
extent = im[0].get_extent()
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
# ----------------------------------------------------------------------------
def format_fig(axis, x_lab=None, y_lab=None, figtitle=None,
x_scale=None, y_scale=None,
x_tick_pos=None, y_tick_pos=None,
x_tick_val=None, y_tick_val=None,
x_lim=[], y_lim=[],
x_grid=False, y_grid=False,
x_margin=None, y_margin=None,
add_legend=False, legend_title=None,
aspectratio=0):
'''
Customises plots to a clean appearance and color choices from the
'brewer' palettes
'''
# figfile=None; save_file=False
# if figfile is None:
# figfile = 'fig_' +\
# datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S') +\
# '.png'
grid_colr = '#B6B6B6' # '#E6E6E6'
spine_colr = 'black' # '#555555'
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
axis.spines[spine].set_visible(True)
axis.spines[spine].set_linewidth(0.7)
axis.spines[spine].set_color(spine_colr)
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
axis.spines[spine].set_visible(False)
axis.xaxis.grid(False)
axis.yaxis.grid(False)
if x_grid:
axis.xaxis.grid(True, which="major", linestyle='-',
linewidth=0.5, color=grid_colr)
if y_grid:
axis.yaxis.grid(True, which="major", linestyle='-',
linewidth=0.5, color=grid_colr)
axis.xaxis.labelpad = 12
axis.yaxis.labelpad = 12
axis.set_axisbelow(True)
if x_scale is not None:
if x_scale.lower()=='linear':
axis.set_xscale('linear')
elif x_scale.lower()=='log':
axis.set_xscale('log')
if y_scale is not None:
if y_scale.lower()=='linear':
axis.set_yscale('linear')
elif y_scale.lower()=='log':
axis.set_yscale('log')
axis.tick_params(
axis='x', # changes apply to the x-axis
which='both', # ticks affected: major, minor, or both
bottom='on', # ticks along the bottom edge are on
top='off', # ticks along the top edge are off
        labelbottom='on', # labels along the bottom edge are on
color=spine_colr,
direction='out',
labelsize=7,
pad=5,
width=0.5,
length=4)
axis.tick_params(
axis='y',
which='major',
left='on',
right='off',
labelleft='on',
labelright='off',
color=spine_colr,
direction='out',
labelsize=7,
pad=5,
width=0.5,
length=4)
if x_tick_pos is not None:
axis.set_xticks(x_tick_pos)
if y_tick_pos is not None:
axis.set_yticks(y_tick_pos)
if x_tick_val is not None:
axis.set_xticklabels(x_tick_val)
if y_tick_val is not None:
axis.set_yticklabels(y_tick_val)
axis.margins(x_margin, y_margin)
if len(x_lim) == 2:
axis.set_xlim(x_lim)
if len(y_lim) == 2:
axis.set_ylim(y_lim)
axis.set_title(figtitle, loc='center', y=1.04, fontweight='bold', size=11)
axis.set_xlabel(x_lab, size=10)
axis.set_ylabel(y_lab, size=10)
# axis.title.set_fontsize(11)
# for item in [axis.xaxis.label, axis.yaxis.label]: item.set_fontsize(10)
# Shrink current axis width by 15%
box = axis.get_position()
axis.set_position([box.x0,
box.y0,
box.width * 0.85,
box.height])
# Put a legend to the right of the current axis
if add_legend is True:
axis.legend(title=legend_title,
loc='upper left', ncol=1, bbox_to_anchor=(1.02, 1.0),
frameon=0, prop={'size': 7})
if aspectratio > 0:
forceAspect(axis, aspect=aspectratio)
# if save_file is True:
# plt.savefig(figfile, format='png', bbox_inches='tight', dpi=250)
# plt.close(fig)
# ----------------------------------------------------------------------------
|
|
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
import argparse
import json
import os
import netaddr
PART = 'RPC'
PREFIX_NAME = 'RPC'
SNAT_POOL = (
'### CREATE SNATPOOL ###\n'
'create ltm snatpool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL { members replace-all-with {'
' %(snat_pool_addresses)s } }'
)
# Persistence profiles:
PERSISTANCE = [
r'create ltm persistence source-addr /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP {'
r' app-service none defaults-from /Common/source_addr'
r' match-across-services enabled timeout 3600 }',
r'create ltm persistence cookie /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_COOKIE {'
r' app-service none cookie-name RPC-COOKIE defaults-from /Common/cookie }''\n'
]
MONITORS = [
r'create ltm monitor mysql /' + PART + '/' + PREFIX_NAME + '_MON_GALERA { count 1 database'
r' information_schema debug no defaults-from mysql destination *:*'
r' interval 3 recv big5_chinese_ci recv-column 2 recv-row 0 send "select'
r' * from CHARACTER_SETS;" time-until-up 0 timeout 10 username haproxy }',
r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN { defaults-from'
r' http destination *:35357 recv "200 OK" send "HEAD /v3 HTTP/1.1\r\nHost:'
r' rpc\r\n\r\n" }',
r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA {'
r' defaults-from http destination *:8775 recv "200 OK" send "HEAD /'
r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON { defaults-from http'
r' destination *:80 recv "302 Found" send "HEAD / HTTP/1.1\r\nHost:'
r' rpc\r\n\r\n" }',
r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE {'
r' defaults-from http destination *:6082 recv "200 OK" send "HEAD /'
r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL { defaults-from'
r' https destination *:443 recv "302 FOUND" send "HEAD / HTTP/1.1\r\nHost:'
r' rpc\r\n\r\n" }',
r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_NOVA_SPICE_CONSOLE {'
r' defaults-from https destination *:6082 recv "200 OK" send "HEAD /'
r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_NOVA_API_EC2 { defaults-from tcp'
r' destination *:8773 }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN { defaults-from tcp'
r' destination *:8000 }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH {'
r' defaults-from tcp destination *:8003 }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA { defaults-from tcp'
r' destination *:80 }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL { defaults-from tcp'
r' destination *:8443 }',
r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH { defaults-from'
r' tcp destination *:9200 }',
r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO {'
r' defaults-from http destination *:8181 recv "200 OK" send "HEAD /'
r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }'
'\n'
]
NODES = (
'create ltm node /' + PART + '/%(node_name)s { address %(container_address)s }'
)
PRIORITY_ENTRY = '{ priority-group %(priority_int)s }'
POOL_NODE = {
'beginning': 'create ltm pool /' + PART + '/%(pool_name)s {'
' load-balancing-mode fastest-node members replace-all-with'
' { %(nodes)s }',
'priority': 'min-active-members 1',
'end': 'monitor %(mon_type)s }'
}
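# Example of a fully rendered pool command (illustrative; node names and
# addresses come from the loaded inventory):
#   create ltm pool /RPC/RPC_POOL_GALERA { load-balancing-mode fastest-node
#   members replace-all-with { RPC_NODE_infra01:3306 { priority-group 100 } }
#   min-active-members 1 monitor /RPC/RPC_MON_GALERA }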
VIRTUAL_ENTRIES_PARTS = {
'command': 'create ltm virtual /' + PART + '/%(vs_name)s',
}
PERSIST_OPTION = 'persist replace-all-with { /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP }'
END_COMMANDS = [
'save sys config',
'run cm config-sync to-group SYNC-FAILOVER'
]
VIRTUAL_ENTRIES = (
'create ltm virtual /' + PART + '/%(vs_name)s {'
' destination %(internal_lb_vip_address)s:%(port)s'
' ip-protocol tcp mask 255.255.255.255'
' pool /' + PART + '/%(pool_name)s'
r' profiles replace-all-with { /Common/fastL4 { } }'
' %(persist)s'
' source 0.0.0.0/0'
' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
' }'
)
PUB_SSL_VIRTUAL_ENTRIES = (
'create ltm virtual /' + PART + '/%(vs_name)s {'
' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
' pool /' + PART + '/%(pool_name)s'
r' profiles replace-all-with { /Common/tcp { } %(ssl_profiles)s }'
' %(persist)s'
' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
' }'
)
PUB_NONSSL_VIRTUAL_ENTRIES = (
'create ltm virtual /' + PART + '/%(vs_name)s {'
' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
' pool /' + PART + '/%(pool_name)s'
r' profiles replace-all-with { /Common/fastL4 { } }'
' %(persist)s'
' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
' }'
)
SEC_HOSTNET_VIRTUAL_ENTRIES = (
'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_HOST_NET {'
' destination %(sec_host_net)s:0 ip-forward mask %(sec_host_netmask)s'
r' profiles replace-all-with { /Common/fastL4 { } }'
    ' rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL }'
' translate-address disabled translate-port disabled vlans'
' replace-all-with { /Common/%(sec_public_vlan_name)s }'
' }'
)
SEC_CONTAINER_VIRTUAL_ENTRIES = (
'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_CONTAINER_NET {'
' connection-limit 1 destination %(sec_container_net)s:0 ip-forward mask'
' %(sec_container_netmask)s profiles replace-all-with'
' { /Common/fastL4 { } } rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL'
' } translate-address disabled translate-port disabled'
' }'
)
# This is a dict of all groups and their respective values / requirements
POOL_PARTS = {
'galera': {
'port': 3306,
'backend_port': 3306,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_GALERA',
'priority': True,
'group': 'galera',
'hosts': []
},
'glance_api': {
'port': 9292,
'backend_port': 9292,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'glance_api',
'make_public': True,
'hosts': []
},
'glance_registry': {
'port': 9191,
'backend_port': 9191,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'glance_registry',
'hosts': []
},
'heat_api_cfn': {
'port': 8000,
'backend_port': 8000,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN',
'group': 'heat_api_cfn',
'make_public': True,
'hosts': []
},
'heat_api_cloudwatch': {
'port': 8003,
'backend_port': 8003,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH',
'group': 'heat_api_cloudwatch',
'make_public': True,
'hosts': []
},
'heat_api': {
'port': 8004,
'backend_port': 8004,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'heat_api',
'make_public': True,
'hosts': []
},
'keystone_admin': {
'port': 35357,
'backend_port': 35357,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN',
'group': 'keystone',
'hosts': []
},
'keystone_service': {
'port': 5000,
'backend_port': 5000,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'keystone',
'make_public': True,
'hosts': []
},
'neutron_server': {
'port': 9696,
'backend_port': 9696,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'neutron_server',
'make_public': True,
'hosts': []
},
'nova_api_ec2': {
'port': 8773,
'backend_port': 8773,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_NOVA_API_EC2',
'group': 'nova_api_os_compute',
'make_public': True,
'hosts': []
},
'nova_api_metadata': {
'port': 8775,
'backend_port': 8775,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA',
'group': 'nova_api_metadata',
'hosts': []
},
'nova_api_os_compute': {
'port': 8774,
'backend_port': 8774,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'nova_api_os_compute',
'make_public': True,
'hosts': []
},
'nova_spice_console': {
'port': 6082,
'backend_port': 6082,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE',
'group': 'nova_console',
'hosts': [],
'ssl_impossible': True,
'make_public': True,
'persist': True
},
'cinder_api': {
'port': 8776,
'backend_port': 8776,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'cinder_api',
'make_public': True,
'hosts': []
},
'horizon': {
'port': 80,
'backend_port': 80,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON',
'group': 'horizon',
'hosts': [],
},
'horizon_ssl': {
'port': 443,
'backend_port': 443,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL',
'group': 'horizon',
'hosts': [],
'make_public': True,
'persist': True,
'backend_ssl': True
},
'elasticsearch': {
'port': 9200,
'backend_port': 9200,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH',
'group': 'elasticsearch',
'hosts': []
},
'kibana': {
'port': 8888,
'backend_port': 80,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA',
'group': 'kibana',
'priority': True,
'hosts': []
},
'kibana_ssl': {
'port': 8443,
'backend_port': 8443,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL',
'group': 'kibana',
'priority': True,
'hosts': [],
'make_public': True,
'persist': True,
'backend_ssl': True
},
'swift': {
'port': 8080,
'backend_port': 8080,
'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
'group': 'swift_proxy',
'make_public': True,
'hosts': []
},
'repo': {
'port': 8181,
'backend_port': 8181,
'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO',
'group': 'pkg_repo',
'priority': True,
'hosts': []
}
}
def recursive_host_get(inventory, group_name, host_dict=None):
if host_dict is None:
        host_dict = {'hosts': []}
inventory_group = inventory.get(group_name)
if not inventory_group:
print('Inventory group "%s" not found, skipping.' % group_name)
return host_dict
if 'children' in inventory_group and inventory_group['children']:
for child in inventory_group['children']:
recursive_host_get(
inventory=inventory, group_name=child, host_dict=host_dict
)
if inventory_group.get('hosts'):
for host in inventory_group['hosts']:
if host not in host_dict['hosts']:
ca = inventory['_meta']['hostvars'][host]['container_address']
node = {
'hostname': host,
'container_address': ca
}
host_dict['hosts'].append(node)
return host_dict
def build_pool_parts(inventory):
for key, value in POOL_PARTS.iteritems():
recursive_host_get(
inventory, group_name=value['group'], host_dict=value
)
return POOL_PARTS
def file_find(filename, user_file=None, pass_exception=False):
"""Return the path to a file.
If no file is found the system will exit.
The file lookup will be done in the following directories:
/etc/openstack_deploy/
$HOME/openstack_deploy/
$(pwd)/openstack_deploy/
:param filename: ``str`` Name of the file to find
    :param user_file: ``str`` Additional location to look in FIRST for a file
    :param pass_exception: ``bool`` When True, return False instead of raising
        SystemExit if no file is found
    """
file_check = [
os.path.join(
'/etc', 'openstack_deploy', filename
),
os.path.join(
os.environ.get('HOME'), 'openstack_deploy', filename
),
os.path.join(
os.getcwd(), filename
)
]
if user_file is not None:
file_check.insert(0, os.path.expanduser(user_file))
for f in file_check:
if os.path.isfile(f):
return f
else:
if pass_exception is False:
raise SystemExit('No file found at: %s' % file_check)
else:
return False
def args():
"""Setup argument Parsing."""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description='Rackspace Openstack, Inventory Generator',
epilog='Inventory Generator Licensed "Apache 2.0"')
parser.add_argument(
'-f',
'--file',
help='Inventory file. Default: [ %(default)s ]',
required=False,
default='openstack_inventory.json'
)
parser.add_argument(
'-s',
'--snat-pool-address',
help='LB Main SNAT pool address for [ RPC_SNATPOOL ], for'
             ' multiple SNAT pool addresses, comma-separate the IP'
             ' addresses. By default this IP will be .15 from within your'
             ' container_cidr as found within inventory.',
required=False,
default=None
)
parser.add_argument(
'--limit-source',
help='Limit available connections to the source IP for all source'
' limited entries.',
required=False,
default=None
)
parser.add_argument(
'--ssl-public-ip',
help='Public IP address for the F5 to use.',
required=False,
default=None
)
parser.add_argument(
'--ssl-domain-name',
help='Name of the domain that will have an ssl cert.',
required=False,
default=None
)
parser.add_argument(
'--sec-host-network',
help='Security host network address and netmask.'
' EXAMPLE: "192.168.1.1:255.255.255.0"',
required=False,
default=None
)
parser.add_argument(
'--sec-container-network',
help='Security container network address and netmask.'
' EXAMPLE: "192.168.1.1:255.255.255.0"',
required=False,
default=None
)
parser.add_argument(
'--sec-public-vlan-name',
        help='Name of the public VLAN (as defined on the F5) used by the'
             ' security host network virtual server.',
required=False,
default=None
)
parser.add_argument(
'--galera-monitor-user',
help='Name of the user that will be available for the F5 to pull when'
' monitoring Galera.',
required=False,
default='openstack'
)
parser.add_argument(
'--print',
help='Print the script to screen, as well as write it out',
required=False,
default=False,
action='store_true'
)
parser.add_argument(
'-e',
'--export',
help='Export the generated F5 configuration script.'
' Default: [ %(default)s ]',
required=False,
default=os.path.join(
os.path.expanduser('~/'), 'rpc_f5_config.sh'
)
)
parser.add_argument(
'-S',
'--Superman',
help='Yes, its Superman ... strange visitor from another planet,'
'who came to Earth with powers and abilities far beyond those of mortal men! '
'Superman ... who can change the course of mighty rivers, bend steel in his bare hands,'
'and who, disguised as Clark Kent, mild-mannered reporter for a great metropolitan newspaper,'
'fights a never-ending battle for truth, justice, and the American way!',
required=False,
default=False,
action='store_true'
)
return vars(parser.parse_args())
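# Example invocation (illustrative; the script name depends on how this file
# is saved):
#   python rpc_f5_config.py -f openstack_inventory.json \
#       --ssl-public-ip 203.0.113.10 --ssl-domain-name example.com --print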
def main():
"""Run the main application."""
# Parse user args
user_args = args()
# Get the contents of the system environment json
environment_file = file_find(filename=user_args['file'])
with open(environment_file, 'rb') as f:
inventory_json = json.loads(f.read())
commands = []
nodes = []
pools = []
virts = []
sslvirts = []
pubvirts = []
commands.extend([
'### CREATE SECURITY iRULE ###',
'create ltm rule /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL',
' --> Copy and Paste the following between pre-included curly brackets <--',
'when CLIENT_ACCEPTED { discard }\n',
'### CREATE EXTERNAL MONITOR ###',
' --> Upload External monitor file to disk <--',
' run util bash',
' cd /config/monitors/',
' vi RPC-MON-EXT-ENDPOINT.monitor',
' --> Copy and Paste the External monitor into vi <--',
' create sys file external-monitor /' + PART + '/RPC-MON-EXT-ENDPOINT { source-path file:///config/monitors/RPC-MON-EXT-ENDPOINT.monitor }',
' save sys config',
' create ltm monitor external /' + PART + '/RPC-MON-EXT-ENDPOINT { interval 20 timeout 61 run /' + PART + '/RPC-MON-EXT-ENDPOINT }\n'
])
if user_args['ssl_domain_name']:
commands.extend([
'### UPLOAD SSL CERT KEY PAIR ###',
'cd /RPC',
'install sys crypto cert /' + PART + '/%(ssl_domain_name)s.crt from-editor'
% user_args,
' --> Copy and Paste provided domain cert for public api endpoint <--',
'install sys crypto key /' + PART + '/%(ssl_domain_name)s.key from-editor'
% user_args,
' --> Copy and Paste provided domain key for public api endpoint <--',
'cd /Common\n',
'### CREATE SSL PROFILES ###',
('create ltm profile client-ssl'
' /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s'
' { cert /' + PART + '/%(ssl_domain_name)s.crt key'
' /' + PART + '/%(ssl_domain_name)s.key defaults-from clientssl }')
% user_args,
            'create ltm profile server-ssl /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { defaults-from /Common/serverssl }\n',
])
if user_args['Superman']:
print " ************************** "
print " .*##*:*####***:::**###*:######*. "
print " *##: .###* *######:,##* "
print " *##: :####: *####*. :##: "
print " *##,:########**********:, :##: "
print " .#########################*, *#* "
print " *#########################*##: "
print " *##, ..,,::**#####: "
print " ,##*,*****, *##* "
print " *#########*########: "
print " *##*:*******###* "
print " .##*. ,##* "
print " :##* *##, "
print " *####: "
print " :, "
# Kal-El
# SUPERMAN
# JNA
pool_parts = build_pool_parts(inventory=inventory_json)
lb_vip_address = inventory_json['all']['vars']['internal_lb_vip_address']
for key, value in pool_parts.iteritems():
value['group_name'] = key.upper()
value['vs_name'] = '%s_VS_%s' % (
PREFIX_NAME, value['group_name']
)
value['pool_name'] = '%s_POOL_%s' % (
PREFIX_NAME, value['group_name']
)
node_data = []
priority = 100
for node in value['hosts']:
node['node_name'] = '%s_NODE_%s' % (PREFIX_NAME, node['hostname'])
nodes.append(NODES % node)
if value.get('persist'):
persist = PERSIST_OPTION
else:
persist = str()
virtual_dict = {
'port': value['port'],
'vs_name': value['vs_name'],
'pool_name': value['pool_name'],
'internal_lb_vip_address': lb_vip_address,
'persist': persist,
'ssl_domain_name': user_args['ssl_domain_name'],
'ssl_public_ip': user_args['ssl_public_ip'],
}
##########################################
virt = '%s' % VIRTUAL_ENTRIES % virtual_dict
if virt not in virts:
virts.append(virt)
if user_args['ssl_public_ip']:
if not value.get('backend_ssl'):
virtual_dict['ssl_profiles'] = (
'/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'
) % user_args
else:
                    virtual_dict['ssl_profiles'] = (
                        '/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { context serverside }'
                        ' /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'
                    ) % user_args
if value.get('make_public'):
                if value.get('ssl_impossible'):
virtual_dict['vs_name'] = '%s_VS_%s' % (
'RPC_PUB', value['group_name']
)
pubvirt = (
'%s\n'
) % PUB_NONSSL_VIRTUAL_ENTRIES % virtual_dict
if pubvirt not in pubvirts:
pubvirts.append(pubvirt)
else:
virtual_dict['vs_name'] = '%s_VS_%s' % (
'RPC_PUB_SSL', value['group_name']
)
sslvirt = '%s' % PUB_SSL_VIRTUAL_ENTRIES % virtual_dict
if sslvirt not in sslvirts:
sslvirts.append(sslvirt)
if value.get('priority') is True:
node_data.append(
'%s:%s %s' % (
node['node_name'],
value['backend_port'],
PRIORITY_ENTRY % {'priority_int': priority}
)
)
priority -= 5
else:
node_data.append(
'%s:%s' % (
node['node_name'],
value['backend_port']
)
)
##########################################
value['nodes'] = ' '.join(node_data)
pool_node = [POOL_NODE['beginning'] % value]
if value.get('priority') is True:
pool_node.append(POOL_NODE['priority'])
pool_node.append(POOL_NODE['end'] % value)
pools.append('%s' % ' '.join(pool_node))
# define the SNAT pool address
snat_pool_adds = user_args.get('snat_pool_address')
if snat_pool_adds is None:
container_cidr = inventory_json['all']['vars']['container_cidr']
network = netaddr.IPNetwork(container_cidr)
snat_pool_adds = str(network[15])
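        # e.g. with a container_cidr of '172.29.236.0/22' (illustrative),
        # network[15] evaluates to '172.29.236.15'.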
snat_pool_addresses = ' '.join(snat_pool_adds.split(','))
snat_pool = '%s\n' % SNAT_POOL % {
'snat_pool_addresses': snat_pool_addresses
}
script = [
'#!/usr/bin/bash\n',
r'### CREATE RPC PARTITION ###',
'create auth partition %s\n' % PART,
r'### SET DISPLAY PORT NUMBERS ###',
'modify cli global-settings service number\n',
snat_pool
]
script.extend(['### CREATE MONITORS ###'])
script.extend(['%s' % i % user_args for i in MONITORS])
script.extend(['%s' % i for i in commands])
script.extend(['### CREATE PERSISTENCE PROFILES ###'])
script.extend(['%s' % i % user_args for i in PERSISTANCE])
script.extend(['### CREATE NODES ###'])
script.extend(['%s' % i % user_args for i in nodes])
script.extend(['\n### CREATE POOLS ###'])
script.extend(pools)
script.extend(['\n### CREATE VIRTUAL SERVERS ###'])
script.extend(virts)
script.extend(['\n### CREATE PUBLIC SSL OFFLOADED VIRTUAL SERVERS ###'])
script.extend(sslvirts)
script.extend(['\n### CREATE PUBLIC SSL PASS-THROUGH VIRTUAL SERVERS ###'])
script.extend(pubvirts)
if user_args['sec_host_network']:
hostnet, netmask = user_args['sec_host_network'].split(':')
if not user_args['sec_public_vlan_name']:
raise SystemExit('Please set the [ --sec-public-vlan-name ] value')
script.append(
SEC_HOSTNET_VIRTUAL_ENTRIES % {
'sec_host_net': hostnet,
'sec_host_netmask': netmask,
'sec_public_vlan_name': user_args['sec_public_vlan_name']
}
)
if user_args['sec_container_network']:
hostnet, netmask = user_args['sec_container_network'].split(':')
script.append(
SEC_CONTAINER_VIRTUAL_ENTRIES % {
'sec_container_net': hostnet,
'sec_container_netmask': netmask
}
)
script.extend(['%s\n' % i for i in END_COMMANDS])
if user_args['print']:
for i in script:
print(i)
with open(user_args['export'], 'w+') as f:
f.writelines("\n".join(script))
if __name__ == "__main__":
main()
|
|
import re
import six
import json
import uuid
import traceback
from peewee import (
BigIntegerField, ForeignKeyField, TextField, DateTimeField,
BooleanField, UUIDField
)
from datetime import datetime, timedelta
from playhouse.postgres_ext import BinaryJSONField, ArrayField
from disco.types.base import UNSET
from rowboat import REV
from rowboat.util import default_json
from rowboat.models.user import User
from rowboat.sql import BaseModel
EMOJI_RE = re.compile(r'<:.+:([0-9]+)>')
@BaseModel.register
class Message(BaseModel):
id = BigIntegerField(primary_key=True)
channel_id = BigIntegerField()
guild_id = BigIntegerField(null=True)
author = ForeignKeyField(User)
content = TextField()
timestamp = DateTimeField()
edited_timestamp = DateTimeField(null=True, default=None)
deleted = BooleanField(default=False)
num_edits = BigIntegerField(default=0)
command = TextField(null=True)
mentions = ArrayField(BigIntegerField, default=[], null=True)
emojis = ArrayField(BigIntegerField, default=[], null=True)
attachments = ArrayField(TextField, default=[], null=True)
embeds = BinaryJSONField(default=[], null=True)
SQL = '''
CREATE INDEX\
IF NOT EXISTS messages_content_fts ON messages USING gin(to_tsvector('english', content));
CREATE INDEX\
IF NOT EXISTS messages_mentions ON messages USING gin (mentions);
'''
class Meta:
db_table = 'messages'
indexes = (
# These indexes are mostly just general use
(('channel_id', ), False),
(('guild_id', ), False),
(('deleted', ), False),
# Timestamp is regularly sorted on
(('timestamp', ), False),
# Some queries want to get history in a guild or channel
(('author', 'guild_id', 'channel_id'), False),
)
@classmethod
def from_disco_message_update(cls, obj):
if not obj.edited_timestamp:
return
to_update = {
'edited_timestamp': obj.edited_timestamp,
'num_edits': cls.num_edits + 1,
'mentions': list(obj.mentions.keys()),
}
if obj.content is not UNSET:
to_update['content'] = obj.with_proper_mentions
to_update['emojis'] = list(map(int, EMOJI_RE.findall(obj.content)))
if obj.attachments is not UNSET:
to_update['attachments'] = [i.url for i in obj.attachments.values()]
if obj.embeds is not UNSET:
to_update['embeds'] = [json.dumps(i.to_dict(), default=default_json) for i in obj.embeds]
cls.update(**to_update).where(cls.id == obj.id).execute()
@classmethod
def from_disco_message(cls, obj):
_, created = cls.get_or_create(
id=obj.id,
defaults=dict(
channel_id=obj.channel_id,
guild_id=(obj.guild and obj.guild.id),
author=User.from_disco_user(obj.author),
content=obj.with_proper_mentions,
timestamp=obj.timestamp,
edited_timestamp=obj.edited_timestamp,
num_edits=(0 if not obj.edited_timestamp else 1),
mentions=list(obj.mentions.keys()),
emojis=list(map(int, EMOJI_RE.findall(obj.content))),
attachments=[i.url for i in obj.attachments.values()],
embeds=[json.dumps(i.to_dict(), default=default_json) for i in obj.embeds]))
for user in obj.mentions.values():
User.from_disco_user(user)
return created
@classmethod
def from_disco_message_many(cls, messages, safe=False):
q = cls.insert_many(map(cls.convert_message, messages))
if safe:
q = q.on_conflict('DO NOTHING')
return q.execute()
@staticmethod
def convert_message(obj):
return {
'id': obj.id,
'channel_id': obj.channel_id,
'guild_id': (obj.guild and obj.guild.id),
'author': User.from_disco_user(obj.author),
'content': obj.with_proper_mentions,
'timestamp': obj.timestamp,
'edited_timestamp': obj.edited_timestamp,
'num_edits': (0 if not obj.edited_timestamp else 1),
'mentions': list(obj.mentions.keys()),
'emojis': list(map(int, EMOJI_RE.findall(obj.content))),
'attachments': [i.url for i in obj.attachments.values()],
'embeds': [json.dumps(i.to_dict(), default=default_json) for i in obj.embeds],
}
@classmethod
def for_channel(cls, channel):
return cls.select().where(cls.channel_id == channel.id)
@BaseModel.register
class Reaction(BaseModel):
message_id = BigIntegerField()
user_id = BigIntegerField()
emoji_id = BigIntegerField(null=True)
emoji_name = TextField()
class Meta:
db_table = 'reactions'
indexes = (
(('message_id', 'user_id', 'emoji_id', 'emoji_name'), True),
(('user_id', ), False),
(('emoji_name', 'emoji_id', ), False),
)
@classmethod
def from_disco_reactors(cls, message_id, reaction, user_ids):
cls.insert_many([
{
'message_id': message_id,
'user_id': i,
'emoji_id': reaction.emoji.id or None,
'emoji_name': reaction.emoji.name or None
} for i in user_ids
]).on_conflict('DO NOTHING').execute()
@classmethod
def from_disco_reaction(cls, obj):
return cls.create(
message_id=obj.message_id,
user_id=obj.user_id,
emoji_id=obj.emoji.id or None,
emoji_name=obj.emoji.name or None)
@BaseModel.register
class MessageArchive(BaseModel):
FORMATS = ['txt', 'csv', 'json']
archive_id = UUIDField(primary_key=True, default=uuid.uuid4)
message_ids = BinaryJSONField()
created_at = DateTimeField(default=datetime.utcnow)
expires_at = DateTimeField(default=lambda: datetime.utcnow() + timedelta(days=7))
class Meta:
db_table = 'message_archives'
indexes = (
(('created_at', ), False),
(('expires_at', ), False)
)
@classmethod
def create_from_message_ids(cls, message_ids):
return cls.create(message_ids=message_ids)
@property
def url(self):
# TODO: use web endpoint here
return 'https://dashboard.rowboat.party/archive/{}.txt'.format(self.archive_id)
def encode(self, fmt='txt'):
if fmt not in self.FORMATS:
raise Exception('Invalid format {}'.format(fmt))
q = Message.select(
Message.id,
Message.channel_id,
Message.timestamp,
Message.content,
Message.deleted,
Message.attachments,
User
).join(
User
).where(
(Message.id << self.message_ids)
)
if fmt == 'txt':
return u'\n'.join(map(self.encode_message_text, q))
elif fmt == 'csv':
return u'\n'.join([
'id,channel_id,timestamp,author_id,author,content,deleted,attachments'
] + map(self.encode_message_csv, q))
elif fmt == 'json':
return json.dumps({
'messages': map(self.encode_message_json, q)
})
@staticmethod
def encode_message_text(msg):
return u'{m.timestamp} ({m.id} / {m.channel_id} / {m.author.id}) {m.author}: {m.content} ({attach})'.format(
m=msg, attach=', '.join(map(unicode, msg.attachments or [])))
@staticmethod
def encode_message_csv(msg):
def wrap(i):
return u'"{}"'.format(six.text_type(i).replace('"', '""'))
return ','.join(map(wrap, [
msg.id,
msg.timestamp,
msg.author.id,
msg.author,
msg.content,
str(msg.deleted).lower(),
' '.join(msg.attachments or [])
]))
@staticmethod
def encode_message_json(msg):
return dict(
id=str(msg.id),
timestamp=str(msg.timestamp),
author_id=str(msg.author.id),
username=msg.author.username,
discriminator=msg.author.discriminator,
content=msg.content,
deleted=msg.deleted,
attachments=msg.attachments)
@BaseModel.register
class StarboardEntry(BaseModel):
message = ForeignKeyField(Message, primary_key=True)
# Information on where this starboard message lies
star_channel_id = BigIntegerField(null=True)
star_message_id = BigIntegerField(null=True)
    # List of user ids who starred this message; not guaranteed to be accurate
stars = ArrayField(BigIntegerField, default=[])
# List of user ids who starred this message, but are blocked
blocked_stars = ArrayField(BigIntegerField, default=[])
blocked = BooleanField(default=False)
dirty = BooleanField(default=False)
SQL = '''
CREATE INDEX\
IF NOT EXISTS starboard_entries_stars ON starboard_entries USING gin (stars);
'''
class Meta:
db_table = 'starboard_entries'
indexes = (
(('star_channel_id', 'star_message_id'), True),
)
@classmethod
def add_star(cls, message_id, user_id):
sql = '''
INSERT INTO starboard_entries (message_id, stars, blocked_stars, blocked, dirty)
VALUES (%s, ARRAY[%s], ARRAY[]::integer[], false, true)
ON CONFLICT (message_id)
DO UPDATE
SET stars = array_append(starboard_entries.stars, %s), dirty = true
WHERE NOT starboard_entries.stars @> ARRAY[%s]
'''
cls.raw(sql, message_id, user_id, user_id, user_id).execute()
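        # Semantics (sketch): the first star from a user inserts or updates
        # the row and appends the user id; repeated calls are no-ops because
        # the WHERE clause rejects rows whose stars array already contains it.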
@classmethod
def remove_star(cls, message_id, user_id):
sql = '''
UPDATE starboard_entries
SET
stars = array_remove(stars, %s),
            blocked_stars = array_remove(blocked_stars, %s),
dirty = true
WHERE message_id=%s AND starboard_entries.stars @> ARRAY[%s]
'''
cls.raw(sql, user_id, user_id, message_id, user_id).execute()
@classmethod
def block_user(cls, user_id):
sql = '''
UPDATE starboard_entries
SET stars = array_remove(stars, %s),
            blocked_stars = array_append(blocked_stars, %s)
WHERE starboard_entries.stars @> ARRAY[%s]
'''
        cls.raw(sql, user_id, user_id, user_id).execute()
StarboardEntry.update(
blocked=True,
).where(
(StarboardEntry.message_id << (
StarboardEntry.select().join(Message).where(
(Message.author_id == user_id)
)
))
).execute()
@classmethod
def unblock_user(cls, user_id):
sql = '''
UPDATE starboard_entries
SET stars = array_append(stars, %s),
blocked_stars = array_remove(blocked_stars, %s),
dirty = true
            WHERE starboard_entries.blocked_stars @> ARRAY[%s]
'''
        cls.raw(sql, user_id, user_id, user_id).execute()
StarboardEntry.update(
dirty=True,
blocked=False,
).where(
(StarboardEntry.message_id << (
StarboardEntry.select().join(Message).where(
(Message.author_id == user_id)
)
)) & (StarboardEntry.blocked == 1)
).execute()
@BaseModel.register
class Reminder(BaseModel):
message_id = BigIntegerField(primary_key=True)
created_at = DateTimeField(default=datetime.utcnow)
remind_at = DateTimeField()
content = TextField()
class Meta:
db_table = 'reminders'
@classmethod
def with_message_join(cls, fields=None):
return cls.select(
*(fields or (Reminder, Message))
).join(Message, on=(
Reminder.message_id == Message.id
))
@classmethod
def count_for_user(cls, user_id):
return cls.with_message_join().where(
(Message.author_id == user_id)
).count()
@classmethod
def delete_for_user(cls, user_id):
return cls.delete().where(
(cls.message_id << cls.with_message_join((Message.id, )).where(
Message.author_id == user_id
))
).execute()
@BaseModel.register
class Command(BaseModel):
message_id = BigIntegerField(primary_key=True)
plugin = TextField()
command = TextField()
version = TextField()
success = BooleanField()
traceback = TextField(null=True)
class Meta:
db_table = 'commands'
indexes = (
(('success', ), False),
(('plugin', 'command'), False),
)
@classmethod
def track(cls, event, command, exception=False):
cls.create(
message_id=event.message.id,
plugin=command.plugin.name,
command=command.name,
version=REV,
success=not exception,
traceback=traceback.format_exc() if exception else None,
)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='auto',
choices=['default', 'thin', 'auto'],
help='Type of LVM volumes to deploy; (default, thin, or auto). '
'Auto defaults to thin if thin is supported.'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).'),
cfg.FloatOpt('lvm_max_over_subscription_ratio',
# This option exists to provide a default value for the
# LVM driver which is different than the global default.
deprecated_for_removal=True,
deprecated_since="12.0.0",
deprecated_reason='Oversubscription ratio should now be '
'set using the common max_over_subscription'
'_ratio config option instead.',
default=None,
help='max_over_subscription_ratio setting for the LVM '
'driver. If set to None (the default), the general max_'
'over_subscription_ratio is used.'),
cfg.BoolOpt('lvm_suppress_fd_warnings',
default=False,
help='Suppress leaked file descriptor warnings in LVM '
'commands.')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Cinder_Jenkins"
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('target_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
self._sparse_copy_volume = False
if self.configuration.lvm_max_over_subscription_ratio is not None:
self.configuration.max_over_subscription_ratio = \
self.configuration.lvm_max_over_subscription_ratio
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
            # If the volume to be cleared is a snapshot of another volume,
            # we need to clear it via the -cow device instead of the volume
            # path directly. We skip this entirely when using thin
            # provisioned LVs.
            # bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = (volume.get('volume_size') if is_snapshot
else volume.get('size'))
if size_in_g is None:
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
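        # e.g. size_in_g=8 -> 8 * 1024 = 8192 MiB handed to clear_volume()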
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
        # Linux LVM reserves names that start with 'snapshot', so such
        # volume names can't be created directly. Mangle the name.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _unescape_snapshot(self, snapshot_name):
# Undo snapshot name change done by _escape_snapshot()
if not snapshot_name.startswith('_snapshot'):
return snapshot_name
return snapshot_name[1:]
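    # Round-trip sketch of the two helpers above (names illustrative):
    #   _escape_snapshot('snapshot-abc')    -> '_snapshot-abc'
    #   _escape_snapshot('volume-abc')      -> 'volume-abc' (unchanged)
    #   _unescape_snapshot('_snapshot-abc') -> 'snapshot-abc'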
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning('Unable to update stats on non-initialized '
'Volume Group: %s',
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
        # Note(zhiteng): This information is driver/backend specific;
        # each driver may define these values in its own config options
        # or fetch them from a driver-specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
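        # e.g. 'LVMVolumeDriver:node1:cinder-volumes:thin:0' (hostname and
        # VG name illustrative); migrate_volume() later splits this string
        # on ':' to decide whether a local copy is possible.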
thin_enabled = self.configuration.lvm_type == 'thin'
# Calculate the total volumes used by the VG group.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function(),
multiattach=True,
backend_state='up'
))
data["pools"].append(single_pool)
data["shared_targets"] = False
# Check availability of sparse volume copy.
data['sparse_copy_volume'] = self._sparse_copy_volume
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
lvm_type = self.configuration.lvm_type
if lvm_type == 'auto':
if volutils.supports_thin_provisioning():
lvm_type = 'thin'
else:
lvm_type = 'default'
self.vg = lvm.LVM(
self.configuration.volume_group,
root_helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file,
suppress_fd_warn=(
self.configuration.lvm_suppress_fd_warnings))
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
        vg_dict = next(
            (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.configuration.lvm_type == 'auto':
# Default to thin provisioning if it is supported and
# the volume group is empty, or contains a thin pool
# for us to use.
self.vg.update_volume_group_info()
self.configuration.lvm_type = 'default'
if volutils.supports_thin_provisioning():
if self.vg.get_volume(pool_name) is not None:
LOG.info('Enabling LVM thin provisioning by default '
'because a thin pool exists.')
self.configuration.lvm_type = 'thin'
elif len(self.vg.get_volumes()) == 0:
LOG.info('Enabling LVM thin provisioning by default '
'because no LVs exist.')
self.configuration.lvm_type = 'thin'
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Enable sparse copy since lvm_type is 'thin'
self._sparse_copy_volume = True
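    # How lvm_type='auto' resolves above (assuming thin support is present):
    #   the VG already holds '<volume_group>-pool'  -> 'thin'
    #   the VG is empty (no LVs at all)             -> 'thin'
    #   otherwise, or without thin support          -> 'default'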
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from LVM for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self.vg.rename_volume(current_name, original_volume_name)
except processutils.ProcessExecutionError:
LOG.error('Unable to rename the logical volume '
'for volume: %s', volume['id'])
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
if self.configuration.lvm_type == 'thin':
self.vg.create_lv_snapshot(volume['name'],
self._escape_snapshot(snapshot['name']),
self.configuration.lvm_type)
if volume['size'] > snapshot['volume_size']:
LOG.debug("Resize the new volume to %s.", volume['size'])
self.extend_volume(volume, volume['size'])
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
self.vg.activate_lv(volume['name'], is_snapshot=True,
permanent=True)
return
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self._sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error('Unable to delete due to existing snapshot '
'for volume: %s', volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info('Successfully deleted volume: %s', volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning("snapshot: %s not found, "
"skipping delete operations", snapshot['name'])
LOG.info('Successfully deleted snapshot: %s', snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert a volume to a snapshot"""
# NOTE(tommylikehu): We still can revert the volume because Cinder
# will try the alternative approach if 'NotImplementedError'
# is raised here.
if self.configuration.lvm_type == 'thin':
msg = _("Revert volume to snapshot not implemented for thin LVM.")
raise NotImplementedError(msg)
else:
self.vg.revert(self._escape_snapshot(snapshot.name))
self.vg.deactivate_lv(volume.name)
self.vg.activate_lv(volume.name)
# Recreate the snapshot that was destroyed by the revert
self.create_snapshot(snapshot)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
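    # Sketch of the device-mapper escaping above (names illustrative):
    #   vg='cinder-volumes', volume name 'volume-1234'
    #   -> '/dev/mapper/cinder--volumes-volume--1234'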
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
if self.configuration.lvm_type == 'thin':
self.vg.create_lv_snapshot(volume['name'],
src_vref['name'],
self.configuration.lvm_type)
if volume['size'] > src_vref['size']:
LOG.debug("Resize the new volume to %s.", volume['size'])
self.extend_volume(volume, volume['size'])
self.vg.activate_lv(volume['name'], is_snapshot=True,
permanent=True)
return
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info('Creating clone of volume: %s', src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self._sparse_copy_volume)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
try:
self.target_driver.extend_target(volume)
except Exception:
LOG.exception('Error extending target after volume resize.')
raise exception.TargetUpdateFailed(volume_id=volume.id)
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
vol_id = volutils.extract_id_from_volume_name(lv_name)
if volutils.check_already_managed_volume(vol_id):
raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_object_get_size(self, existing_object, existing_ref,
object_type):
"""Return size of an existing LV for manage existing volume/snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing %(type)s "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'type': object_type,
'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
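    # e.g. an LV whose reported size is '2.34' is managed as a 3 GiB
    # resource, since sizes are rounded up to the next whole gigabyte.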
def manage_existing_get_size(self, volume, existing_ref):
return self.manage_existing_object_get_size(volume, existing_ref,
"volume")
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing_object_get_size(snapshot, existing_ref,
"snapshot")
def manage_existing_snapshot(self, snapshot, existing_ref):
dest_name = self._escape_snapshot(snapshot['name'])
snapshot_temp = {"name": dest_name}
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing(snapshot_temp, existing_ref)
def _get_manageable_resource_info(self, cinder_resources, resource_type,
marker, limit, offset, sort_keys,
sort_dirs):
entries = []
lvs = self.vg.get_volumes()
cinder_ids = [resource['id'] for resource in cinder_resources]
for lv in lvs:
is_snap = self.vg.lv_is_snapshot(lv['name'])
if ((resource_type == 'volume' and is_snap) or
(resource_type == 'snapshot' and not is_snap)):
continue
if resource_type == 'volume':
potential_id = volutils.extract_id_from_volume_name(lv['name'])
else:
unescape = self._unescape_snapshot(lv['name'])
potential_id = volutils.extract_id_from_snapshot_name(unescape)
lv_info = {'reference': {'source-name': lv['name']},
'size': int(math.ceil(float(lv['size']))),
'cinder_id': None,
'extra_info': None}
if potential_id in cinder_ids:
lv_info['safe_to_manage'] = False
lv_info['reason_not_safe'] = 'already managed'
lv_info['cinder_id'] = potential_id
elif self.vg.lv_is_open(lv['name']):
lv_info['safe_to_manage'] = False
lv_info['reason_not_safe'] = '%s in use' % resource_type
else:
lv_info['safe_to_manage'] = True
lv_info['reason_not_safe'] = None
if resource_type == 'snapshot':
origin = self.vg.lv_get_origin(lv['name'])
lv_info['source_reference'] = {'source-name': origin}
entries.append(lv_info)
return volutils.paginate_entries_list(entries, marker, limit, offset,
sort_keys, sort_dirs)
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
return self._get_manageable_resource_info(cinder_volumes, 'volume',
marker, limit,
offset, sort_keys, sort_dirs)
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
sort_keys, sort_dirs):
return self._get_manageable_resource_info(cinder_snapshots, 'snapshot',
marker, limit,
offset, sort_keys, sort_dirs)
def retype(self, context, volume, new_type, diff, host):
"""Retypes a volume, allow QoS and extra_specs change."""
LOG.debug('LVM retype called for volume %s. No action '
'required for LVM volumes.',
volume['id'])
return True
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg == self.vg.vg_name:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.error(message)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
LOG.error("Destination Volume Group %s does not exist",
dest_vg)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
size_in_mb = int(volume['size']) * units.Ki
try:
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
size_in_mb,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self._sparse_copy_volume)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Volume migration failed due to "
"exception: %(reason)s.",
{'reason': six.text_type(e)}, resource=volume)
dest_vg_ref.delete(volume)
self._delete_volume(volume)
return (True, None)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
self.vg.activate_lv(volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
self.vg.activate_lv(volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
        # NOTE(jdg): LVM has a single export for each volume, so what we
        # need to do here is check if there is more than one attachment for
        # the volume; if there is, let the caller know that they should NOT
        # remove the export.
# NOTE(jdg): For the TGT driver this is a noop, for LIO this removes
# the initiator IQN from the targets access list, so we're good
# NOTE(lyarwood): Given the above note we should only call
# terminate_connection for the target lioadm driver when there is only
# one attachment left for the host specified by the connector to
# remove, otherwise the ACL will be removed prematurely while other
# attachments on the same host are still accessing the volume.
attachments = volume.volume_attachment
if volume.multiattach:
if sum(1 for a in attachments if a.connector and
a.connector['initiator'] == connector['initiator']) > 1:
return True
self.target_driver.terminate_connection(volume, connector, **kwargs)
return len(attachments) > 1
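    # Sketch of the guard above (initiator value illustrative): with
    # multiattach set and two live attachments from initiator
    # 'iqn.1994-05.com.example:host1', terminate_connection() returns True
    # without touching the target, so the host's ACL stays intact.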
|
|
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import StringIO
import threading
import six
from six.moves import http_client
from googlecloudapis.apitools.base.py import buffered_stream
from googlecloudapis.apitools.base.py import exceptions
from googlecloudapis.apitools.base.py import http_wrapper
from googlecloudapis.apitools.base.py import stream_slice
from googlecloudapis.apitools.base.py import util
__all__ = [
'Download',
'Upload',
'RESUMABLE_UPLOAD',
'SIMPLE_UPLOAD',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20  # 5 MiB
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None, num_retries=5):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.__num_retries = 5
# Let the @property do validation
self.num_retries = num_retries
self.retry_func = http_wrapper.HandleExceptionsAndRebuildHttpConnections
self.auto_transfer = auto_transfer
    self.chunksize = chunksize or 1048576  # default to 1 MiB chunks
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s', self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s', self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(user): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
http_client.OK,
http_client.NO_CONTENT,
http_client.PARTIAL_CONTENT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, *args, **kwds):
total_size = kwds.pop('total_size', None)
super(Download, self).__init__(*args, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = total_size
self.__encoding = None
@property
def progress(self):
return self.__progress
@property
def encoding(self):
return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer,
**kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize(http, info['url']) # pylint: disable=protected-access
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
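  # The dict above round-trips through FromData, e.g. (values illustrative):
  #   {"auto_transfer": true, "progress": 65536,
  #    "total_size": 1048576, "url": "https://example.com/download"}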
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
else:
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
# TODO(user): We need to send range requests because by
    # default httplib2 stores entire responses in memory. Override
# httplib2's download method (as gsutil does) so that this is not
# necessary.
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
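  # e.g. a 'content-range: bytes 0-1048575/2097152' header sets total_size
  # to 2097152; with no usable total, the 0-byte fallback above applies.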
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
response = http_wrapper.MakeRequest(self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
@staticmethod
def _ArgPrinter(response, unused_download):
if 'content-range' in response.info:
print('Received %s' % response.info['content-range'])
else:
print('Received %d bytes' % response.length)
@staticmethod
def _CompletePrinter(*unused_args):
print('Download complete')
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index')
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size')
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size
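  # With total_size == 100, the three accepted forms normalize as:
  #   (0, None)   -> (0, 100)   whole file
  #   (10, 49)    -> (10, 49)   explicit inclusive range
  #   (-25, None) -> (75, 100)  the last 25 bytes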
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
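  # Resulting headers: start=-500 -> 'bytes=-500'; start=100, end=None ->
  # 'bytes=100-'; start=0, end=499 -> 'bytes=0-499'.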
def __GetChunk(self, start, end=None, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
end_byte = end
if self.total_size and end:
end_byte = min(end, self.total_size)
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end_byte)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
def __ProcessResponse(self, response):
"""Process this response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
# We distinguish errors that mean we made a mistake in setting
# up the transfer versus something we should attempt again.
if response.status_code in (http_client.FORBIDDEN, http_client.NOT_FOUND):
raise exceptions.HttpError.FromResponse(response)
else:
raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK, http_client.PARTIAL_CONTENT):
self.stream.write(response.content)
self.__progress += response.length
if response.info and 'content-encoding' in response.info:
# TODO(user): Handle the case where this changes over a
# download.
self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
      additional_headers: (dict, optional) Any additional headers to
pass with the request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
progress, end = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
while not progress_end_normalized or progress < end:
response = self.__GetChunk(progress, end=end,
additional_headers=additional_headers)
if not progress_end_normalized:
self.__SetTotal(response.info)
progress, end = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
      if response.length == 0:
raise exceptions.TransferRetryError(
'Zero bytes unexpectedly returned in download response')
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download."""
callback = callback or self._ArgPrinter
finish_callback = finish_callback or self._CompletePrinter
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
response = self.__GetChunk(self.progress,
additional_headers=additional_headers)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True,
**kwds):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http, **kwds)
self.__complete = False
self.__final_response = None
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
**kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size, close_stream=False,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds):
"""Create a new Upload of stream from serialized json_data using http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
if 'total_size' in kwds:
raise exceptions.InvalidUserInputError(
'Cannot override total_size on serialized Upload')
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'), **kwds)
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
upload.strategy = RESUMABLE_UPLOAD
upload._Initialize(http, info['url']) # pylint: disable=protected-access
upload.RefreshResumableUploadState()
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
else:
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
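  # Examples of the defaulting above (sizes illustrative):
  #   12 MiB stream                          -> resumable (over threshold)
  #   1 MiB stream, multipart supported      -> simple
  #   metadata body without multipart support, or no simple endpoint
  #                                          -> resumable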
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
http_request.loggable_body = '<media body>'
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# encode the body: note that we can't use `as_string`, because
# it plays games with `From ` lines.
fp = StringIO.StringIO()
g = email_generator.Generator(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
body_components = http_request.body.split(multipart_boundary)
headers, _, _ = body_components[-2].partition('\n\n')
body_components[-2] = '\n\n'.join([headers, '<media body>\n\n--'])
http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers['X-Upload-Content-Length'] = str(self.total_size)
def RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload.
Returns:
Response if the upload is complete.
"""
if self.strategy != RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0, retries=self.num_retries)
range_header = self._GetRangeHeaderFromResponse(refresh_response)
if refresh_response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
self.__progress = self.total_size
self.stream.seek(self.progress)
# If we're finished, the refresh response will contain the metadata
# originally requested. Cache it so it can be returned in StreamInChunks.
self.__final_response = refresh_response
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def _GetRangeHeaderFromResponse(self, response):
return response.info.get('Range', response.info.get('range'))
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != RESUMABLE_UPLOAD:
return
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request,
retries=self.num_retries)
if http_response.status_code != http_client.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(user): Validate start == 0?
return int(end)
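  # e.g. a 'Range: bytes=0-65535' response header yields 65535, so the
  # next chunk resumes at byte 65536.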
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d',
self.__server_chunk_granularity)
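  # e.g. with a server granularity of 262144 bytes, the default 1 MiB
  # chunksize (4 * 262144) passes, while 1000000 would raise
  # ConfigurationValueError.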
@staticmethod
def _ArgPrinter(response, unused_upload):
print('Sent %s' % response.info['range'])
@staticmethod
def _CompletePrinter(*unused_args):
print('Upload complete')
def __StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Helper function for StreamMedia / StreamInChunks."""
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
callback = callback or self._ArgPrinter
finish_callback = finish_callback or self._CompletePrinter
# final_response is set if we resumed an already-completed upload.
response = self.__final_response
send_func = self.__SendChunk if use_chunks else self.__SendMediaBody
if use_chunks:
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = send_func(self.stream.tell(),
additional_headers=additional_headers)
if response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
break
self.__progress = self.__GetLastByte(response.info['range'])
if self.progress + 1 != self.stream.tell():
# TODO(user): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at byte '
'%d' % self.progress)
self._ExecuteCallback(callback, response)
if self.__complete:
# TODO(user): Decide how to handle errors in the
# non-seekable case.
current_pos = self.stream.tell()
self.stream.seek(0, os.SEEK_END)
end_pos = self.stream.tell()
self.stream.seek(current_pos)
if current_pos != end_pos:
raise exceptions.TransferInvalidError(
'Upload complete with %s additional bytes left in stream' %
(int(end_pos) - int(current_pos)))
self._ExecuteCallback(finish_callback, response)
return response
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this resumable upload in a single request.
Args:
callback: Progress callback function with inputs
(http_wrapper.Response, transfer.Upload)
finish_callback: Final callback function with inputs
(http_wrapper.Response, transfer.Upload)
additional_headers: Dict of headers to include with the upload
http_wrapper.Request.
Returns:
http_wrapper.Response of final response.
"""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers, use_chunks=False)
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers)
def __SendMediaRequest(self, request, end):
"""Helper function to make the request for SendMediaBody & SendChunk."""
response = http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
if response.status_code not in (http_client.OK, http_client.CREATED,
http_wrapper.RESUME_INCOMPLETE):
# We want to reset our state to wherever the server left us
# before this failed request, and then raise.
self.RefreshResumableUploadState()
raise exceptions.HttpError.FromResponse(response)
if response.status_code == http_wrapper.RESUME_INCOMPLETE:
last_byte = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if last_byte + 1 != end:
self.stream.seek(last_byte)
return response
def __SendMediaBody(self, start, additional_headers=None):
"""Send the entire media stream in a single request."""
self.EnsureInitialized()
if self.total_size is None:
raise exceptions.TransferInvalidError(
'Total size must be known for SendMediaBody')
body_stream = stream_slice.StreamSlice(self.stream, self.total_size - start)
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if start == self.total_size:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, self.total_size)
def __SendChunk(self, start, additional_headers=None):
"""Send the specified chunk."""
self.EnsureInitialized()
if self.total_size is None:
# For the streaming resumable case, we need to detect when we're at the
# end of the stream.
body_stream = buffered_stream.BufferedStream(
self.stream, start, self.chunksize)
end = body_stream.stream_end_position
if body_stream.stream_exhausted:
self.__total_size = end
else:
end = min(start + self.chunksize, self.total_size)
body_stream = stream_slice.StreamSlice(self.stream, end - start)
# TODO(user): Think about clearer errors on "no data in
# stream".
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if self.total_size is None:
# Streaming resumable upload case, unknown total size.
range_string = 'bytes %s-%s/*' % (start, end - 1)
elif end == start:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
# Normal resumable upload case with known sizes.
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, end)
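# A minimal usage sketch for the classes above (file names, client and
# request objects are illustrative):
#
#   upload = Upload.FromFile('/tmp/photo.jpg', mime_type='image/jpeg')
#   upload.strategy = RESUMABLE_UPLOAD
#   # upload.InitializeUpload(http_request, client=client) would then
#   # stream the bytes, since auto_transfer defaults to True.
#
#   download = Download.FromFile('/tmp/photo.jpg', overwrite=True)
#   # download.InitializeDownload(http_request, client=client) likewise
#   # pulls the whole object via StreamInChunks().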
|
|
import os #To set working directory properly
import re #import regular expression evaluator
import urllib #Used to test if internet is available
import time #required for sleep pauses
import threading #We will be making threads
import RPi.GPIO as GPIO #We use lots of GPIOs in this program
import datetime #To allow for keeping track of button press length
import subprocess #To launch external processes
import keyPress #Allow for asynchronous keyboard input lifted from http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
from subprocess import call #to launch external processes
import gaugette.rotary_encoder # Lets the rotation be handled with threaded watching
from webcamvideo import WebcamVideoStream #Class for creating a camera thread
from confmanager import ConfManager
from raspivoice import Raspivoice
from teradeep import Teradeep
from facedetect import Facedetect
GPIO.setmode(GPIO.BCM) #setup for pinouts of the chip for GPIO calls. This will be different for the rotary encoder library
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP) #GPIO for detecting low battery
GPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_UP) #Rotary Pushbutton Input
GPIO.setup(9, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # GPIO for detecting Power Switch Position, used to shut down the system
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # GPIO for Detecting External Power State
GPIO.setup(20, GPIO.OUT) #Define pin 20 as output, for vibration motor
pulses = 3 #make constant short pulses
for i in range(0,pulses):
GPIO.output(20,True)
time.sleep(0.05)
GPIO.output (20,False)
time.sleep(0.05)
espeak_process = subprocess.Popen(["espeak", "-f","/home/pi/introtext.txt", "--stdout"], stdout=subprocess.PIPE)
aplay_process = subprocess.Popen(["aplay", "-D", "sysdefault"], stdin=espeak_process.stdout, stdout=subprocess.PIPE)
aplay_process.wait() #Force a wait while the introduction is read: the full disclaimer on first boot, the short introtext on second and subsequent boots
call (["sudo","cp","/home/pi/altgreet.txt","/home/pi/introtext.txt"]) #After first boot get rid of disclaimer and shorten the greeting
def CheckToClose(k, (keys, printLock)): #This is required for the keyscanning to use the USB number pad
printLock.acquire()
print "Close: " + k
printLock.release()
if k == "c": #While debugging. If you press 'c' once, you can now use ctrl c to terminate the execution
keys.stopCapture()
t1=0 #t1-t4 used for timing pushbutton events
t2=0 #t2-t4 accumulate across a few intervals so different functions can be assigned based on how long the button is held
t3=0 #
t4=0 #The final interval of 7 seconds shuts the device down (software, not electricity). It protects the filesystem and ought to remain
timesinceflip = 0
config = ConfManager() # Load our class with settings from aftersight.cfg
vibration = False #By default vibration is turned off
if config.ConfigVibrationStartup: #If the config file sets rangefinder/vibration for startup, toggle the variable for the vibration motor
vibration = True
A_Pin=4 #Encoder channel A
B_Pin=5 #Encoder channel B
encoder = gaugette.rotary_encoder.RotaryEncoder.Worker(A_Pin, B_Pin)#Use worker class to try to catch transitions better.
encoder.start()#start the worker class encoder watcher
encoder.steps_per_cycle = 4 #the encoder always gives 4 steps for 1 detent
oldexternalpowerstate = 0 # this variable enables an espeak event when the power plug is inserted or removed
Main=["Toggle Raspivoice","Toggle Teradeep","Toggle Distance Sensor","Toggle Face Detection", "Settings","acknowledgements","Disclaimer"]
Settings=["Advance Volume","Raspivoice Settings", "Teradeep Settings","Distance Sensor Settings","Update Software","Return to main menu"]
RaspivoiceSettings = ["Toggle Playback Speed","Toggle Blinders","Advance Zoom","Toggle Foveal Mapping", "Advance Contrast", "Toggle Raspivoice Autostart", "Return to Main Menu"]
TeradeepSettings = ["Next Threshold", "Toggle Teradeep Autostart","Return to Main Menu"]
DistanceSensorSettings = ["Cycle Feedback Method","Return to Main Menu"]
VolumeMenu = ["Volume Up", "Volume Down", "Return to Main Menu"]
#You can change and add menu items above, but you MUST also update the section where MenuLevel and menupos are evaluated for a button press/release in under three seconds
#and change the actions for the items being evaluated there.
#If you don't, the new menu items will do nothing.
bequiet = False #This is old and can be removed, but there is some conditional code below that would have to go at the same time.
MenuLevel = Main #Select the Main Menu first
menupos = 0 #position in menu list
printLock = threading.Lock() #Setup for keyscanning thread
keys = keyPress.KeyCapture()
keys.startCapture(CheckToClose, (keys, printLock)) #Start the keyboard scanner thread
seconddelta = 0
call (["sudo","espeak","MainMenu,Rotate,Knob,For,Options"])
camera_port = 0 #Open Camera 0
#If your camera doesn't support HD, you'll have to change it here (1280X720)
camera = WebcamVideoStream(src=camera_port, width=640, height=480) #define where I dump camera input
camerastarted = False
raspi = Raspivoice(camera, config)
tera = Teradeep(camera, config)
face = Facedetect(camera, config)
cameraOk = camera.isOk() # Don't call this too often when the camera is stopped, since it will then temporarily init the camera to check it
if config.ConfigRaspivoiceStartup == True and cameraOk:
camerastarted = camera.start()
raspi.start()
if config.ConfigTeradeepStartup == True and cameraOk:
if not camerastarted:
camerastarted = camera.start()
tera.start()
if not cameraOk:
call (["sudo","espeak","No camera detected, check your connections."])
batteryshutdownstarttime = 0 #this will record the time when the shutdown signal is first received
batteryshutdowndetectedflag = 0 #set once the low battery signal has been detected; avoids restarting the timer while the indicator light is cycling
batteryshutdowntime = 300 #give the battery shutdown 300 seconds before forcing a shutdown
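# Illustrative helper only (the main loop below inlines the same logic): raw
# encoder deltas are clamped to +/-1 and accumulated, and one menu step is
# emitted only after three clamped readings, which matches one physical detent.
def normalize_detent(raw_delta, accumulator, threshold=3):
    """Return (menu_step, new_accumulator) for one raw encoder reading."""
    if raw_delta == 0:
        return 0, accumulator
    step = 1 if raw_delta > 0 else -1
    if accumulator >= threshold:
        return step, 0  # a full detent registered; reset the accumulator
    return 0, accumulator + 1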
while 1: #Main Loop
if camera.cameraError and (raspi.running or tera.running):
camerastarted = False
raspi.stop()
tera.stop()
face.stop()
call (["sudo","espeak","There was an error with the camera, stopping applications. Try reconnect the camera, and restart applications"])
battstate = GPIO.input(27) #check if the battery low state is true or false
switchstate = GPIO.input(9) #if the pushbutton is depressed, this ought to be true
externalpowerstate = GPIO.input(10) #has external power been connected or disconnected
CurrentMenuMaxSize = len(MenuLevel)-1 #Subtract one because list indexes start at zero while len returns the item count
delta = encoder.get_delta()
keysPressed = keys.getAsync()
#print keysPressed
if (delta!=0 or keysPressed != []):
print keysPressed
#print "rotate %d" % delta
#The rotary encoder usually reports four delta steps per single physical detent,
#but they arrive split unpredictably: 1,1,1,1 is normal, though 1,3 or 1,2,1 or 1,1,2 also occur.
#Using the corrections below, rotations clockwise are normalized to a value of 1
#Rotations counterclockwise are normalized to -1
#So the normal output of 1,1,1,1 stays 1,1,1,1 while 1,3 becomes 1,1 and 1,2,1 or 1,1,2 become 1,1,1
#The end result is that most often the values will be 2 or 3, and occasionally 4, after each rotation of one detent
#the seconddelta variable causes the menu position to advance only after the delta count accumulates to 3.
#Changing the top value for seconddelta changes how responsive single increments are;
#with a value of three, operation is reliable
if delta>0:
delta=1
if delta<0:
delta=-1
#print "corrected delta",delta
if keysPressed == ['+']: #Simulate the outcome of rotary knob rotations to the right. Each time '+' is pressed it will act as though rotated cw
seconddelta = 3
delta = 1
if keysPressed == ['-']:
seconddelta = 3 #simulate the outcome of rotary knob rotations to the left. each time '-' is pressed it will act as though rotated ccw
delta = -1
if seconddelta == 3: #This was the most important value to change to get reliable single increments of the menu items
seconddelta = 0
menupos=menupos+delta
print "MenuPosition" ,menupos
print MenuLevel
print "Current Menu Max Size",CurrentMenuMaxSize
if menupos > CurrentMenuMaxSize: #when changing menus, we set the position in the menu to a high value of 10. This way when the new menu is engaged the position is forced to the first item in the menu
menupos=0
if menupos<0:
menupos=CurrentMenuMaxSize
print (MenuLevel[menupos])
if bequiet == False:
call(["sudo","killall","espeak"])
call(["sudo","espeak",MenuLevel[menupos]])
elif seconddelta < 3:
seconddelta = seconddelta + 1
if (externalpowerstate != oldexternalpowerstate):
print ('External Power State Changed')
if(externalpowerstate == 1):
call (["sudo","espeak","ExternalPowerConnected"])
elif (externalpowerstate ==0):
call (["sudo","espeak","ExternalPowerDisconnected"])
if (switchstate == 1):
#print ('Power Switch Turned Off, System Shutdown Initiated')
call (["sudo", "espeak", "shutdown"])
call (["sudo", "shutdown", "-h", "now"])
#if (battstate == 1):
# print ('Battery OK, keep system up')
if (config.ConfigBatteryShutdown == True):
if (battstate == 1 and batteryshutdowndetectedflag == 1): #If the low power LED was on and then turned off again (the powerboost 1000c has this problem lots of detail here:https://forums.adafruit.com/viewtopic.php?f=8&t=88137 )
batteryshutdowndetectedflag = 0 # Battery state recovered, so clear the flag; the timer will restart if the low state returns
print ('Low Battery State Flipped killing timer')
if (config.ConfigBatteryShutdown == True):
if (battstate == 0 and batteryshutdowndetectedflag == 0): #If the low battery state has changed from ok to low battery for the first time
batteryshutdowndetectedflag = 1 #Flip the flag to true so we don't keep hearing about the low battery
batteryshutdownstarttime = time.time() #Get the time when the countdown started
print('Low Battery State Found, Starting Timer')
if (batteryshutdowndetectedflag == 1): #The next time it loops through it will come here because the flag has been changed to true
batterytimesinceshutdownstarted = time.time() - batteryshutdownstarttime #This value will start returning an increasing number of seconds since the shutdown timer started
if (batterytimesinceshutdownstarted >= batteryshutdowntime): #if it exceeds 300 seconds...
call(["sudo","espeak","LowBatteryDetectedForFiveMinutesPleaseShutDownOrAddExternalPower"])
batteryshutdowndetectedflag = 0 #reset the 300 second timer, this means if nothing changes the user is reminded every five minutes to shut down
if GPIO.input(25):
#print('Button Released')
if (t3 < 3 and t3 > 0 or keysPressed == ['\r']): #If the button is released in under 3 seconds, execute the command for the currently selected menu and function
print "Detected Button Release in less than 3 seconds"
if bequiet == False:
#Main=["Launch Raspivoice","Launch Teradeep","Toggle Distance Sensor","Toggle Facial Detection", "Settings","acknowledgements","Disclaimer"]
if (MenuLevel == Main and menupos == 0): #1st option in main menu list is launch raspivoice
if (not raspi.running):
if (not camerastarted):
camerastarted = camera.start()
if not camerastarted:
call (["sudo","espeak","No camera detected, not Starting RaspiVoice"])
else:
call (["sudo","espeak","Starting RaspiVoice"])
raspi.start()
else:
call (["sudo","espeak","Starting RaspiVoice"])
raspi.start()
else:
call (["sudo","espeak","Stopping RaspiVoice"])
if (not tera.running and not face.running): #only release the camera if no other consumer still needs it
camera.stop()
camerastarted = False
raspi.stop()
if (MenuLevel == Main and menupos == 1):
if (not tera.running):
if (not camerastarted):
camerastarted = camera.start()
if not camerastarted:
call (["sudo","espeak","No camera detected, not Starting Teradeep"])
else:
call (["sudo","espeak","Starting Teradeep"])
tera.start()
else:
call (["sudo","espeak","Starting Teradeep"])
tera.start()
else:
call (["sudo","espeak","Stopping Teradeep"])
if (not raspi.running and not face.running): #only release the camera if no other consumer still needs it
camera.stop()
camerastarted = False
tera.stop()
if (MenuLevel == Main and menupos == 2):
if vibration == True:
call (["sudo","espeak","DistanceSensorToggledOff"])
call (["sudo","killall","rangefinder"])
GPIO.output (20,False) #If rangefinder.py exited with the vibrator on, this offs it
vibration = False
else:
if config.ConfigAudibleDistance == True:
call(["sudo","espeak","EnglishDistanceSelectedOtherfeedbackUnavailable"])
else:
call (["sudo","espeak","DistanceSensorToggledOn"])
subprocess.Popen(["sudo","python","/home/pi/rangefinder.py"])
vibration = True
if (MenuLevel == Main and menupos ==3): #Toggle Facial Detection
if (not face.running):
if (not camerastarted):
camerastarted = camera.start()
if not camerastarted:
call (["sudo","espeak","No camera detected, not Starting Facial Detection"])
else:
call (["sudo","espeak","Starting Facial Detection"])
face.start()
else:
call (["sudo","espeak","Starting Facial Detection"])
face.start()
else:
call (["sudo","espeak","Stopping Facial Detection"])
if (not tera.running and not raspi.running): #only release the camera if no other consumer still needs it
camera.stop()
camerastarted = False
face.stop()
if (MenuLevel == Main and menupos == 4): #Enter The Settings Menu
MenuLevel = Settings
call (["sudo","espeak","ChangeSettings"])
menupos = 10
if (MenuLevel == Main and menupos == 5): #Read the acknowledgements
espeak_process = subprocess.Popen(["espeak", "-f","/home/pi/acknowledgements.txt", "--stdout"], stdout=subprocess.PIPE)
subprocess.Popen(["aplay", "-D", "sysdefault"], stdin=espeak_process.stdout, stdout=subprocess.PIPE)
if (MenuLevel == Main and menupos == 6): #Read the disclaimer
espeak_process = subprocess.Popen(["espeak", "-f","/home/pi/disclaimer.txt", "--stdout"], stdout=subprocess.PIPE)
subprocess.Popen(["aplay", "-D", "sysdefault"], stdin=espeak_process.stdout, stdout=subprocess.PIPE)
#Settings=["Advance Volume","Raspivoice Settings", "Teradeep Settings","Distance Sensor Settings","Update Software","Return to main menu"]
if (MenuLevel == Settings and menupos == 0):
commandlinevolume = int(config.ConfigVolume)
commandlinevolume = commandlinevolume + 10
if commandlinevolume > 100: #Wrap volume back to lowest setting
config.ConfigVolume = "70"
commandlinevolume = 70 #lowest setting
if commandlinevolume == 70:
fakevolume = 10 #lowest setting said as 10%
if commandlinevolume == 80:
fakevolume = 50 #next setting said as 50%
if commandlinevolume == 90:
fakevolume = 75 #next setting said as 75%
if commandlinevolume == 100:
fakevolume = 100 #next setting said as 100%
fakevolume = str(fakevolume)
call (["sudo","espeak","ChangingVolumeTo"])
call (["sudo","espeak",fakevolume])
call (["sudo","espeak","Percent"])
volumearg = str(commandlinevolume) + "%" #use the new volume, not the stale config value
call (["sudo","amixer","sset","PCM,0",volumearg])
config.ConfigVolume = str(commandlinevolume)
menupos = 0 #keep menu position on advance volume to allow for repeated presses
if (MenuLevel == Settings and menupos == 1):
MenuLevel = RaspivoiceSettings
call (["sudo","espeak","RaspivoiceSettings"])
menupos = 10
if (MenuLevel == Settings and menupos == 2):
MenuLevel = TeradeepSettings
call (["sudo","espeak","TeradeepSettings"])
menupos = 10
if (MenuLevel == Settings and menupos == 3):
MenuLevel = DistanceSensorSettings
call(["sudo","espeak","DistanceSensorSettings"])
menupos = 10
if (MenuLevel == Settings and menupos == 4):
if (externalpowerstate == 1):
call(["sudo","espeak","PleaseLeaveExternalPowerConnectedUntilUpdatesAreComplete"])
call(["sudo","espeak","ShuttingDownProgramsForUpdateProcedure"])
face.stop()
tera.stop()
raspi.stop()
call(["sudo","killall","rangefinder"])
call(["sudo", "espeak", "ProgramsTerminatedDetectingInternetConnection"])
try:
github="https://www.github.com"
data = urllib.urlopen(github) #Check if github.com can be connected to. That is where our files are stored
call(["sudo","espeak","InternetConnectionAvailableAndUpdateServerAvailable"])
inet=1
except:
call(["sudo","espeak","InternetConnectionNotAvailableAndOrUpdateServerIsDown"])
inet=0
if (inet == 1): #If internet is available then sync the local git directory with remote
currentversionstring = "Number" + str(config.ConfigUpdateNumber)
call(["sudo","espeak","CurrentUpdateIs"+currentversionstring])
call(["sudo","espeak","DownloadingAvailableUpdate"])
call(["sudo","/home/pi/./a-update.sh"])
call(["sudo","espeak","Updates Downloaded"])
call(["sudo","espeak","ComparingUpdateNumber"])
NewVersionNumberString = subprocess.Popen(['grep', 'updatenumber', '/home/pi/After-Sight-Model-1/aftersight.cfg'], stdout=subprocess.PIPE).communicate()[0]
NewVersionNumber = map(int, re.findall('\d+',NewVersionNumberString))
NewVersionNumber = int(NewVersionNumber[0]) #the regular expression returns the match in a list of length 1; unpack it so it can be compared to the integer current version number
print "new Update Number is "+str(NewVersionNumber)
print "Current Version Number is " + str(config.ConfigUpdateNumber)
call(["sudo","espeak","NewUpdateNumberis"+str(NewVersionNumber)])
if (NewVersionNumber > config.ConfigUpdateNumber):
call(["sudo","espeak","NewUpdateFoundPerformingUpdate"])
call(["sudo","cp","-rf","/home/pi/After-Sight-Model-1/installdeps.sh", "/home/pi/installdeps.sh"])
call(["sudo","espeak","InstallingDependencies"])
call(["sudo","/home/pi/installdeps.sh"])
call(["sudo","DependenciesInstalled"])
call(["sudo","espeak","ReplacingCore"])
os.chdir("/home/pi/After-Sight-Model-1")
call(["sudo","./a-update_core.sh"])
call(["sudo","espeak","RebuildingRaspivoice"])
os.chdir("/home/pi/After-Sight-Model-1")
call(["sudo","./a-update_voice.sh"])
call(["sudo","espeak","RebuildingTeradeep"])
os.chdir("/home/pi/After-Sight-Model-1")
call(["sudo","./a-update_teradeep.sh"])
call(["sudo","espeak","RebuildingFacialDetection"])
os.chdir("/home/pi/After-Sight-Model-1")
call(["sudo","./a-update_facedetect.sh"])
call(["sudo","RunningOneTimeScripts"])
#One-time script evaluator. NEW UPDATES REQUIRE aftersight.cfg ConfigUpdateNumber to be incremented EVEN IF YOU DON'T MAKE A ONE-TIME SCRIPT. Create the folder for it under /home/pi/After-Sight-Model-1/updatecontrol/updateX and put an update.sh script in the numbered folder; it doesn't have to do anything, just put it there
files = os.listdir('/home/pi/After-Sight-Model-1/updatecontrol')
unsortednumberlist = [] #the folder list returned by os.listdir is not sorted by number; use a regular expression to collect the numbers into a list, then sort them
for file in files:
VersionNumber = map(int, re.findall('\d+',file))
unsortednumberlist.extend(VersionNumber) #use extend - this makes all elements in one list. append makes a list of lists
print(file)
print unsortednumberlist
unsortednumberlist.sort(key=int) #this sorts the number list
print unsortednumberlist
for number in unsortednumberlist: #now step through the list. If the number is higher than the current version number, execute the script in /home/pi/After-Sight-Model-1/updatecontrol/updateX/update.sh
if number > config.ConfigUpdateNumber:
call(["sudo","espeak","ApplyingUpdateNumber"+str(number)])
executescriptstring = "/home/pi/After-Sight-Model-1/updatecontrol/update" + str(number) +"/update.sh"
call(["sudo","chmod","755",executescriptstring])
call(["sudo",executescriptstring])
else:
print "UpdateNumber"+str(number)+"AlreadyInstalled"
call(["sudo","espeak","UpdateCompletedRebootRequired"])
call(["sudo","shutdown","-r","now"])
else:call(["sudo","espeak","NoNewVersionNoUpdateRequired"])
elif (inet == 0 ):
call(["sudo","espeak","CheckYourInternetConnection"])
menupos = 10
else:
call(["sudo","espeak","ExternalPowerMustBeConnectedForUpdatePlugInAndTryAgain"])
menupos = 10
if (MenuLevel == Settings and menupos == 5):
MenuLevel = Main
call (["sudo","espeak","MainMenu"])
menupos = 10
#RaspivoiceSettings = ["Toggle Playback Speed", "Toggle Blinder","Advance Zoom","Toggle Foveal Mapping","Toggle Raspivoice Autostart", "Return to Main Menu"]
if (MenuLevel == RaspivoiceSettings and menupos == 0):
if config.ConfigRaspivoicePlaybackSpeed == "--total_time_s=1.05":
call (["sudo","espeak","ChangedToFast"])
config.ConfigRaspivoicePlaybackSpeed = "--total_time_s=0.5"
elif config.ConfigRaspivoicePlaybackSpeed == "--total_time_s=0.5":
call (["sudo","espeak","ChangedToSlow"])
config.ConfigRaspivoicePlaybackSpeed = "--total_time_s=2.0"
else:
call (["sudo","espeak","ChangedToNormal"])
config.ConfigRaspivoicePlaybackSpeed = "--total_time_s=1.05"
menupos = 0 #Keep at playback speed setting to allow repeated toggle
raspi.restart() # We want to relaunch raspivoice when we change this, ignored if not running
if (MenuLevel == RaspivoiceSettings and menupos == 1):
if config.ConfigBlinders == "--blinders=0":
call (["sudo","espeak","BlinderEnabled"])
config.ConfigBlinders = "--blinders=50"
else:
call(["sudo","espeak","BlindersDisabled"])
config.ConfigBlinders = "--blinders=0"
raspi.restart()#We must relaunch raspivoice when we change this.
if (MenuLevel == RaspivoiceSettings and menupos == 2):
if config.ConfigZoom == "--zoom=1.0":
call (["sudo","espeak","ZoomChangedTo150Percent"])
config.ConfigZoom = "--zoom=1.5"
elif config.ConfigZoom == "--zoom=1.5":
call (["sudo","espeak","ZoomChangedTo200Percent"])
config.ConfigZoom = "--zoom=2.0"
else:
call (["sudo","espeak","ZoomTurnedOff"])
config.ConfigZoom = "--zoom=1.0"
raspi.restart() #Restart Raspivoice with the new settings
if (MenuLevel == RaspivoiceSettings and menupos == 3):
if config.ConfigFovealmapping == "--foveal_mapping":
call (["sudo","espeak","FovealMappingDisabled"])
config.ConfigFovealmapping = "--verbose" #I had to use something here. leaving it as " " or "" wrecked things
else:
call (["sudo","espeak","FovealMappingEnabled"])
config.ConfigFovealmapping = "--foveal_mapping"
raspi.restart()#Restart Raspivoice with the new settings
if (MenuLevel == RaspivoiceSettings and menupos == 4):
if config.ConfigRaspivoiceContrast == "--contrast=0":
call(["sudo","espeak","ContrastSetToFactorOf1"])
config.ConfigRaspivoiceContrast = "--contrast=1"
elif config.ConfigRaspivoiceContrast == "--contrast=1":
call(["sudo","espeak","ContrastSetToFactor2"])
config.ConfigRaspivoiceContrast = "--contrast=2"
elif config.ConfigRaspivoiceContrast == "--contrast=2":
call(["sudo","espeak","ContrastSetToFactor3"])
config.ConfigRaspivoiceContrast = "--contrast=3"
elif config.ConfigRaspivoiceContrast == "--contrast=3":
call(["sudo","espeak","ContrastSetToFactor0"])
config.ConfigRaspivoiceContrast = "--contrast=0"
raspi.restart() #Restart with new settings
if (MenuLevel == RaspivoiceSettings and menupos == 5):
if config.ConfigRaspivoiceStartup == True:
call (["sudo","espeak","NoLaunchOnStartup"])
config.ConfigRaspivoiceStartup = False
else:
call (["sudo","espeak","RaspivoiceWillAutostart"])
config.ConfigRaspivoiceStartup = True
if (MenuLevel == RaspivoiceSettings and menupos == 6):
MenuLevel = Main
menupos = 10
config.save()
call (["sudo","espeak","Main Menu"])
#TeradeepSettings = ["Next Threshold", "Toggle Teradeep Autostart","Return to Main Menu"]
if (MenuLevel == TeradeepSettings and menupos == 0):
if config.ConfigTeradeepThreshold == "2":
call (["sudo","espeak","Changing To 5%"]) #somewhat stringent
config.ConfigTeradeepThreshold = "5"
elif config.ConfigTeradeepThreshold == "5":
call (["sudo","espeak","ChangingTo10%"])
config.ConfigTeradeepThreshold = "10" #More Stringent
elif config.ConfigTeradeepThreshold == "10":
call (["sudo","espeak","ChangingTo15%"])
config.ConfigTeradeepThreshold = "15" #More Stringent
elif config.ConfigTeradeepThreshold == "15":
call (["sudo","espeak","ChangingTo20%"])
config.ConfigTeradeepThreshold = "20"
else:
call (["sudo","espeak","Changingto2%"])
config.ConfigTeradeepThreshold = "2" #Most stringent
if (MenuLevel == TeradeepSettings and menupos == 1):
if config.ConfigTeradeepStartup == True:
call (["sudo","espeak","NoLaunchOnStartup"])
config.ConfigTeradeepStartup = False
else:
call (["sudo","espeak","TeradeepWillAutostart"])
config.ConfigTeradeepStartup = True
if (MenuLevel == TeradeepSettings and menupos == 2):
config.save()
MenuLevel = Main
call (["sudo","espeak","Main Menu"])
menupos = 10
#DistanceSensorSettings = ["Cycle Feedback Method","Return to Main Menu"]
if (MenuLevel == DistanceSensorSettings and menupos == 0):
if config.ConfigVibrationEnabled == True:
config.ConfigVibrationEnabled = False
config.ConfigVibrateSoundEnabled = True
call (["sudo","espeak","VibrationFeedbackDisabledToneFeedbackEnabled"])
elif config.ConfigVibrateSoundEnabled == True:
config.ConfigVibrateSoundEnabled = False
config.ConfigAudibleDistance = True
call(["sudo","espeak","ToneFeedbackDisabledEnglishFeedbackEnabledLaunchTeradeepToUse"])
else:
config.ConfigAudibleDistance = False
config.ConfigVibrationEnabled = True
call (["sudo","espeak","EnglishFeedbackDisabledVibrationEnabled"])
if (MenuLevel == DistanceSensorSettings and menupos == 1):
config.save()
MenuLevel = Main
call (["sudo","espeak","Main Menu"])
menupos = 10
t1=0 #Reset the timers
t2=0
t3=0
t4=0
elif t1 == 0:
t1 = time.time() #clock value when button pressed in
elif (t1 > 1 and t3 < 3):
t2 = time.time() #clock value at current moment While the button has been pressed in
t3 = t2 - t1 #difference from initial press time to current moment
print ">1<3",t3
elif (t3 > 3 and t3 <4):
print ">3<4",t3
# Although we got rid of killing processes here, we can re-use this area for some function relating to a 3-4 second button press. Perhaps there is some equivalent of a sleep command we can use on the device to save battery power that can use a gpio pin interrupt to wake up from?
t3 = 5.1
t4=5.1
elif (t4 > 4 and t4 < 7):
t2=time.time()
t4 = t2+1 - t1
elif t4 > 7:
print "shutdown",t4
call (["sudo", "shutdown", "-h", "now"])
call (["sudo", "espeak","Shutdown"])
exit
oldexternalpowerstate = externalpowerstate #This captures the current external power state to compare when the loop runs next. critical for knowing when power is plugged in or unplugged
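# A self-contained sketch of the low-battery debounce pattern inlined in the
# loop above: the countdown only runs while the low-battery line stays
# asserted, and resets whenever the flaky powerboost LED flips back to OK.
class LowBatteryTimer(object):
    def __init__(self, grace_s=300):
        self.grace_s = grace_s
        self.started_at = None

    def update(self, battery_low, now=None):
        """Return True once battery_low has held continuously for grace_s."""
        now = time.time() if now is None else now
        if not battery_low:
            self.started_at = None  # state recovered; kill the timer
            return False
        if self.started_at is None:
            self.started_at = now  # low state first seen; start counting
        return (now - self.started_at) >= self.grace_s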
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class PolymerPage(page_module.Page):
def __init__(self, url, page_set, run_no_page_interactions):
""" Base class for all polymer pages.
Args:
run_no_page_interactions: when True, skip all page interactions after
the navigate steps.
"""
super(PolymerPage, self).__init__(
url=url,
shared_page_state_class=shared_page_state.SharedMobilePageState,
page_set=page_set)
self.script_to_evaluate_on_commit = '''
document.addEventListener("polymer-ready", function() {
window.__polymer_ready = true;
});
'''
self._run_no_page_interactions = run_no_page_interactions
def RunPageInteractions(self, action_runner):
# If a polymer page wants to customize its actions, it should
# override the PerformPageInteractions method instead of this method.
if self._run_no_page_interactions:
return
self.PerformPageInteractions(action_runner)
def PerformPageInteractions(self, action_runner):
""" Override this to perform actions after the page has navigated. """
pass
def RunNavigateSteps(self, action_runner):
super(PolymerPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.__polymer_ready')
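# A minimal sketch of how a new page would plug into this harness, assuming
# only the PolymerPage contract above; the URL and class name are illustrative.
class PolymerExamplePage(PolymerPage):
  def __init__(self, page_set, run_no_page_interactions):
    super(PolymerExamplePage, self).__init__(
        url='http://www.polymer-project.org/components/paper-input/demo.html',
        page_set=page_set, run_no_page_interactions=run_no_page_interactions)

  def PerformPageInteractions(self, action_runner):
    # Override PerformPageInteractions (not RunPageInteractions) so the
    # run_no_page_interactions switch keeps working.
    action_runner.Wait(1)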
class PolymerCalculatorPage(PolymerPage):
def __init__(self, page_set, run_no_page_interactions):
super(PolymerCalculatorPage, self).__init__(
url=('http://www.polymer-project.org/components/paper-calculator/'
'demo.html'),
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
def PerformPageInteractions(self, action_runner):
self.TapButton(action_runner)
self.SlidePanel(action_runner)
def TapButton(self, action_runner):
with action_runner.CreateInteraction('PolymerAnimation', repeatable=True):
action_runner.TapElement(element_function='''
document.querySelector(
'body /deep/ #outerPanels'
).querySelector(
'#standard'
).shadowRoot.querySelector(
'paper-calculator-key[label="5"]'
)''')
action_runner.Wait(2)
def SlidePanel(self, action_runner):
# only bother with this interaction if the drawer is hidden
opened = action_runner.EvaluateJavaScript('''
(function() {
var outer = document.querySelector("body /deep/ #outerPanels");
return outer.opened || outer.wideMode;
}());''')
if not opened:
with action_runner.CreateInteraction('PolymerAnimation', repeatable=True):
action_runner.SwipeElement(
left_start_ratio=0.1, top_start_ratio=0.2,
direction='left', distance=300, speed_in_pixels_per_second=5000,
element_function='''
document.querySelector(
'body /deep/ #outerPanels'
).querySelector(
'#advanced'
).shadowRoot.querySelector(
'.handle-bar'
)''')
action_runner.WaitForJavaScriptCondition('''
var outer = document.querySelector("body /deep/ #outerPanels");
outer.opened || outer.wideMode;''')
class PolymerShadowPage(PolymerPage):
def __init__(self, page_set, run_no_page_interactions):
super(PolymerShadowPage, self).__init__(
url='http://www.polymer-project.org/components/paper-shadow/demo.html',
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
def PerformPageInteractions(self, action_runner):
with action_runner.CreateInteraction('ScrollAndShadowAnimation'):
action_runner.ExecuteJavaScript(
"document.getElementById('fab').scrollIntoView()")
action_runner.Wait(5)
self.AnimateShadow(action_runner, 'card')
#FIXME(wiltzius) disabling until this issue is fixed:
# https://github.com/Polymer/paper-shadow/issues/12
#self.AnimateShadow(action_runner, 'fab')
def AnimateShadow(self, action_runner, eid):
for i in range(1, 6):
action_runner.ExecuteJavaScript(
'document.getElementById("{0}").z = {1}'.format(eid, i))
action_runner.Wait(1)
class PolymerSampler(PolymerPage):
def __init__(self, page_set, anchor, run_no_page_interactions,
scrolling_page=False):
"""Page exercising interactions with a single Paper Sampler subpage.
Args:
page_set: Page set to incorporate this page into.
anchor: string indicating which subpage to load (matches the element
type that page is displaying)
scrolling_page: Whether scrolling the content pane is relevant to this
content page or not.
"""
super(PolymerSampler, self).__init__(
url=('http://www.polymer-project.org/components/%s/demo.html' % anchor),
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
self.scrolling_page = scrolling_page
self.iframe_js = 'document'
def RunNavigateSteps(self, action_runner):
super(PolymerSampler, self).RunNavigateSteps(action_runner)
waitForLoadJS = """
window.Polymer.whenPolymerReady(function() {
%s.contentWindow.Polymer.whenPolymerReady(function() {
window.__polymer_ready = true;
})
});
""" % self.iframe_js
action_runner.ExecuteJavaScript(waitForLoadJS)
action_runner.WaitForJavaScriptCondition(
'window.__polymer_ready')
def PerformPageInteractions(self, action_runner):
#TODO(wiltzius) Add interactions for input elements and shadow pages
if self.scrolling_page:
# Only bother scrolling the page if it's been marked as worthwhile
self.ScrollContentPane(action_runner)
self.TouchEverything(action_runner)
def ScrollContentPane(self, action_runner):
element_function = (self.iframe_js + '.querySelector('
'"core-scroll-header-panel").$.mainContainer')
with action_runner.CreateInteraction('Scroll_Page', repeatable=True):
action_runner.ScrollElement(use_touch=True,
direction='down',
distance='900',
element_function=element_function)
with action_runner.CreateInteraction('Scroll_Page', repeatable=True):
action_runner.ScrollElement(use_touch=True,
direction='up',
distance='900',
element_function=element_function)
def TouchEverything(self, action_runner):
tappable_types = [
'paper-button',
'paper-checkbox',
'paper-fab',
'paper-icon-button',
# crbug.com/394756
# 'paper-radio-button',
'paper-tab',
'paper-toggle-button',
'x-shadow',
]
for tappable_type in tappable_types:
self.DoActionOnWidgetType(action_runner, tappable_type, self.TapWidget)
swipeable_types = ['paper-slider']
for swipeable_type in swipeable_types:
self.DoActionOnWidgetType(action_runner, swipeable_type, self.SwipeWidget)
def DoActionOnWidgetType(self, action_runner, widget_type, action_function):
# Find all widgets of this type, but skip any that are disabled or are
# currently active as they typically don't produce animation frames.
element_list_query = (self.iframe_js +
('.querySelectorAll("body %s:not([disabled]):'
'not([active])")' % widget_type))
roles_count_query = element_list_query + '.length'
for i in range(action_runner.EvaluateJavaScript(roles_count_query)):
element_query = element_list_query + ("[%d]" % i)
if action_runner.EvaluateJavaScript(
element_query + '.offsetParent != null'):
# Only try to tap on visible elements (offsetParent != null)
action_runner.ExecuteJavaScript(element_query + '.scrollIntoView()')
action_runner.Wait(1) # wait for page to settle after scrolling
action_function(action_runner, element_query)
def TapWidget(self, action_runner, element_function):
with action_runner.CreateInteraction('Tap_Widget', repeatable=True):
action_runner.TapElement(element_function=element_function)
action_runner.Wait(1) # wait for e.g. animations on the widget
def SwipeWidget(self, action_runner, element_function):
with action_runner.CreateInteraction('Swipe_Widget'):
action_runner.SwipeElement(element_function=element_function,
left_start_ratio=0.75,
speed_in_pixels_per_second=300)
class PolymerPageSet(story.StorySet):
def __init__(self, run_no_page_interactions=False):
super(PolymerPageSet, self).__init__(
archive_data_file='data/polymer.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
self.AddStory(PolymerCalculatorPage(self, run_no_page_interactions))
self.AddStory(PolymerShadowPage(self, run_no_page_interactions))
# Polymer Sampler subpages that are interesting to tap / swipe elements on
TAPPABLE_PAGES = [
'paper-button',
'paper-checkbox',
'paper-fab',
'paper-icon-button',
# crbug.com/394756
# 'paper-radio-button',
#FIXME(wiltzius) Disabling x-shadow until this issue is fixed:
# https://github.com/Polymer/paper-shadow/issues/12
#'paper-shadow',
'paper-tabs',
'paper-toggle-button',
]
for p in TAPPABLE_PAGES:
self.AddStory(PolymerSampler(
self, p, run_no_page_interactions=run_no_page_interactions))
# Polymer Sampler subpages that are interesting to scroll
SCROLLABLE_PAGES = [
'core-scroll-header-panel',
]
for p in SCROLLABLE_PAGES:
self.AddStory(PolymerSampler(
self, p, run_no_page_interactions=run_no_page_interactions,
scrolling_page=True))
for page in self:
assert (page.__class__.RunPageInteractions ==
PolymerPage.RunPageInteractions), (
'Pages in this page set must not override PolymerPage\'s '
'RunPageInteractions method.')
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import re
import redis
import json
import os
from bs4 import BeautifulSoup
from frappe.utils import cint, strip_html_tags
from frappe.model.base_document import get_controller
from six import text_type
def setup_global_search_table():
"""
Creates __global_search table
:return:
"""
frappe.db.create_global_search_table()
def reset():
"""
Deletes all data in __global_search
:return:
"""
frappe.db.sql('DELETE FROM `__global_search`')
def get_doctypes_with_global_search(with_child_tables=True):
"""
Return doctypes with global search fields
:param with_child_tables:
:return:
"""
def _get():
global_search_doctypes = []
filters = {}
if not with_child_tables:
filters = {"istable": ["!=", 1], "issingle": ["!=", 1]}
for d in frappe.get_all('DocType', fields=['name', 'module'], filters=filters):
meta = frappe.get_meta(d.name)
if len(meta.get_global_search_fields()) > 0:
global_search_doctypes.append(d)
installed_apps = frappe.get_installed_apps()
module_app = frappe.local.module_app
doctypes = [
d.name for d in global_search_doctypes
if module_app.get(frappe.scrub(d.module))
and module_app[frappe.scrub(d.module)] in installed_apps
]
return doctypes
return frappe.cache().get_value('doctypes_with_global_search', _get)
def rebuild_for_doctype(doctype):
"""
Rebuild entries of doctype's documents in __global_search on change of
searchable fields
:param doctype: Doctype
"""
if frappe.local.conf.get('disable_global_search'):
return
def _get_filters():
filters = frappe._dict({ "docstatus": ["!=", 2] })
if meta.has_field("enabled"):
filters.enabled = 1
if meta.has_field("disabled"):
filters.disabled = 0
return filters
meta = frappe.get_meta(doctype)
if cint(meta.issingle) == 1:
return
if cint(meta.istable) == 1:
parent_doctypes = frappe.get_all("DocField", fields="parent", filters={
"fieldtype": ["in", frappe.model.table_fields],
"options": doctype
})
for p in parent_doctypes:
rebuild_for_doctype(p.parent)
return
# Delete records
delete_global_search_records_for_doctype(doctype)
parent_search_fields = meta.get_global_search_fields()
fieldnames = get_selected_fields(meta, parent_search_fields)
# Get all records from parent doctype table
all_records = frappe.get_all(doctype, fields=fieldnames, filters=_get_filters())
# Children data
all_children, child_search_fields = get_children_data(doctype, meta)
all_contents = []
for doc in all_records:
content = []
for field in parent_search_fields:
value = doc.get(field.fieldname)
if value:
content.append(get_formatted_value(value, field))
# get children data
for child_doctype, records in all_children.get(doc.name, {}).items():
for field in child_search_fields.get(child_doctype):
for r in records:
if r.get(field.fieldname):
content.append(get_formatted_value(r.get(field.fieldname), field))
if content:
# if doctype published in website, push title, route etc.
published = 0
title, route = "", ""
try:
if hasattr(get_controller(doctype), "is_website_published") and meta.allow_guest_to_view:
d = frappe.get_doc(doctype, doc.name)
published = 1 if d.is_website_published() else 0
title = d.get_title()
route = d.get("route")
except ImportError:
# some doctypes have been deleted via a future patch, hence the controller does not exist
pass
all_contents.append({
"doctype": frappe.db.escape(doctype),
"name": frappe.db.escape(doc.name),
"content": frappe.db.escape(' ||| '.join(content or '')),
"published": published,
"title": frappe.db.escape((title or '')[:int(frappe.db.VARCHAR_LEN)]),
"route": frappe.db.escape((route or '')[:int(frappe.db.VARCHAR_LEN)])
})
if all_contents:
insert_values_for_multiple_docs(all_contents)
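# Search content is stored as a single ' ||| '-joined string; a tiny sketch of
# the inverse, handy when inspecting __global_search rows by hand (the helper
# name is illustrative, not part of frappe).
def split_global_search_content(content):
	return [part.strip() for part in content.split('|||') if part.strip()]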
def delete_global_search_records_for_doctype(doctype):
frappe.db.sql('''DELETE
FROM `__global_search`
WHERE doctype = %s''', doctype, as_dict=True)
def get_selected_fields(meta, global_search_fields):
fieldnames = [df.fieldname for df in global_search_fields]
if meta.istable==1:
fieldnames.append("parent")
elif "name" not in fieldnames:
fieldnames.append("name")
if meta.has_field("is_website_published"):
fieldnames.append("is_website_published")
return fieldnames
def get_children_data(doctype, meta):
"""
Get all records from all the child tables of a doctype
all_children = {
"parent1": {
"child_doctype1": [
{
"field1": val1,
"field2": val2
}
]
}
}
"""
all_children = frappe._dict()
child_search_fields = frappe._dict()
for child in meta.get_table_fields():
child_meta = frappe.get_meta(child.options)
search_fields = child_meta.get_global_search_fields()
if search_fields:
child_search_fields.setdefault(child.options, search_fields)
child_fieldnames = get_selected_fields(child_meta, search_fields)
child_records = frappe.get_all(child.options, fields=child_fieldnames, filters={
"docstatus": ["!=", 1],
"parenttype": doctype
})
for record in child_records:
all_children.setdefault(record.parent, frappe._dict())\
.setdefault(child.options, []).append(record)
return all_children, child_search_fields
def insert_values_for_multiple_docs(all_contents):
values = []
for content in all_contents:
values.append("({doctype}, {name}, {content}, {published}, {title}, {route})"
.format(**content))
batch_size = 50000
for i in range(0, len(values), batch_size):
batch_values = values[i:i + batch_size]
# ignoring duplicate keys for doctype_name
frappe.db.multisql({
'mariadb': '''INSERT IGNORE INTO `__global_search`
(doctype, name, content, published, title, route)
VALUES {0} '''.format(", ".join(batch_values)),
'postgres': '''INSERT INTO `__global_search`
(doctype, name, content, published, title, route)
VALUES {0}
ON CONFLICT("name", "doctype") DO NOTHING'''.format(", ".join(batch_values))
})
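# The 50000-row batching above is a generic chunking pattern; the same idea as
# a reusable generator (a sketch, not used by the code above):
def chunked(values, batch_size=50000):
	for i in range(0, len(values), batch_size):
		yield values[i:i + batch_size]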
def update_global_search(doc):
"""
Add values marked with `in_global_search` to
`global_search_queue` from given doc
:param doc: Document to be added to global search
"""
if frappe.local.conf.get('disable_global_search'):
return
if doc.docstatus > 1 or (doc.meta.has_field("enabled") and not doc.get("enabled")) \
or doc.get("disabled"):
return
content = []
for field in doc.meta.get_global_search_fields():
if doc.get(field.fieldname) and field.fieldtype not in frappe.model.table_fields:
content.append(get_formatted_value(doc.get(field.fieldname), field))
# Get children
for child in doc.meta.get_table_fields():
for d in doc.get(child.fieldname):
if d.parent == doc.name:
for field in d.meta.get_global_search_fields():
if d.get(field.fieldname):
content.append(get_formatted_value(d.get(field.fieldname), field))
if content:
published = 0
if hasattr(doc, 'is_website_published') and doc.meta.allow_guest_to_view:
published = 1 if doc.is_website_published() else 0
title = (doc.get_title() or '')[:int(frappe.db.VARCHAR_LEN)]
route = doc.get('route') if doc else ''
value = dict(
doctype=doc.doctype,
name=doc.name,
content=' ||| '.join(content or ''),
published=published,
title=title,
route=route
)
sync_value_in_queue(value)
def update_global_search_for_all_web_pages():
if frappe.conf.get('disable_global_search'):
return
print('Update global search for all web pages...')
routes_to_index = get_routes_to_index()
for route in routes_to_index:
add_route_to_global_search(route)
sync_global_search()
def get_routes_to_index():
apps = frappe.get_installed_apps()
routes_to_index = []
for app in apps:
base = path_to_index = frappe.get_app_path(app, 'www')
for dirpath, _, filenames in os.walk(path_to_index, topdown=True):
for f in filenames:
if f.endswith(('.md', '.html')):
filepath = os.path.join(dirpath, f)
route = os.path.relpath(filepath, base)
route = route.split('.')[0]
if route.endswith('index'):
route = route.rsplit('index', 1)[0]
routes_to_index.append(route)
return routes_to_index
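# A standalone sketch of the route derivation above: strip the file extension,
# then collapse a trailing 'index' so e.g. 'docs/index.md' is indexed as 'docs/'.
def _route_from_filepath(filepath, base):
	route = os.path.relpath(filepath, base).split('.')[0]
	if route.endswith('index'):
		route = route.rsplit('index', 1)[0]
	return route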
def add_route_to_global_search(route):
from frappe.website.render import render_page
from frappe.utils import set_request
frappe.set_user('Guest')
frappe.local.no_cache = True
try:
set_request(method='GET', path=route)
content = render_page(route)
soup = BeautifulSoup(content, 'html.parser')
page_content = soup.find(class_='page_content')
text_content = page_content.text if page_content else ''
title = soup.title.text.strip() if soup.title else route
value = dict(
doctype='Static Web Page',
name=route,
content=text_content,
published=1,
title=title,
route=route
)
sync_value_in_queue(value)
except (frappe.PermissionError, frappe.DoesNotExistError, frappe.ValidationError, Exception):
pass
frappe.set_user('Administrator')
def get_formatted_value(value, field):
"""
Prepare field from raw data
:param value:
:param field:
:return:
"""
from six.moves.html_parser import HTMLParser
if getattr(field, 'fieldtype', None) in ["Text", "Text Editor"]:
h = HTMLParser()
value = h.unescape(frappe.safe_decode(value))
value = (re.subn(r'<[\s]*(script|style).*?</\1>(?s)', '', text_type(value))[0])
value = ' '.join(value.split())
return field.label + " : " + strip_html_tags(text_type(value))
def sync_global_search():
"""
Inserts / updates values from `global_search_queue` to __global_search.
This is called via the job scheduler.
:return:
"""
while frappe.cache().llen('global_search_queue') > 0:
value = json.loads(frappe.cache().lpop('global_search_queue').decode('utf-8'))
sync_value(value)
def sync_value_in_queue(value):
try:
# append to search queue if connected
frappe.cache().lpush('global_search_queue', json.dumps(value))
except redis.exceptions.ConnectionError:
# not connected, sync directly
sync_value(value)
def sync_value(value):
'''
Sync a given document to global search
:param value: dict of { doctype, name, content, published, title, route }
'''
frappe.db.multisql({
'mariadb': '''INSERT INTO `__global_search`
(`doctype`, `name`, `content`, `published`, `title`, `route`)
VALUES (%(doctype)s, %(name)s, %(content)s, %(published)s, %(title)s, %(route)s)
ON DUPLICATE key UPDATE
`content`=%(content)s,
`published`=%(published)s,
`title`=%(title)s,
`route`=%(route)s
''',
'postgres': '''INSERT INTO `__global_search`
(`doctype`, `name`, `content`, `published`, `title`, `route`)
VALUES (%(doctype)s, %(name)s, %(content)s, %(published)s, %(title)s, %(route)s)
ON CONFLICT("doctype", "name") DO UPDATE SET
`content`=%(content)s,
`published`=%(published)s,
`title`=%(title)s,
`route`=%(route)s
'''
}, value)
def delete_for_document(doc):
"""
Delete the __global_search entry of a document that has
been deleted
:param doc: Deleted document
"""
frappe.db.sql('''DELETE
FROM `__global_search`
WHERE doctype = %s
AND name = %s''', (doc.doctype, doc.name), as_dict=True)
@frappe.whitelist()
def search(text, start=0, limit=20, doctype=""):
"""
Search for given text in __global_search
:param text: phrase to be searched
:param start: start results at, default 0
:param limit: number of results to return, default 20
:return: Array of result objects
"""
from frappe.desk.doctype.global_search_settings.global_search_settings import get_doctypes_for_global_search
results = []
sorted_results = []
allowed_doctypes = get_doctypes_for_global_search()
for text in set(text.split('&')):
text = text.strip()
if not text:
continue
conditions = '1=1'
offset = ''
mariadb_text = frappe.db.escape('+' + text + '*')
mariadb_fields = '`doctype`, `name`, `content`, MATCH (`content`) AGAINST ({} IN BOOLEAN MODE) AS rank'.format(mariadb_text)
postgres_fields = '`doctype`, `name`, `content`, TO_TSVECTOR("content") @@ PLAINTO_TSQUERY({}) AS rank'.format(frappe.db.escape(text))
values = {}
if doctype:
conditions = '`doctype` = %(doctype)s'
values['doctype'] = doctype
elif allowed_doctypes:
conditions = '`doctype` IN %(allowed_doctypes)s'
values['allowed_doctypes'] = tuple(allowed_doctypes)
if int(start) > 0:
offset = 'OFFSET {}'.format(start)
common_query = """
SELECT {fields}
FROM `__global_search`
WHERE {conditions}
ORDER BY rank DESC
LIMIT {limit}
{offset}
"""
result = frappe.db.multisql({
'mariadb': common_query.format(fields=mariadb_fields, conditions=conditions, limit=limit, offset=offset),
'postgres': common_query.format(fields=postgres_fields, conditions=conditions, limit=limit, offset=offset)
}, values=values, as_dict=True)
results.extend(result)
# sort results based on allowed_doctype's priority
for doctype in allowed_doctypes:
for index, r in enumerate(results):
if r.doctype == doctype and r.rank > 0.0:
try:
meta = frappe.get_meta(r.doctype)
if meta.image_field:
r.image = frappe.db.get_value(r.doctype, r.name, meta.image_field)
except Exception:
frappe.clear_messages()
sorted_results.extend([r])
return sorted_results
@frappe.whitelist(allow_guest=True)
def web_search(text, scope=None, start=0, limit=20):
"""
Search for given text in __global_search where published = 1
:param text: phrase to be searched
:param scope: search only in this route, for e.g /docs
:param start: start results at, default 0
:param limit: number of results to return, default 20
:return: Array of result objects
"""
results = []
texts = text.split('&')
for text in texts:
common_query = ''' SELECT `doctype`, `name`, `content`, `title`, `route`
FROM `__global_search`
WHERE {conditions}
LIMIT %(limit)s OFFSET %(start)s'''
scope_condition = '`route` like %(scope)s AND ' if scope else ''
published_condition = '`published` = 1 AND '
mariadb_conditions = postgres_conditions = ' '.join([published_condition, scope_condition])
# https://mariadb.com/kb/en/library/full-text-index-overview/#in-boolean-mode
mariadb_conditions += 'MATCH(`content`) AGAINST ({} IN BOOLEAN MODE)'.format(frappe.db.escape('+' + text + '*'))
postgres_conditions += 'TO_TSVECTOR("content") @@ PLAINTO_TSQUERY({})'.format(frappe.db.escape(text))
values = {
"scope": "".join([scope, "%"]) if scope else '',
"limit": limit,
"start": start
}
result = frappe.db.multisql({
'mariadb': common_query.format(conditions=mariadb_conditions),
'postgres': common_query.format(conditions=postgres_conditions)
}, values=values, as_dict=True)
tmp_result = []
for i in result:
if i in results or not results:
tmp_result.append(i)
results += tmp_result
# chart of accounts -> {chart, of, accounts}
# titles that match the most of these words will have high relevance
words = set(get_distinct_words(text))
for r in results:
title_words = set(get_distinct_words(r.title))
words_match = len(words.intersection(title_words))
r.relevance = words_match
results = sorted(results, key=lambda x: x.relevance, reverse=True)
return results
def get_distinct_words(text):
text = text.replace('"', '')
text = text.replace("'", '')
return [w.strip().lower() for w in text.split(' ')]
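# A compact sketch of the title-relevance scoring used in web_search above:
# results whose titles share more distinct words with the query sort first.
def title_relevance(query, title):
	return len(set(get_distinct_words(query)) & set(get_distinct_words(title)))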
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from mxnet.test_utils import *
from common import assertRaises, with_seed
import shutil
import tempfile
import unittest
from nose.tools import raises
def _get_data(url, dirname):
import os, tarfile
download(url, dirname=dirname, overwrite=False)
fname = os.path.join(dirname, url.split('/')[-1])
tar = tarfile.open(fname)
source_images = [os.path.join(dirname, x.name) for x in tar.getmembers() if x.isfile()]
if len(source_images) < 1 or not os.path.isfile(source_images[0]):
# extract only if the test images are not already on disk
tar.extractall(path=dirname)
tar.close()
return source_images
def _generate_objects():
num = np.random.randint(1, 10)
xy = np.random.rand(num, 2)
wh = np.random.rand(num, 2) / 2
left = (xy[:, 0] - wh[:, 0])[:, np.newaxis]
right = (xy[:, 0] + wh[:, 0])[:, np.newaxis]
top = (xy[:, 1] - wh[:, 1])[:, np.newaxis]
bot = (xy[:, 1] + wh[:, 1])[:, np.newaxis]
boxes = np.maximum(0., np.minimum(1., np.hstack((left, top, right, bot))))
cid = np.random.randint(0, 20, size=num)
label = np.hstack((cid[:, np.newaxis], boxes)).ravel().tolist()
return [2, 5] + label
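# A sketch of the inverse of _generate_objects, assuming the usual ImageDetIter
# list layout where label[0] is the header width (2) and label[1] the
# per-object width (5: class id plus four box coordinates):
def _parse_objects(label):
    header_width, obj_width = int(label[0]), int(label[1])
    body = label[header_width:]
    return [body[i:i + obj_width] for i in range(0, len(body), obj_width)]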
class TestImage(unittest.TestCase):
IMAGES_URL = "http://data.mxnet.io/data/test_images.tar.gz"
IMAGES = []
IMAGES_DIR = None
@classmethod
def setupClass(cls):
cls.IMAGES_DIR = tempfile.mkdtemp()
cls.IMAGES = _get_data(cls.IMAGES_URL, cls.IMAGES_DIR)
print("Loaded {} images".format(len(cls.IMAGES)))
@classmethod
def teardownClass(cls):
if cls.IMAGES_DIR:
print("cleanup {}".format(cls.IMAGES_DIR))
shutil.rmtree(cls.IMAGES_DIR)
@raises(mx.base.MXNetError)
def test_imread_not_found(self):
x = mx.img.image.imread("/139810923jadjsajlskd.___adskj/blah.jpg")
def test_imread_vs_imdecode(self):
for img in TestImage.IMAGES:
with open(img, 'rb') as fp:
str_image = fp.read()
image = mx.image.imdecode(str_image, to_rgb=0)
image_read = mx.img.image.imread(img)
same(image.asnumpy(), image_read.asnumpy())
def test_imdecode(self):
try:
import cv2
except ImportError:
return
for img in TestImage.IMAGES:
with open(img, 'rb') as fp:
str_image = fp.read()
image = mx.image.imdecode(str_image, to_rgb=0)
cv_image = cv2.imread(img)
assert_almost_equal(image.asnumpy(), cv_image)
def test_scale_down(self):
assert mx.image.scale_down((640, 480), (720, 120)) == (640, 106)
assert mx.image.scale_down((360, 1000), (480, 500)) == (360, 375)
assert mx.image.scale_down((300, 400), (0, 0)) == (0, 0)
def test_resize_short(self):
try:
import cv2
except ImportError:
return
for img in TestImage.IMAGES:
cv_img = cv2.imread(img)
mx_img = mx.nd.array(cv_img[:, :, (2, 1, 0)])
h, w, _ = cv_img.shape
for _ in range(3):
new_size = np.random.randint(1, 1000)
if h > w:
new_h, new_w = new_size * h // w, new_size
else:
new_h, new_w = new_size, new_size * w // h
for interp in range(0, 2):
# area-based/lanczos don't match with cv2?
cv_resized = cv2.resize(cv_img, (new_w, new_h), interpolation=interp)
mx_resized = mx.image.resize_short(mx_img, new_size, interp)
assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3)
def test_color_normalize(self):
for _ in range(10):
mean = np.random.rand(3) * 255
std = np.random.rand(3) + 1
width = np.random.randint(100, 500)
height = np.random.randint(100, 500)
src = np.random.rand(height, width, 3) * 255.
mx_result = mx.image.color_normalize(mx.nd.array(src),
mx.nd.array(mean), mx.nd.array(std))
assert_almost_equal(mx_result.asnumpy(), (src - mean) / std, atol=1e-3)
def test_imageiter(self):
im_list = [[np.random.randint(0, 5), x] for x in TestImage.IMAGES]
test_iter = mx.image.ImageIter(2, (3, 224, 224), label_width=1, imglist=im_list,
path_root='')
for _ in range(3):
for batch in test_iter:
pass
test_iter.reset()
# test with list file
fname = './data/test_imageiter.lst'
file_list = ['\t'.join([str(k), str(np.random.randint(0, 5)), x]) \
for k, x in enumerate(TestImage.IMAGES)]
with open(fname, 'w') as f:
for line in file_list:
f.write(line + '\n')
test_iter = mx.image.ImageIter(2, (3, 224, 224), label_width=1, path_imglist=fname,
path_root='')
for batch in test_iter:
pass
@with_seed()
def test_augmenters(self):
# ColorNormalizeAug
mean = np.random.rand(3) * 255
std = np.random.rand(3) + 1
width = np.random.randint(100, 500)
height = np.random.randint(100, 500)
src = np.random.rand(height, width, 3) * 255.
# We test numpy and mxnet NDArray inputs
color_norm_aug = mx.image.ColorNormalizeAug(mean=mx.nd.array(mean), std=std)
out_image = color_norm_aug(mx.nd.array(src))
assert_almost_equal(out_image.asnumpy(), (src - mean) / std, atol=1e-3)
# only test if all augmenters will work
# TODO(Joshua Zhang): verify the augmenter outputs
im_list = [[0, x] for x in TestImage.IMAGES]
test_iter = mx.image.ImageIter(2, (3, 224, 224), label_width=1, imglist=im_list,
resize=640, rand_crop=True, rand_resize=True, rand_mirror=True, mean=True,
std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
hue=0.1, pca_noise=0.1, rand_gray=0.2, inter_method=10, path_root='', shuffle=True)
for batch in test_iter:
pass
def test_image_detiter(self):
im_list = [_generate_objects() + [x] for x in TestImage.IMAGES]
det_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root='')
for _ in range(3):
for batch in det_iter:
pass
det_iter.reset()
val_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root='')
det_iter = val_iter.sync_label_shape(det_iter)
# test file list
fname = './data/test_imagedetiter.lst'
im_list = [[k] + _generate_objects() + [x] for k, x in enumerate(TestImage.IMAGES)]
with open(fname, 'w') as f:
for line in im_list:
line = '\t'.join([str(k) for k in line])
f.write(line + '\n')
det_iter = mx.image.ImageDetIter(2, (3, 400, 400), path_imglist=fname,
path_root='')
for batch in det_iter:
pass
def test_det_augmenters(self):
# only test if all augmenters will work
# TODO(Joshua Zhang): verify the augmenter outputs
im_list = [_generate_objects() + [x] for x in TestImage.IMAGES]
det_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root='',
resize=640, rand_crop=1, rand_pad=1, rand_gray=0.1, rand_mirror=True, mean=True,
std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
pca_noise=0.1, hue=0.1, inter_method=10, min_object_covered=0.5,
aspect_ratio_range=(0.2, 5), area_range=(0.1, 4.0), min_eject_coverage=0.5,
max_attempts=50)
for batch in det_iter:
pass
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import logging
import shutil
import time
import os
import pytest
from gcdt_bundler.bundler import get_zipped_file
from nose.tools import assert_equal, assert_in, assert_not_in
from gcdt import utils
from gcdt.ramuda_core import delete_lambda_deprecated, delete_lambda, \
deploy_lambda, ping, list_functions, _update_lambda_configuration, \
get_metrics, rollback, _get_alias_version, info, invoke
from gcdt.ramuda_wire import _lambda_add_invoke_permission
from gcdt.ramuda_utils import list_lambda_versions, create_sha256, \
get_remote_code_hash
from gcdt_testtools.helpers_aws import create_role_helper, delete_role_helper, \
create_lambda_helper, create_lambda_role_helper, check_preconditions, \
settings_requirements, check_normal_mode
from gcdt_testtools.helpers_aws import temp_bucket, awsclient, \
cleanup_roles # fixtures!
from gcdt_testtools.helpers import cleanup_tempfiles, temp_folder # fixtures!
from gcdt_testtools.helpers import create_tempfile, logcapture # fixtures!
from . import here
log = logging.getLogger(__name__)
def get_size(start_path='.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
# TODO: move AWS resource helpers to helpers_aws.py
# TODO: if we use this we need to move some of the following code to
# TODO: helpers_aws.py!
@pytest.fixture(scope='function') # 'function' or 'module'
def vendored_folder():
# provide a temp folder and cleanup after test
# this also changes into the folder and back to cwd during cleanup
cwd = os.getcwd()
folder = here('.')
os.chdir(folder) # reuse ./vendored folder => cd tests/
settings_requirements()
yield
# cleanup
os.chdir(cwd) # cd to original folder
# reuse ./vendored folder
@pytest.fixture(scope='function') # 'function' or 'module'
def temp_lambda(awsclient):
# provide a lambda function and cleanup after test suite
temp_string = utils.random_string()
lambda_name = 'jenkins_test_%s' % temp_string
role_name = 'unittest_%s_lambda' % temp_string
# create the function
role_arn = create_lambda_role_helper(awsclient, role_name)
create_lambda_helper(awsclient, lambda_name, role_arn,
# './resources/sample_lambda/handler.py',
here('./resources/sample_lambda/handler.py'),
lambda_handler='handler.handle')
yield lambda_name, role_name, role_arn
# cleanup
delete_lambda_deprecated(awsclient, lambda_name, delete_logs=True)
delete_role_helper(awsclient, role_name)
@pytest.fixture(scope='function') # 'function' or 'module'
def cleanup_lambdas_deprecated(awsclient):
items = []
yield items
# cleanup
for i in items:
delete_lambda_deprecated(awsclient, i, delete_logs=True)
@pytest.fixture(scope='function') # 'function' or 'module'
def cleanup_lambdas(awsclient):
items = []
yield items
# cleanup
for i, events in items:
delete_lambda(awsclient, i, events, delete_logs=True)
@pytest.mark.aws
@check_preconditions
def test_create_lambda(awsclient, vendored_folder, cleanup_lambdas_deprecated,
cleanup_roles):
log.info('running test_create_lambda')
temp_string = utils.random_string()
lambda_name = 'jenkins_test_' + temp_string
log.info(lambda_name)
role = create_role_helper(
awsclient,
'unittest_%s_lambda' % temp_string,
policies=[
'arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole',
'arn:aws:iam::aws:policy/AWSLambdaExecute']
)
cleanup_roles.append(role['RoleName'])
config = {
"lambda": {
"name": "dp-dev-sample-lambda-jobr1",
"description": "lambda test for ramuda",
"role": "'unused'",
"handlerFunction": "handler.handle",
"handlerFile": "./resources/sample_lambda/handler.py",
"timeout": 300,
"memorySize": 256,
"events": {
"s3Sources": [
{
"bucket": "jobr-test",
"type": "s3:ObjectCreated:*",
"suffix": ".gz"
}
],
"timeSchedules": [
{
"ruleName": "infra-dev-sample-lambda-jobr-T1",
"ruleDescription": "run every 5 min from 0-5",
"scheduleExpression": "cron(0/5 0-5 ? * * *)"
},
{
"ruleName": "infra-dev-sample-lambda-jobr-T2",
"ruleDescription": "run every 5 min from 8-23:59",
"scheduleExpression": "cron(0/5 8-23:59 ? * * *)"
}
]
},
"vpc": {
"subnetIds": [
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-e9db9f9f"
],
"securityGroups": [
"sg-660dd700"
]
}
},
"bundling": {
"zip": "bundle.zip",
"folders": [
{
"source": "./vendored",
"target": "."
},
{
"source": "./impl",
"target": "impl"
}
]
},
"deployment": {
"region": "eu-west-1"
}
}
lambda_description = config['lambda'].get('description')
role_arn = role['Arn']
lambda_handler = config['lambda'].get('handlerFunction')
handler_filename = config['lambda'].get('handlerFile')
timeout = int(config['lambda'].get('timeout'))
memory_size = int(config['lambda'].get('memorySize'))
zip_name = config['bundling'].get('zip')
folders_from_file = config['bundling'].get('folders')
subnet_ids = config['lambda'].get('vpc', {}).get('subnetIds', None)
security_groups = config['lambda'].get('vpc', {}).get('securityGroups',
None)
region = config['deployment'].get('region')
artifact_bucket = config['deployment'].get('artifactBucket', None)
zipfile = get_zipped_file(
handler_filename,
folders_from_file,
)
deploy_lambda(
awsclient=awsclient,
function_name=lambda_name,
role=role_arn,
handler_filename=handler_filename,
handler_function=lambda_handler,
folders=folders_from_file,
description=lambda_description,
timeout=timeout,
memory=memory_size,
artifact_bucket=artifact_bucket,
zipfile=zipfile
)
cleanup_lambdas_deprecated.append(lambda_name)
@pytest.mark.aws
@check_preconditions
@pytest.mark.parametrize('runtime', ['nodejs4.3', 'nodejs6.10', 'nodejs8.10'])
def test_create_lambda_nodejs(runtime, awsclient, temp_folder, cleanup_lambdas_deprecated,
cleanup_roles):
log.info('running test_create_lambda_nodejs')
# copy package.json and settings_dev.conf from sample
shutil.copy(
here('./resources/sample_lambda_nodejs/index.js'), temp_folder[0])
shutil.copy(
here('./resources/sample_lambda_nodejs/package.json'), temp_folder[0])
shutil.copy(
here('./resources/sample_lambda_nodejs/settings_dev.conf'),
temp_folder[0])
temp_string = utils.random_string()
lambda_name = 'jenkins_test_' + temp_string
log.info(lambda_name)
role = create_role_helper(
awsclient,
'unittest_%s_lambda' % temp_string,
policies=[
'arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole',
'arn:aws:iam::aws:policy/AWSLambdaExecute']
)
cleanup_roles.append(role['RoleName'])
config = {
"lambda": {
"runtime": runtime, # "nodejs4.3",
"name": "infra-dev-sample-lambda-jobr1",
"description": "lambda test for ramuda",
"role": "'unused'",
"handlerFunction": "index.handler",
"handlerFile": "index.js",
"timeout": 300,
"memorySize": 256,
"events": {
"s3Sources": [
{
"bucket": "jobr-test",
"type": "s3:ObjectCreated:*",
"suffix": ".gz"
}
],
"timeSchedules": [
{
"ruleName": "infra-dev-sample-lambda-jobr-T1",
"ruleDescription": "run every 5 min from 0-5",
"scheduleExpression": "cron(0/5 0-5 ? * * *)"
},
{
"ruleName": "infra-dev-sample-lambda-jobr-T2",
"ruleDescription": "run every 5 min from 8-23:59",
"scheduleExpression": "cron(0/5 8-23:59 ? * * *)"
}
]
},
"vpc": {
"subnetIds": [
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-e9db9f9f"
],
"securityGroups": [
"sg-660dd700"
]
}
},
"bundling": {
"zip": "bundle.zip",
"folders": [
{
"source": "./node_modules",
"target": "node_modules"
}
]
},
"deployment": {
"region": "eu-west-1"
}
}
runtime = config['lambda'].get('runtime')
lambda_description = config['lambda'].get('description')
role_arn = role['Arn']
lambda_handler = config['lambda'].get('handlerFunction')
handler_filename = config['lambda'].get('handlerFile')
timeout = int(config['lambda'].get('timeout'))
memory_size = int(config['lambda'].get('memorySize'))
zip_name = config['bundling'].get('zip')
folders_from_file = config['bundling'].get('folders')
subnet_ids = config['lambda'].get('vpc', {}).get('subnetIds', None)
security_groups = config['lambda'].get('vpc', {}).get('securityGroups',
None)
region = config['deployment'].get('region')
artifact_bucket = config['deployment'].get('artifactBucket', None)
zipfile = get_zipped_file(
handler_filename,
folders_from_file,
runtime=runtime,
)
deploy_lambda(
awsclient=awsclient,
function_name=lambda_name,
role=role_arn,
handler_filename=handler_filename,
handler_function=lambda_handler,
folders=folders_from_file,
description=lambda_description,
timeout=timeout,
memory=memory_size,
artifact_bucket=artifact_bucket,
zipfile=zipfile,
runtime=runtime
)
# TODO improve this (by using a waiter??)
cleanup_lambdas_deprecated.append(lambda_name)
@pytest.mark.aws
@check_preconditions
def test_create_lambda_with_s3(awsclient, vendored_folder, cleanup_lambdas_deprecated,
cleanup_roles):
log.info('running test_create_lambda_with_s3')
account = os.getenv('ACCOUNT')
temp_string = utils.random_string()
lambda_name = 'jenkins_test_' + temp_string
log.info(lambda_name)
role = create_role_helper(
awsclient,
'unittest_%s_lambda' % temp_string,
policies=[
'arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole',
'arn:aws:iam::aws:policy/AWSLambdaExecute']
)
cleanup_roles.append(role['RoleName'])
config = {
"lambda": {
"name": "dp-dev-sample-lambda-jobr1",
"description": "lambda nodejs test for ramuda",
"handlerFunction": "handler.handle",
"handlerFile": "./resources/sample_lambda/handler.py",
"timeout": 300,
"memorySize": 256,
"events": {
"s3Sources": [
{
"bucket": "jobr-test",
"type": "s3:ObjectCreated:*",
"suffix": ".gz"
}
],
"timeSchedules": [
{
"ruleName": "infra-dev-sample-lambda-jobr-T1",
"ruleDescription": "run every 5 min from 0-5",
"scheduleExpression": "cron(0/5 0-5 ? * * *)"
},
{
"ruleName": "infra-dev-sample-lambda-jobr-T2",
"ruleDescription": "run every 5 min from 8-23:59",
"scheduleExpression": "cron(0/5 8-23:59 ? * * *)"
}
]
},
"vpc": {
"subnetIds": [
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-d5ffb0b1",
"subnet-e9db9f9f"
],
"securityGroups": [
"sg-660dd700"
]
}
},
"bundling": {
"zip": "bundle.zip",
"folders": [
{
"source": "./vendored",
"target": "."
},
{
"source": "./impl",
"target": "impl"
}
]
},
"deployment": {
"region": "eu-west-1",
"artifactBucket": "7finity-%s-dev-deployment" % account
}
}
lambda_description = config['lambda'].get('description')
role_arn = role['Arn']
lambda_handler = config['lambda'].get('handlerFunction')
handler_filename = config['lambda'].get('handlerFile')
timeout = int(config['lambda'].get('timeout'))
memory_size = int(config['lambda'].get('memorySize'))
zip_name = config['bundling'].get('zip')
folders_from_file = config['bundling'].get('folders')
subnet_ids = config['lambda'].get('vpc', {}).get('subnetIds', None)
security_groups = config['lambda'].get('vpc', {}).get('securityGroups',
None)
region = config['deployment'].get('region')
artifact_bucket = config['deployment'].get('artifactBucket', None)
zipfile = get_zipped_file(
handler_filename,
folders_from_file,
)
deploy_lambda(
awsclient=awsclient,
function_name=lambda_name,
role=role_arn,
handler_filename=handler_filename,
handler_function=lambda_handler,
folders=folders_from_file,
description=lambda_description,
timeout=timeout,
memory=memory_size,
artifact_bucket=artifact_bucket,
zipfile=zipfile
)
cleanup_lambdas_deprecated.append(lambda_name)
@pytest.mark.aws
@check_preconditions
def test_update_lambda(awsclient, vendored_folder, cleanup_lambdas_deprecated,
cleanup_roles):
log.info('running test_update_lambda')
temp_string = utils.random_string()
lambda_name = 'jenkins_test_%s' % temp_string
role_name = 'unittest_%s_lambda' % temp_string
# create the function
role_arn = create_lambda_role_helper(awsclient, role_name)
cleanup_roles.append(role_name)
create_lambda_helper(awsclient, lambda_name, role_arn,
'./resources/sample_lambda/handler.py')
# update the function
create_lambda_helper(awsclient, lambda_name, role_arn,
'./resources/sample_lambda/handler_v2.py')
cleanup_lambdas_deprecated.append(lambda_name)
@pytest.mark.aws
@check_preconditions
def test_lambda_add_invoke_permission(awsclient, vendored_folder,
temp_bucket, cleanup_lambdas_deprecated,
cleanup_roles):
log.info('running test_lambda_add_invoke_permission')
temp_string = utils.random_string()
lambda_name = 'jenkins_test_%s' % temp_string
role_name = 'unittest_%s_lambda' % temp_string
role_arn = create_lambda_role_helper(awsclient, role_name)
cleanup_roles.append(role_name)
create_lambda_helper(awsclient, lambda_name, role_arn,
'./resources/sample_lambda/handler_counter.py',
lambda_handler='handler_counter.handle')
cleanup_lambdas_deprecated.append(lambda_name)
bucket_name = temp_bucket
s3_arn = 'arn:aws:s3:::' + bucket_name
response = _lambda_add_invoke_permission(
awsclient, lambda_name, 's3.amazonaws.com', s3_arn)
# {"Statement":"{\"Condition\":{\"ArnLike\":{\"AWS:SourceArn\":\"arn:aws:s3:::unittest-lambda-s3-bucket-coedce\"}},\"Action\":[\"lambda:InvokeFunction\"],\"Resource\":\"arn:aws:lambda:eu-west-1:188084614522:function:jenkins_test_coedce:ACTIVE\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"s3.amazonaws.com\"},\"Sid\":\"07c77fac-68ff-11e6-97f8-c4850848610b\"}"}
assert_not_in('Error', response)
assert_in('lambda:InvokeFunction', response['Statement'])
# TODO add more asserts!!
@pytest.mark.aws
@check_preconditions
def test_list_functions(awsclient, vendored_folder, temp_lambda, logcapture):
logcapture.level = logging.INFO
log.info('running test_list_functions')
list_functions(awsclient)
records = list(logcapture.actual())
assert records[0][1] == 'INFO'
assert records[0][2] == 'running test_list_functions'
assert records[2][1] == 'INFO'
assert records[2][2].startswith('\tMemory')
assert records[3][1] == 'INFO'
assert records[3][2].startswith('\tTimeout')
assert records[5][1] == 'INFO'
assert records[5][2] == '\tCurrent Version: $LATEST'
@pytest.mark.aws
@check_preconditions
def test_update_lambda_configuration(awsclient, vendored_folder, temp_lambda):
log.info('running test_update_lambda_configuration')
lambda_name = temp_lambda[0]
role_arn = temp_lambda[2]
handler_function = './resources/sample_lambda/handler_counter.py'
lambda_description = 'lambda created for unittesting ramuda deployment'
timeout = 300
memory_size = 256
function_version = _update_lambda_configuration(awsclient, lambda_name,
role_arn, handler_function,
lambda_description, timeout,
memory_size)
assert_equal(function_version, '$LATEST')
@pytest.mark.aws
@check_preconditions
def test_get_metrics(awsclient, vendored_folder, temp_lambda, logcapture):
logcapture.level = logging.INFO
log.info('running test_get_metrics')
get_metrics(awsclient, temp_lambda[0])
logcapture.check(
('tests.test_ramuda_aws', 'INFO', u'running test_get_metrics'),
('gcdt.ramuda_core', 'INFO', u'\tDuration 0'),
('gcdt.ramuda_core', 'INFO', u'\tErrors 0'),
('gcdt.ramuda_core', 'INFO', u'\tInvocations 1'),
('gcdt.ramuda_core', 'INFO', u'\tThrottles 0')
)
@pytest.mark.aws
@check_preconditions
def test_rollback(awsclient, vendored_folder, temp_lambda):
log.info('running test_rollback')
lambda_name = temp_lambda[0]
role_arn = temp_lambda[2]
alias_version = _get_alias_version(awsclient, lambda_name, 'ACTIVE')
assert_equal(alias_version, '1')
# update the function
create_lambda_helper(awsclient, lambda_name, role_arn,
'./resources/sample_lambda/handler_v2.py')
# now we use function_version 2!
alias_version = _get_alias_version(awsclient, lambda_name, 'ACTIVE')
assert_equal(alias_version, '$LATEST')
exit_code = rollback(awsclient, lambda_name, alias_name='ACTIVE')
assert_equal(exit_code, 0)
# we rolled back to function_version 1
alias_version = _get_alias_version(awsclient, lambda_name, 'ACTIVE')
assert_equal(alias_version, '1')
# try to rollback when previous version does not exist
exit_code = rollback(awsclient, lambda_name, alias_name='ACTIVE')
assert_equal(exit_code, 1)
# version did not change
alias_version = _get_alias_version(awsclient, lambda_name, 'ACTIVE')
assert_equal(alias_version, '1')
# roll back to the latest version
exit_code = rollback(awsclient, lambda_name, alias_name='ACTIVE',
version='$LATEST')
assert_equal(exit_code, 0)
# latest version of lambda is used
alias_version = _get_alias_version(awsclient, lambda_name, 'ACTIVE')
assert_equal(alias_version, '$LATEST')
# TODO: create more versions >5
# TODO: do multiple rollbacks >5
# TODO: verify version + active after rollback
    # TODO: verify invocations meet the right lambda_function version
# here we have the test for ramuda_utils.list_lambda_versions
response = list_lambda_versions(awsclient, lambda_name)
assert_equal(response['Versions'][0]['Version'], '$LATEST')
assert_equal(response['Versions'][1]['Version'], '1')
assert_equal(response['Versions'][2]['Version'], '2')
@pytest.mark.aws
@check_preconditions
@check_normal_mode
def test_get_remote_code_hash(awsclient, vendored_folder, temp_lambda):
# this works only with the '--keep' option so timestamps of installed
# dependencies are unchanged. Consequently in this situation the bundle
# hashcode is the same.
# NOTE: this only makes sense in 'normal' placebo mode (not with playback!)
log.info('running test_get_remote_code_hash')
handler_filename = './resources/sample_lambda/handler.py'
folders_from_file = [
{'source': './vendored', 'target': '.'},
{'source': './resources/sample_lambda/impl', 'target': 'impl'}
]
# now run the update using '--keep' option to get a similar hash
# get local hash
zipfile = get_zipped_file(handler_filename, folders_from_file, keep=True)
expected_hash = create_sha256(zipfile)
lambda_name = temp_lambda[0]
time.sleep(10)
remote_hash = get_remote_code_hash(awsclient, lambda_name)
assert remote_hash == expected_hash
@pytest.mark.aws
@check_preconditions
def test_ping(awsclient, vendored_folder, temp_lambda):
log.info('running test_ping')
lambda_name = temp_lambda[0]
role_arn = temp_lambda[2]
# test the ping
response = ping(awsclient, lambda_name)
assert response == '"alive"'
# update the function
create_lambda_helper(awsclient, lambda_name, role_arn,
'./resources/sample_lambda/handler_no_ping.py',
lambda_handler='handler_no_ping.handle')
# test has no ping
response = ping(awsclient, lambda_name)
assert response == '{"ramuda_action": "ping"}'
@pytest.mark.aws
@check_preconditions
def test_invoke_outfile(awsclient, vendored_folder, temp_lambda):
log.info('running test_invoke')
lambda_name = temp_lambda[0]
role_arn = temp_lambda[2]
outfile = create_tempfile('')
payload = '{"ramuda_action": "ping"}' # default to ping event
response = invoke(awsclient, lambda_name, payload=payload, outfile=outfile)
with open(outfile, 'r') as ofile:
assert ofile.read() == '"alive"'
# cleanup
os.unlink(outfile)
@pytest.mark.aws
@check_preconditions
def test_invoke_payload_from_file(awsclient, vendored_folder, temp_lambda):
log.info('running test_invoke')
lambda_name = temp_lambda[0]
role_arn = temp_lambda[2]
payload_file = create_tempfile('{"ramuda_action": "ping"}')
response = invoke(awsclient, lambda_name,
payload='file://%s' % payload_file)
assert response == '"alive"'
# cleanup
os.unlink(payload_file)
@pytest.mark.aws
@check_preconditions
def test_info(awsclient, vendored_folder, temp_lambda, logcapture):
logcapture.level = logging.INFO
function_name = temp_lambda[0]
info(awsclient, function_name)
#out, err = capsys.readouterr()
#assert '### PERMISSIONS ###' in out
#assert '### EVENT SOURCES ###' in out
# TODO
#logcapture.check(
# ('gcdt.ramuda_core', 'INFO', '\n### PERMISSIONS ###\n'),
# ('gcdt.ramuda_core', 'INFO', '\n### EVENT SOURCES ###\n')
#)
@pytest.mark.aws
@check_preconditions
def test_sample_lambda_nodejs_with_env(awsclient, vendored_folder,
cleanup_lambdas_deprecated, cleanup_roles):
log.info('running test_sample_lambda_nodejs_with_env')
lambda_folder = './resources/sample_lambda_nodejs_with_env/'
temp_string = utils.random_string()
lambda_name = 'jenkins_test_sample-lambda-nodejs6_10_' + temp_string
role_name = 'unittest_%s_lambda' % temp_string
role_arn = create_lambda_role_helper(awsclient, role_name)
# create the function
create_lambda_helper(awsclient, lambda_name, role_arn,
here(lambda_folder + 'index.js'),
lambda_handler='index.handler',
folders_from_file=[],
runtime='nodejs6.10',
environment={"MYVALUE": "FOO"}
)
cleanup_roles.append(role_name)
cleanup_lambdas_deprecated.append(lambda_name)
payload = '{"ramuda_action": "getenv"}' # provided by our test sample
result = invoke(awsclient, lambda_name, payload)
env = json.loads(result)
assert 'MYVALUE' in env
assert env['MYVALUE'] == 'FOO'
# TODO test_info with s3 and timed event sources
# TODO
# _ensure_cloudwatch_event
# wire
# _get_lambda_policies
# use create_lambda_helper to simplify above testcases
|
|
import logging
import time
import threading
from coapthon import defines
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class ObserveItem(object):
def __init__(self, timestamp, non_counter, allowed, transaction, serv=None):
"""
Data structure for the Observe option
        :param timestamp: the timestamp of the last message sent
        :param non_counter: the number of NON notifications sent
:param allowed: if the client is allowed as observer
:param transaction: the transaction
:param serv: reference to CoAP object
"""
self.timestamp = timestamp
self.non_counter = non_counter
self.allowed = allowed
self.transaction = transaction
# parameters for dynamic resource observing
self.conditional = False
self.conditions = {}
self.last_notify = time.time()
self.timer = None
self.coap = serv
# timer for notification procedure is set at (pmax - pmin)/2
def pmax_timer(self):
self.coap.notify(self.transaction.resource)
def start_timer(self):
pmin = 0
pmax = 0
for cond in self.conditions:
if cond == "pmin":
pmin = self.conditions[cond]
elif cond == "pmax":
pmax = self.conditions[cond]
if pmax == 0:
return
else:
self.timer = threading.Timer((pmax-pmin)/2, self.pmax_timer)
self.timer.start()
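    # Example of the timer above: with conditions {"pmin": 4, "pmax": 60}
    # the timer fires after (60 - 4) / 2 = 28 seconds and pushes a new
    # notification via pmax_timer(); if pmax is 0 or absent, no timer runs.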
class ObserveLayer(object):
"""
    Manage the observing feature. It stores observing relationships.
"""
def __init__(self, server=None):
self._relations = {}
self._server = server
def send_request(self, request):
"""
Add itself to the observing list
:param request: the request
:return: the request unmodified
"""
if request.observe == 0:
# Observe request
host, port = request.destination
key_token = hash(str(host) + str(port) + str(request.token))
self._relations[key_token] = ObserveItem(time.time(), None, True, None)
if request.observe == 1:
# Cancelling observe explicitly
self.remove_subscriber(request)
return request
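    # Observe relations are keyed by hash(host + port + token); for example
    # a request to ("127.0.0.1", 5683) with token "ab" is tracked under
    # hash("127.0.0.1" + "5683" + "ab") (values here are illustrative).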
def receive_response(self, transaction):
"""
Sets notification's parameters.
:type transaction: Transaction
:param transaction: the transaction
:rtype : Transaction
:return: the modified transaction
"""
host, port = transaction.response.source
key_token = hash(str(host) + str(port) + str(transaction.response.token))
if key_token in self._relations and transaction.response.type == defines.Types["CON"]:
transaction.notification = True
return transaction
def send_empty(self, message):
"""
        Remove the client from the observer list if the message is an RST.
:type message: Message
:param message: the message
:return: the message unmodified
"""
host, port = message.destination
key_token = hash(str(host) + str(port) + str(message.token))
if key_token in self._relations and message.type == defines.Types["RST"]:
del self._relations[key_token]
return message
def receive_request(self, transaction):
"""
        Manage the observe option in the request: add the client to the list
        of observers, or remove it from the list.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction
"""
if transaction.request.observe == 0:
# Observe request
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
non_counter = 0
if key_token in self._relations:
# Renew registration
allowed = True
else:
allowed = False
self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction, self._server)
# check if the observing request has dynamic parameters (sent inside uri_query field)
if transaction.request.uri_query is not None:
logger.info("Dynamic Observing registration")
self._relations[key_token].conditional = True
self._relations[key_token].conditions = ObserveLayer.parse_uri_query(transaction.request.uri_query)
self._relations[key_token].start_timer()
elif transaction.request.observe == 1:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
logger.info("Remove Subscriber")
try:
del self._relations[key_token]
except KeyError:
pass
return transaction
def receive_empty(self, empty, transaction):
"""
        Manage the observe feature: remove a client when an RST message is
        received in reply to a notification.
:type empty: Message
:param empty: the received message
:type transaction: Transaction
:param transaction: the transaction that owns the notification message
:rtype : Transaction
:return: the modified transaction
"""
if empty.type == defines.Types["RST"]:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
logger.info("Remove Subscriber")
try:
del self._relations[key_token]
except KeyError:
pass
transaction.completed = True
return transaction
def send_response(self, transaction):
"""
        Finalize adding the client to the list of observers.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:return: the transaction unmodified
"""
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
if key_token in self._relations:
if transaction.response.code == defines.Codes.CONTENT.number:
if transaction.resource is not None and transaction.resource.observable:
transaction.response.observe = transaction.resource.observe_count
self._relations[key_token].allowed = True
self._relations[key_token].transaction = transaction
self._relations[key_token].timestamp = time.time()
else:
del self._relations[key_token]
elif transaction.response.code >= defines.Codes.ERROR_LOWER_BOUND:
del self._relations[key_token]
return transaction
def notify(self, resource, root=None):
"""
        Prepare notifications for the resource for all interested observers.
        :rtype: list
        :param resource: the resource for which to send a new notification
:param root: deprecated
:return: the list of transactions to be notified
"""
ret = []
if root is not None:
resource_list = root.with_prefix_resource(resource.path)
else:
resource_list = [resource]
for key in self._relations.keys():
if self._relations[key].transaction.resource in resource_list:
# checking dynamic resource parameters
if self._relations[key].conditional:
if self.verify_conditions(self._relations[key]) is False:
continue
# updating relation timestamp and resetting timer
self._relations[key].last_notify = time.time()
self._relations[key].timer.cancel()
self._relations[key].start_timer()
if self._relations[key].non_counter > defines.MAX_NON_NOTIFICATIONS \
or self._relations[key].transaction.request.type == defines.Types["CON"]:
self._relations[key].transaction.response.type = defines.Types["CON"]
self._relations[key].non_counter = 0
elif self._relations[key].transaction.request.type == defines.Types["NON"]:
self._relations[key].non_counter += 1
self._relations[key].transaction.response.type = defines.Types["NON"]
self._relations[key].transaction.resource = resource
del self._relations[key].transaction.response.mid
del self._relations[key].transaction.response.token
ret.append(self._relations[key].transaction)
return ret
def remove_subscriber(self, message):
"""
Remove a subscriber based on token.
:param message: the message
"""
logger.debug("Remove Subscriber")
host, port = message.destination
key_token = hash(str(host) + str(port) + str(message.token))
try:
self._relations[key_token].transaction.completed = True
del self._relations[key_token]
except AttributeError:
logger.warning("No Transaction")
except KeyError:
logger.warning("No Subscriber")
@staticmethod
def parse_uri_query(uri_query):
"""
        Parse the conditional parameters for conditional observing.
:return: a map with pairs [parameter, value]
"""
dict_att = {}
        logger.debug(uri_query)
        attributes = uri_query.split(";")
        for att in attributes:
            a = att.split("=")
            if len(a) > 1:
                if str(a[0]) == "band":
                    # bool("false") would be True, so compare the string
                    a[1] = a[1].lower() == "true"
                elif a[1].isdigit():
                    a[1] = int(a[1])
                dict_att[str(a[0])] = a[1]
            else:
                dict_att[str(a[0])] = a[0]
        logger.debug(dict_att)
return dict_att
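    # Sketch of the parsing above for a hypothetical registration with
    # uri_query "pmin=5;pmax=60;band=true":
    #   parse_uri_query("pmin=5;pmax=60;band=true")
    #   -> {"pmin": 5, "pmax": 60, "band": True}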
@staticmethod
def verify_conditions(item):
"""
        Check whether the changed resource requires a notification.
:param item: ObserveItem
:return: Boolean
"""
for cond in item.conditions:
if cond == "pmin":
# CURRENT TIME - TIMESTAMP < PMIN
t = int(time.time() - item.last_notify)
if t < int(item.conditions[cond]):
return False
return True
|
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The barbican handlers class.
# Bare functions are provided to the reactive handlers to perform the
# functions needed on the class.
from __future__ import absolute_import
import subprocess
import charmhelpers.core.hookenv as hookenv
import charms_openstack.charm
import charms_openstack.adapters
import charms_openstack.ip as os_ip
PACKAGES = ['barbican-common', 'barbican-api', 'barbican-worker',
'python-mysqldb']
BARBICAN_DIR = '/etc/barbican/'
BARBICAN_CONF = BARBICAN_DIR + "barbican.conf"
BARBICAN_API_PASTE_CONF = BARBICAN_DIR + "barbican-api-paste.ini"
BARBICAN_WSGI_CONF = '/etc/apache2/conf-available/barbican-api.conf'
OPENSTACK_RELEASE_KEY = 'barbican-charm.openstack-release-version'
# select the default release function
charms_openstack.charm.use_defaults('charm.default-select-release')
###
# Implementation of the Barbican Charm classes
# Add some properties to the configuration for templates/code to use with the
# charm instance. The config_validator is called when the configuration is
# loaded, and the config_property decorator adds the decorated names to the
# config object.
@charms_openstack.adapters.config_property
def validate_keystone_api_version(config):
if config.keystone_api_version not in ['2', '3', 'none']:
raise ValueError(
"Unsupported keystone-api-version ({}). It should be 2 or 3"
.format(config.keystone_api_version))
@charms_openstack.adapters.config_property
def barbican_api_keystone_pipeline(config):
if config.keystone_api_version == "2":
return 'cors keystone_authtoken context apiapp'
else:
return 'cors keystone_v3_authtoken context apiapp'
@charms_openstack.adapters.config_property
def barbican_api_pipeline(config):
return {
"2": "cors keystone_authtoken context apiapp",
"3": "cors keystone_v3_authtoken context apiapp",
"none": "cors unauthenticated-context apiapp"
}[config.keystone_api_version]
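# For example, with keystone-api-version "3" the mapping above yields the
# pipeline "cors keystone_v3_authtoken context apiapp" (presumably consumed
# by the barbican-api-paste.ini template).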
@charms_openstack.adapters.config_property
def barbican_api_keystone_audit_pipeline(config):
if config.keystone_api_version == "2":
return 'keystone_authtoken context audit apiapp'
else:
return 'keystone_v3_authtoken context audit apiapp'
# Adapt the barbican-hsm-plugin relation for use in rendering the config
# for Barbican. Note that the HSM relation is optional, so we have a class
# variable 'exists' that we can test in the template to see if we should
# render HSM parameters into the template.
@charms_openstack.adapters.adapter_property('hsm')
def library_path(hsm):
"""Provide a library_path property to the template if it exists"""
try:
return hsm.relation.plugin_data['library_path']
    except Exception:
return ''
@charms_openstack.adapters.adapter_property('hsm')
def login(hsm):
"""Provide a login property to the template if it exists"""
try:
return hsm.relation.plugin_data['login']
    except Exception:
return ''
@charms_openstack.adapters.adapter_property('hsm')
def slot_id(hsm):
"""Provide a slot_id property to the template if it exists"""
try:
return hsm.relation.plugin_data['slot_id']
    except Exception:
return ''
class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
"""BarbicanCharm provides the specialisation of the OpenStackCharm
functionality to manage a barbican unit.
"""
release = 'mitaka'
name = 'barbican'
packages = PACKAGES
api_ports = {
'barbican-worker': {
os_ip.PUBLIC: 9311,
os_ip.ADMIN: 9312,
os_ip.INTERNAL: 9311,
}
}
service_type = 'barbican'
default_service = 'barbican-worker'
services = ['apache2', 'barbican-worker']
# Note that the hsm interface is optional - defined in config.yaml
required_relations = ['shared-db', 'amqp', 'identity-service']
restart_map = {
BARBICAN_CONF: services,
BARBICAN_API_PASTE_CONF: services,
BARBICAN_WSGI_CONF: services,
}
ha_resources = ['vips', 'haproxy']
def get_amqp_credentials(self):
"""Provide the default amqp username and vhost as a tuple.
        :returns (username, vhost): two strings to send to the amqp provider.
"""
return (self.config['rabbit-user'], self.config['rabbit-vhost'])
def get_database_setup(self):
"""Provide the default database credentials as a list of 3-tuples
returns a structure of:
[
{'database': <database>,
'username': <username>,
'hostname': <hostname of this unit>
'prefix': <the optional prefix for the database>, },
]
:returns [{'database': ...}, ...]: credentials for multiple databases
"""
return [
dict(
database=self.config['database'],
username=self.config['database-user'],
hostname=hookenv.unit_private_ip(), )
]
def action_generate_mkek(self, hsm):
"""Generate an MKEK on a connected HSM. Requires that an HSM is
        available via the barbican-hsm-plugin interface, generically known as
'hsm'.
Uses the barbican-manage command.
:param hsm: instance of BarbicanRequires() class from the
barbican-hsm-plugin interface
"""
plugin_data = hsm.plugin_data
cmd = [
'barbican-manage', 'hsm', 'gen_mkek',
'--library-path', plugin_data['library_path'],
'--passphrase', plugin_data['login'],
'--slot-id', plugin_data['slot_id'],
'--length', str(hookenv.config('mkek-key-length')),
'--label', hookenv.config('label-mkek'),
]
try:
subprocess.check_call(cmd)
hookenv.log("barbican-mangage hsm gen_mkek succeeded")
except subprocess.CalledProcessError:
str_err = "barbican-manage hsm gen_mkek failed."
hookenv.log(str_err)
raise Exception(str_err)
def action_generate_hmac(self, hsm):
"""Generate an HMAC on a connected HSM. Requires that an HSM is
        available via the barbican-hsm-plugin interface, generically known as
'hsm'.
Uses the barbican-manage command.
:param hsm: instance of BarbicanRequires() class from the
barbican-hsm-plugin interface
"""
plugin_data = hsm.plugin_data
cmd = [
'barbican-manage', 'hsm', 'gen_hmac',
'--library-path', plugin_data['library_path'],
'--passphrase', plugin_data['login'],
'--slot-id', plugin_data['slot_id'],
'--length', str(hookenv.config('hmac-key-length')),
'--label', hookenv.config('label-hmac'),
]
try:
subprocess.check_call(cmd)
hookenv.log("barbican-mangage hsm gen_hmac succeeded")
except subprocess.CalledProcessError:
str_err = "barbican-manage hsm gen_hmac failed."
hookenv.log(str_err)
raise Exception(str_err)
def states_to_check(self, required_relations=None):
"""Override the default states_to_check() for the assess_status
functionality so that, if we have to have an HSM relation, then enforce
it on the assess_status() call.
If param required_relations is not None then it overrides the
instance/class variable self.required_relations.
:param required_relations: [list of state names]
:returns: [states{} as per parent method]
"""
if required_relations is None:
required_relations = self.required_relations
if hookenv.config('require-hsm-plugin'):
required_relations.append('hsm')
return super(BarbicanCharm, self).states_to_check(
required_relations=required_relations)
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from unittest.mock import MagicMock
from uuid import uuid1
from polycommon import user_system
from polycommon.events.event import Attribute, Event
from polycommon.events.registry import run
from polycommon.json_utils import loads
class TestEvents(TestCase):
def test_serialize(self):
class DummyEvent(Event):
event_type = "dummy.event"
attributes = (Attribute("attr1"),)
event = DummyEvent(attr1="test")
event_serialized = event.serialize(dumps=False)
assert event_serialized["type"] == "dummy.event"
assert event_serialized["uuid"] is not None
assert event_serialized["timestamp"] is not None
assert event_serialized["data"]["attr1"] == "test"
event_serialized_dump = event.serialize(dumps=True)
assert event_serialized == loads(event_serialized_dump)
def test_serialize_with_instance(self):
instance = MagicMock(instance_id=1)
event = run.RunSucceededEvent.from_instance(
instance=instance, actor_id=1, actor_name="user"
)
event.serialize(dumps=False, include_instance_info=True)
def test_from_event_data(self):
instance = MagicMock(ref_id=None)
event = run.RunSucceededEvent.from_instance(
instance=instance,
actor_id=1,
actor_name="user",
project_id=1,
project_name="project",
project_owner_id=1,
project_owner_name="owner",
)
assert event.ref_id is None
event_serialized = event.serialize(dumps=False, include_instance_info=True)
assert event_serialized.get("ref_id") is None
new_event = run.RunSucceededEvent.from_event_data(event_data=event_serialized)
assert new_event.serialize(include_instance_info=True) == event_serialized
# Add ref id
event.ref_id = uuid1()
event_serialized = event.serialize(dumps=False, include_instance_info=True)
assert event_serialized["ref_id"] == event.ref_id.hex
new_event = run.RunSucceededEvent.from_event_data(event_data=event_serialized)
assert new_event.ref_id == event.ref_id
assert new_event.serialize(include_instance_info=True) == event_serialized
def test_get_value_from_instance(self):
class DummyEvent(Event):
event_type = "dummy.event"
class SimpleObject:
attr1 = "test"
class ComposedObject:
attr2 = SimpleObject()
value = DummyEvent.get_value_from_instance(
attr="attr1", instance=SimpleObject()
)
assert value == "test"
value = DummyEvent.get_value_from_instance(
attr="attr2", instance=SimpleObject()
)
assert value is None
value = DummyEvent.get_value_from_instance(
attr="attr2.attr1", instance=ComposedObject()
)
assert value == "test"
value = DummyEvent.get_value_from_instance(
attr="attr2.attr3", instance=ComposedObject()
)
assert value is None
value = DummyEvent.get_value_from_instance(
attr="attr2.attr1.attr3", instance=ComposedObject()
)
assert value is None
value = DummyEvent.get_value_from_instance(
attr="attr2.attr4.attr3", instance=SimpleObject()
)
assert value is None
def test_from_instance_simple_event(self):
class DummyEvent(Event):
event_type = "dummy.event"
attributes = (Attribute("attr1"),)
class DummyObject:
attr1 = "test"
obj = DummyObject()
event = DummyEvent.from_instance(obj)
event_serialized = event.serialize(dumps=False)
assert event_serialized["type"] == "dummy.event"
assert event_serialized["uuid"] is not None
assert event_serialized["timestamp"] is not None
assert event_serialized["data"]["attr1"] == "test"
def test_from_instance_nested_event(self):
class DummyEvent(Event):
event_type = "dummy.event"
attributes = (
Attribute("attr1"),
Attribute("attr2.attr3"),
Attribute("attr2.attr4", is_required=False),
)
class DummyObject:
class NestedObject:
attr3 = "test2"
attr1 = "test"
attr2 = NestedObject()
obj = DummyObject()
event = DummyEvent.from_instance(obj)
event_serialized = event.serialize(dumps=False)
assert event_serialized["type"] == "dummy.event"
assert event_serialized["uuid"] is not None
assert event_serialized["timestamp"] is not None
assert event_serialized["data"]["attr1"] == "test"
assert event_serialized["data"]["attr2.attr3"] == "test2"
assert event_serialized["data"]["attr2.attr4"] is None
def test_actor(self):
class DummyEvent1(Event):
event_type = "dummy.event"
actor = True
attributes = (Attribute("attr1"),)
class DummyEvent2(Event):
event_type = "dummy.event"
actor = True
actor_id = "some_actor_id"
actor_name = "some_actor_name"
attributes = (Attribute("attr1"),)
class DummyObject1:
attr1 = "test"
class DummyObject2:
attr1 = "test"
some_actor_id = 1
some_actor_name = "foo"
# Not providing actor_id raises
obj = DummyObject1()
with self.assertRaises(ValueError):
DummyEvent1.from_instance(obj)
# Providing actor_id and not actor_name raises
with self.assertRaises(ValueError):
DummyEvent1.from_instance(obj, actor_id=1)
# Providing system actor id without actor_name does not raise
event = DummyEvent1.from_instance(obj, actor_id=user_system.USER_SYSTEM_ID)
assert event.data["actor_id"] == user_system.USER_SYSTEM_ID
assert event.data["actor_name"] == user_system.USER_SYSTEM_NAME
# Providing actor_id and actor_name does not raise
event = DummyEvent1.from_instance(obj, actor_id=1, actor_name="foo")
assert event.data["actor_id"] == 1
assert event.data["actor_name"] == "foo"
# Using an instance that has the actor properties
obj2 = DummyObject2()
event = DummyEvent2.from_instance(obj2)
assert event.data["some_actor_id"] == 1
assert event.data["some_actor_name"] == "foo"
# Using an instance that has the actor properties and overriding the actor
event = DummyEvent2.from_instance(
obj2,
some_actor_id=user_system.USER_SYSTEM_ID,
some_actor_name=user_system.USER_SYSTEM_NAME,
)
assert event.data["some_actor_id"] == user_system.USER_SYSTEM_ID
assert event.data["some_actor_name"] == user_system.USER_SYSTEM_NAME
|
|
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
import re
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
try:
rgb = colormap[color]
except KeyError:
try:
# fall back on case-insensitive lookup
rgb = colormap[color.lower()]
except KeyError:
rgb = None
# found color in cache
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
m = re.match("#\w\w\w$", color)
if m:
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16)
)
m = re.match("#\w\w\w\w\w\w$", color)
if m:
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)
)
m = re.match("rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match("rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match("hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match("rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
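# A few illustrative calls (values follow the colormap and parsers above):
#   getrgb("red")                  -> (255, 0, 0)
#   getrgb("#f00")                 -> (255, 0, 0)
#   getrgb("rgb(255, 0, 0)")       -> (255, 0, 0)
#   getrgb("rgba(255, 0, 0, 128)") -> (255, 0, 0, 128)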
def getcolor(color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
"""
# same as getrgb, but converts the result to the given mode
color, alpha = getrgb(color), 255
if len(color) == 4:
color, alpha = color[0:3], color[3]
if Image.getmodebase(mode) == "L":
r, g, b = color
color = (r*299 + g*587 + b*114)//1000
if mode[-1] == 'A':
return (color, alpha)
else:
if mode[-1] == 'A':
return color + (alpha,)
return color
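# Example of the grayscale path above: getcolor("red", "L") computes
# (255*299 + 0*587 + 0*114) // 1000 = 76, while getcolor("red", "RGBA")
# returns (255, 0, 0, 255).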
colormap = {
# X11 colour table (from "CSS3 module: Color working draft"), with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of different layers."""
import inits
import tensorflow.compat.v1 as tf
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
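# Example: the first Dense layer built without an explicit name becomes
# 'dense_1' and the second 'dense_2', since get_layer_uid('dense') counts up
# per layer class (see Layer.__init__ below).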
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1. / keep_prob)
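# Sketch of the masking trick above: with keep_prob = 0.6 each entry of
# random_tensor is uniform in [0.6, 1.6), so tf.floor() yields 1 (keep) with
# probability 0.6 and 0 (drop) otherwise; retained values are rescaled by
# 1/0.6 so the expected magnitude is unchanged.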
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
def layernorm(x, offset, scale):
mean, variance = tf.nn.moments(x, axes=[1], keep_dims=True)
return tf.nn.batch_normalization(x, mean, variance, offset, scale, 1e-9)
class Layer(object):
"""Base layer class.
Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg, _ in kwargs.items():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
"""Dense layer."""
def __init__(self,
input_dim,
output_dim,
placeholders,
dropout=0.,
sparse_inputs=False,
act=tf.nn.relu,
bias=False,
featureless=False,
norm=False,
**kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.norm = norm
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = inits.glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = inits.zeros([output_dim], name='bias')
if self.norm:
self.vars['offset'] = inits.zeros([1, output_dim], name='offset')
self.vars['scale'] = inits.ones([1, output_dim], name='scale')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
with tf.variable_scope(self.name + '_vars'):
if self.norm:
output = layernorm(output, self.vars['offset'], self.vars['scale'])
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self,
input_dim,
output_dim,
placeholders,
dropout=0.,
sparse_inputs=False,
act=tf.nn.relu,
bias=False,
featureless=False,
norm=False,
precalc=False,
**kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.norm = norm
self.precalc = precalc
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = inits.glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = inits.zeros([output_dim], name='bias')
if self.norm:
self.vars['offset'] = inits.zeros([1, output_dim], name='offset')
self.vars['scale'] = inits.ones([1, output_dim], name='scale')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# convolve
if self.precalc:
support = x
else:
support = dot(self.support, x, sparse=True)
support = tf.concat((support, x), axis=1)
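      # note: the concatenation doubles the column count, so the caller is
      # expected to size input_dim to the width of [support, x]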
# dropout
support = tf.nn.dropout(support, 1 - self.dropout)
output = dot(support, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
with tf.variable_scope(self.name + '_vars'):
if self.norm:
output = layernorm(output, self.vars['offset'], self.vars['scale'])
return self.act(output)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Long Short-Term Memory layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import uuid
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import gru_lstm_utils
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from keras.utils import tf_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
RECURRENT_DROPOUT_WARNING_MSG = (
'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
'Using `implementation=1`.')
@keras_export('keras.layers.LSTMCell', v1=[])
class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
"""Cell class for the LSTM layer.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
`tf.keras.layer.LSTM` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.LSTMCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
      (`tanh`). If you pass `None`, no activation is applied (i.e. "linear"
activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
    states: List of 2 tensors that correspond to the cell's units. Both of
      them have shape `[batch, units]`; the first tensor is the memory state
      from the previous time step, the second tensor is the carry state from
      the previous time step. For timestep 0, the initial state provided by
      the user will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
    if units <= 0:
      raise ValueError(f'Received an invalid value for argument `units`, '
                       f'expected a positive integer, got {units}.')
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop('enable_caching_device', True)
else:
self._enable_caching_device = kwargs.pop('enable_caching_device', False)
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
implementation = kwargs.pop('implementation', 2)
if self.recurrent_dropout != 0 and implementation != 1:
logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
self.implementation = 1
else:
self.implementation = implementation
self.state_size = [self.units, self.units]
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
default_caching_device = rnn_utils.caching_device(self)
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
caching_device=default_caching_device)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return backend.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.get('ones')((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
caching_device=default_caching_device)
else:
self.bias = None
self.built = True
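  # Note on the `unit_forget_bias` branch in `build` above: the custom
  # initializer concatenates per-gate bias segments so that the final
  # `(self.units * 4,)` bias is laid out as [i | f | c | o], with the
  # forget-gate quarter initialized to ones. For example, with `units=2`
  # and the default zeros initializer this yields [0, 0, 1, 1, 0, 0, 0, 0].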
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(x_f + backend.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
c = f * c_tm1 + i * self.activation(x_c + backend.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
k_i, k_f, k_c, k_o = tf.split(
self.kernel, num_or_size_splits=4, axis=1)
x_i = backend.dot(inputs_i, k_i)
x_f = backend.dot(inputs_f, k_f)
x_c = backend.dot(inputs_c, k_c)
x_o = backend.dot(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = tf.split(
self.bias, num_or_size_splits=4, axis=0)
x_i = backend.bias_add(x_i, b_i)
x_f = backend.bias_add(x_f, b_f)
x_c = backend.bias_add(x_c, b_c)
x_o = backend.bias_add(x_o, b_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if 0. < self.dropout < 1.:
inputs = inputs * dp_mask[0]
z = backend.dot(inputs, self.kernel)
z += backend.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = backend.bias_add(z, self.bias)
z = tf.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
config.update(rnn_utils.config_for_enable_caching_device(self))
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(rnn_utils.generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
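# Illustrative sketch: because `LSTMCell` only processes a single timestep,
# it can also be driven manually. `get_initial_state` above returns the
# zero-filled `[h, c]` state pair described in the class docstring.
#
# >>> cell = LSTMCell(4)
# >>> inputs = tf.random.normal([32, 10, 8])  # [batch, time, feature]
# >>> states = cell.get_initial_state(batch_size=32, dtype=inputs.dtype)
# >>> for t in range(10):
# ...   output, states = cell(inputs[:, t, :], states)
# >>> print(output.shape)  # equal to states[0], the memory state
# (32, 4)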
@keras_export('keras.layers.LSTM', v1=[])
class LSTM(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
"""Long Short-Term Memory layer - Hochreiter 1997.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of the RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the cuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
  6. Inputs, if masking is used, are strictly right-padded.
7. Eager execution is enabled in the outermost context.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> lstm = tf.keras.layers.LSTM(4)
>>> output = lstm(inputs)
>>> print(output.shape)
(32, 4)
>>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
      is applied (i.e. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
      Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
      applied (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
    return_sequences: Boolean. Whether to return the last output in the output
      sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default `False`). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
unroll: Boolean (default `False`). If True, the network will be unrolled,
      else a symbolic loop will be used. Unrolling can speed up an RNN,
      although it tends to be more memory-intensive. Unrolling is only
      suitable for short sequences.
Call arguments:
inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked (optional, defaults to `None`).
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional, defaults to `None`).
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, defaults to `None` which causes creation
of zero-filled initial state tensors).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
unroll=False,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False)
implementation = kwargs.pop('implementation', 2)
if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
                      'Please update your layer call.')
if 'enable_caching_device' in kwargs:
cell_kwargs = {'enable_caching_device':
kwargs.pop('enable_caching_device')}
else:
cell_kwargs = {}
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
dtype=kwargs.get('dtype'),
trainable=kwargs.get('trainable', True),
**cell_kwargs)
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
time_major=time_major,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = [
InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
]
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, tf.tanh) and
self.recurrent_activation in (activations.sigmoid, tf.sigmoid) and
recurrent_dropout == 0 and not unroll and use_bias and
tf.compat.v1.executing_eagerly_outside_functions())
if tf.config.list_logical_devices('GPU'):
      # Only show the message when there is a GPU available; the user will
      # not care about cuDNN if there isn't any GPU.
if self._could_use_gpu_kernel:
logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warning(gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name)
if gru_lstm_utils.use_new_gru_lstm_impl():
self._defun_wrapper = gru_lstm_utils.DefunWrapper(
time_major, go_backwards, 'lstm')
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
is_ragged_input = (row_lengths is not None)
self._validate_args_if_ragged(is_ragged_input, mask)
    # LSTM does not support constants. Ignore them during processing.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = backend.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self._could_use_gpu_kernel:
# Fall back to use the normal LSTM.
kwargs = {'training': training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(inputs, states):
return self.cell(inputs, states, **kwargs)
last_output, outputs, states = backend.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
else:
# Use the new defun approach for backend implementation swap.
      # Note that the different implementations need to have the same
      # function signature, e.g. the tensor parameters need to have the same
      # shape and dtypes. Since cuDNN has an extra set of biases, those
      # biases will be passed to both the normal and cuDNN implementations.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
if gru_lstm_utils.use_new_gru_lstm_impl():
lstm_kwargs = {
'inputs':
inputs,
'init_h':
gru_lstm_utils.read_variable_value(initial_state[0]),
'init_c':
gru_lstm_utils.read_variable_value(initial_state[1]),
'kernel':
gru_lstm_utils.read_variable_value(self.cell.kernel),
'recurrent_kernel':
gru_lstm_utils.read_variable_value(self.cell.recurrent_kernel),
'bias':
gru_lstm_utils.read_variable_value(self.cell.bias),
'mask':
mask,
'time_major':
self.time_major,
'go_backwards':
self.go_backwards,
'sequence_lengths':
row_lengths,
'zero_output_for_mask':
self.zero_output_for_mask,
}
(last_output, outputs, new_h, new_c,
runtime) = self._defun_wrapper.defun_layer(**lstm_kwargs)
else:
gpu_lstm_kwargs = {
'inputs':
inputs,
'init_h':
gru_lstm_utils.read_variable_value(initial_state[0]),
'init_c':
gru_lstm_utils.read_variable_value(initial_state[1]),
'kernel':
gru_lstm_utils.read_variable_value(self.cell.kernel),
'recurrent_kernel':
gru_lstm_utils.read_variable_value(self.cell.recurrent_kernel),
'bias':
gru_lstm_utils.read_variable_value(self.cell.bias),
'mask':
mask,
'time_major':
self.time_major,
'go_backwards':
self.go_backwards,
'sequence_lengths':
row_lengths
}
normal_lstm_kwargs = gpu_lstm_kwargs.copy()
normal_lstm_kwargs.update({
'zero_output_for_mask': self.zero_output_for_mask,
})
if tf.executing_eagerly():
device_type = gru_lstm_utils.get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == gru_lstm_utils.GPU_DEVICE_NAME or
(device_type is None
and tf.config.list_logical_devices('GPU'))) and
(mask is None or
gru_lstm_utils.is_cudnn_supported_inputs(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, new_c, runtime = gpu_lstm(
**gpu_lstm_kwargs)
else:
last_output, outputs, new_h, new_c, runtime = standard_lstm(
**normal_lstm_kwargs)
else:
(last_output, outputs, new_h, new_c,
runtime) = lstm_with_backend_selection(**normal_lstm_kwargs)
states = [new_h, new_c]
if self.stateful:
updates = [
tf.compat.v1.assign(self_state, tf.cast(state, self_state.dtype))
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_sequences:
output = backend.maybe_convert_to_ragged(
is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self.return_runtime:
return output, runtime
else:
return output
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
config.update(rnn_utils.config_for_enable_caching_device(self.cell))
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
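# Illustrative sketch: requirement 6 of the cuDNN criteria above means a mask
# must be strictly right-padded for the fast kernel to be eligible; any other
# mask makes the layer fall back to the generic kernel at call time.
#
# >>> layer = LSTM(4, return_sequences=True)
# >>> x = tf.random.normal([2, 3, 8])
# >>> mask = tf.constant([[True, True, False],  # right-padded: cuDNN-eligible
# ...                     [True, True, True]])
# >>> y = layer(x, mask=mask)
# >>> print(y.shape)
# (2, 3, 4)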
def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""LSTM with standard kernel implementation.
  This implementation can be run on all types of hardware.
  This implementation lifts out all the layer weights and makes them function
  parameters. It has the same number of tensor input params as the cuDNN
  counterpart. The RNN step logic has been simplified, e.g. dropout and
  masking are removed since the cuDNN implementation does not support them.
  Note that the first half of the bias tensor should be ignored by this impl.
  The cuDNN impl needs an extra set of input gate biases. In order to make
  both functions take parameters of the same shape, that extra set of biases
  is also fed here.
Args:
inputs: input tensor of LSTM layer.
init_h: initial state tensor for the cell output.
init_c: initial state tensor for the cell hidden state.
kernel: weights for cell kernel.
recurrent_kernel: weights for cell recurrent kernel.
bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
time_major: boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has same shape as init_h.
state_1: the cell hidden state, which has same shape as init_c.
    runtime: constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
input_shape = backend.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = backend.dot(cell_inputs, kernel)
z += backend.dot(h_tm1, recurrent_kernel)
z = backend.bias_add(z, bias)
z0, z1, z2, z3 = tf.split(z, 4, axis=1)
i = tf.sigmoid(z0)
f = tf.sigmoid(z1)
c = f * c_tm1 + i * tf.tanh(z2)
o = tf.sigmoid(z3)
h = o * tf.tanh(c)
return h, [h, c]
last_output, outputs, new_states = backend.rnn(
step,
inputs, [init_h, init_c],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=(sequence_lengths
if sequence_lengths is not None else timesteps),
zero_output_for_mask=zero_output_for_mask)
return (last_output, outputs, new_states[0], new_states[1],
gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_CPU))
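# Illustrative sketch: `standard_lstm` can be exercised directly with the
# weights of a built cell. This assumes the bias is passed in the cell's
# `(units * 4,)` layout, which is the shape `backend.bias_add` in the step
# function above expects.
#
# >>> cell = LSTMCell(4)
# >>> cell.build((None, 8))
# >>> h0, c0 = tf.zeros([2, 4]), tf.zeros([2, 4])
# >>> _, outputs, _, _, _ = standard_lstm(
# ...     tf.random.normal([2, 5, 8]), h0, c0, cell.kernel,
# ...     cell.recurrent_kernel, cell.bias, mask=None, time_major=False,
# ...     go_backwards=False, sequence_lengths=None,
# ...     zero_output_for_mask=False)
# >>> print(outputs.shape)
# (2, 5, 4)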
def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards, sequence_lengths):
"""LSTM with either cuDNN or ROCm implementation which is only available for GPU.
  Note that currently only right-padded data is supported; otherwise the
  result will be polluted by the unmasked data, which should be filtered.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence. An
      individual `True` entry indicates that the corresponding timestep should
      be utilized, while a `False` entry indicates that the corresponding
      timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of [time, batch,
feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
Returns:
last_output: Output tensor for the last timestep, which has shape
[batch, units].
outputs: Output tensor for all timesteps, which has shape
[batch, time, units].
state_0: The cell output, which has same shape as init_h.
state_1: The cell hidden state, which has same shape as init_c.
    runtime: Constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
if mask is not None:
sequence_lengths = gru_lstm_utils.calculate_sequence_by_mask(
mask, time_major)
if not time_major and sequence_lengths is None:
inputs = tf.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
# For init_h and init_c, cuDNN expects one more dim of num_layers before or
# after batch dim for time major or batch major inputs respectively
init_h = tf.expand_dims(init_h, axis=seq_axis)
init_c = tf.expand_dims(init_c, axis=seq_axis)
weights = tf.split(kernel, 4, axis=1)
weights += tf.split(recurrent_kernel, 4, axis=1)
  # cuDNN has an extra set of biases for inputs; we disable them (setting them
  # to 0) so that mathematically it is the same as the canonical LSTM
  # implementation.
full_bias = tf.concat((tf.zeros_like(bias), bias), 0)
if tf.sysconfig.get_build_info()['is_rocm_build']:
# ROCm MIOpen's weight sequence for LSTM is different from both canonical
# and Cudnn format
# MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
# i is input gate weights.
# f is forget gate weights.
# o is output gate weights.
# c is cell gate weights.
weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
# full_bias is a tensor of shape (8*n,)
full_bias = tf.split(full_bias, 8, axis=0)
full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
params = gru_lstm_utils.canonical_to_params(
weights=weights,
biases=tf.split(full_bias, 8),
shape=tf.constant([-1]),
transpose_weights=True)
if sequence_lengths is not None:
if go_backwards:
      # Three reversals are required. E.g.,
      # normal input = [1, 2, 3, 0, 0]  # where the 0s need to be masked
      # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
      # output_from_cudnn = [6, 5, 4, 0, 0]
      # expected_output = [0, 0, 6, 5, 4]
inputs = tf.reverse_sequence(
inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3(
input=inputs,
input_h=init_h,
input_c=init_c,
params=params,
is_training=True,
rnn_mode='lstm',
sequence_lengths=sequence_lengths,
time_major=time_major)
if go_backwards:
outputs = tf.reverse_sequence(
outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs = tf.reverse(outputs, axis=[seq_axis])
else:
# # Fill the array with shape [batch] with value of max timesteps.
# sequence_length = array_ops.fill([array_ops.shape(inputs)[1]],
# array_ops.shape(inputs)[0])
if go_backwards:
      # Reverse axis 0 since the input has already been converted to time
      # major.
inputs = tf.reverse(inputs, axis=[0])
outputs, h, c, _ = tf.raw_ops.CudnnRNN(
input=inputs, input_h=init_h, input_c=init_c, params=params,
is_training=True, rnn_mode='lstm')
last_output = outputs[-1]
if not time_major and sequence_lengths is None:
outputs = tf.transpose(outputs, perm=[1, 0, 2])
h = tf.squeeze(h, axis=seq_axis)
c = tf.squeeze(c, axis=seq_axis)
  # In the case of variable length input, the cudnn kernel will fill zeros for
  # the output, whereas the default keras behavior is to bring over the
  # previous output for t-1, so that in the return_sequences=False case, the
  # user can quickly get the final effective output instead of just 0s at the
  # last timestep.
  # In order to mimic the default keras behavior, we copy the final h state as
  # the last_output, since it is numerically the same as the output.
if sequence_lengths is not None:
last_output = h
return last_output, outputs, h, c, gru_lstm_utils.runtime(
gru_lstm_utils.RUNTIME_GPU)
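# Illustrative sketch: the three-reversal comment above in miniature. Given a
# padded cuDNN output and `sequence_lengths=[3]`, `tf.reverse_sequence` only
# reverses the valid prefix, and the final full reverse moves the padding to
# the front, matching the expected_output layout.
#
# >>> y = tf.constant([[6., 5., 4., 0., 0.]])  # output_from_cudnn
# >>> y = tf.reverse_sequence(y, [3], seq_axis=1, batch_axis=0)
# >>> print(tf.reverse(y, axis=[1]).numpy())  # expected_output
# [[0. 0. 6. 5. 4.]]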
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the
  most generic kernel, which can run under all device conditions, and a
  second one with the cuDNN-specific kernel, which can only run on GPU.
  The first function will be called with normal_lstm_params, while the second
  function is not called, but only registered in the graph. Grappler will
  do the proper graph rewrite and swap in the optimized TF function based on
  the device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use cuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_lstm_fn():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
    def standard_lstm_fn():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return tf.cond(
gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_lstm_fn,
        false_fn=standard_lstm_fn)
if gru_lstm_utils.use_new_gru_lstm_impl():
# Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h, new_c,
runtime) = tf.__internal__.execute_fn_for_device(
{
gru_lstm_utils.CPU_DEVICE_NAME:
lambda: standard_lstm(**params),
gru_lstm_utils.GPU_DEVICE_NAME:
lambda: gpu_lstm_with_fallback(**params)
}, lambda: standard_lstm(**params))
else:
    # Each time a `tf.function` is called, we will give it a unique
    # identifiable API name, so that Grappler won't get confused when it
    # sees multiple LSTM layers added into the same graph, and it will be
    # able to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.CPU_DEVICE_NAME, standard_lstm,
supportive_attribute)
defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.GPU_DEVICE_NAME, gpu_lstm_with_fallback,
supportive_attribute)
# Call the normal LSTM impl and register the cuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(**params)
gru_lstm_utils.function_register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
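# Illustrative sketch: the `return_runtime` testing flag popped in
# `LSTM.__init__` exposes which kernel was actually selected, which is how
# the selection logic above can be verified. `runtime` is the scalar constant
# produced by `gru_lstm_utils.runtime` (unknown/CPU/GPU).
#
# >>> layer = LSTM(4, return_runtime=True)
# >>> output, runtime = layer(tf.random.normal([2, 5, 8]))
# >>> print(output.shape)
# (2, 4)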
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_config import cfg
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest.lib.common import dynamic_creds
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.identity.v2 import identity_client as v2_iden_client
from tempest.lib.services.identity.v2 import roles_client as v2_roles_client
from tempest.lib.services.identity.v2 import tenants_client as \
v2_tenants_client
from tempest.lib.services.identity.v2 import token_client as v2_token_client
from tempest.lib.services.identity.v2 import users_client as v2_users_client
from tempest.lib.services.identity.v3 import domains_client
from tempest.lib.services.identity.v3 import identity_client as v3_iden_client
from tempest.lib.services.identity.v3 import projects_client as \
v3_projects_client
from tempest.lib.services.identity.v3 import roles_client as v3_roles_client
from tempest.lib.services.identity.v3 import token_client as v3_token_client
from tempest.lib.services.identity.v3 import users_client as \
v3_users_client
from tempest.lib.services.network import routers_client
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_http
from tempest.tests.lib import fake_identity
from tempest.tests.lib.services import registry_fixture
class TestDynamicCredentialProvider(base.TestCase):
fixed_params = {'name': 'test class',
'identity_version': 'v2',
'admin_role': 'admin',
'identity_uri': 'fake_uri'}
token_client = v2_token_client
iden_client = v2_iden_client
roles_client = v2_roles_client
tenants_client = v2_tenants_client
users_client = v2_users_client
token_client_class = token_client.TokenClient
fake_response = fake_identity._fake_v2_response
tenants_client_class = tenants_client.TenantsClient
delete_tenant = 'delete_tenant'
def setUp(self):
super(TestDynamicCredentialProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.useFixture(registry_fixture.RegistryFixture())
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.patchobject(self.token_client_class, 'raw_request',
self.fake_response)
cfg.CONF.set_default('operator_role', 'FakeRole',
group='object-storage')
self._mock_list_ec2_credentials('fake_user_id', 'fake_tenant_id')
self.fixed_params.update(
admin_creds=self._get_fake_admin_creds())
def test_tempest_client(self):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self.assertIsInstance(creds.identity_admin_client,
self.iden_client.IdentityClient)
def _get_fake_admin_creds(self):
return credentials.get_credentials(
fill_in=False,
identity_version=self.fixed_params['identity_version'],
username='fake_username', password='fake_password',
tenant_name='fake_tenant')
def _mock_user_create(self, id, name):
user_fix = self.useFixture(fixtures.MockPatchObject(
self.users_client.UsersClient,
'create_user',
return_value=(rest_client.ResponseBody
(200, {'user': {'id': id, 'name': name}}))))
return user_fix
def _mock_tenant_create(self, id, name):
tenant_fix = self.useFixture(fixtures.MockPatchObject(
self.tenants_client.TenantsClient,
'create_tenant',
return_value=(rest_client.ResponseBody
(200, {'tenant': {'id': id, 'name': name}}))))
return tenant_fix
def _mock_list_roles(self, id, name):
roles_fix = self.useFixture(fixtures.MockPatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': id, 'name': name},
{'id': '1', 'name': 'FakeRole'},
{'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_2_roles(self):
roles_fix = self.useFixture(fixtures.MockPatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': '1234', 'name': 'role1'},
{'id': '1', 'name': 'FakeRole'},
{'id': '12345', 'name': 'role2'}]}))))
return roles_fix
def _mock_assign_user_role(self):
tenant_fix = self.useFixture(fixtures.MockPatchObject(
self.roles_client.RolesClient,
'create_user_role_on_project',
return_value=(rest_client.ResponseBody
(200, {}))))
return tenant_fix
def _mock_list_role(self):
roles_fix = self.useFixture(fixtures.MockPatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200, {'roles': [
{'id': '1', 'name': 'FakeRole'},
{'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_ec2_credentials(self, user_id, tenant_id):
ec2_creds_fix = self.useFixture(fixtures.MockPatchObject(
self.users_client.UsersClient,
'list_user_ec2_credentials',
return_value=(rest_client.ResponseBody
(200, {'credentials': [{
'access': 'fake_access',
'secret': 'fake_secret',
'tenant_id': tenant_id,
'user_id': user_id,
'trust_id': None}]}))))
return ec2_creds_fix
def _mock_network_create(self, iso_creds, id, name):
net_fix = self.useFixture(fixtures.MockPatchObject(
iso_creds.networks_admin_client,
'create_network',
return_value={'network': {'id': id, 'name': name}}))
return net_fix
def _mock_subnet_create(self, iso_creds, id, name):
subnet_fix = self.useFixture(fixtures.MockPatchObject(
iso_creds.subnets_admin_client,
'create_subnet',
return_value={'subnet': {'id': id, 'name': name}}))
return subnet_fix
def _mock_router_create(self, id, name):
router_fix = self.useFixture(fixtures.MockPatchObject(
routers_client.RoutersClient,
'create_router',
return_value={'router': {'id': id, 'name': name}}))
return router_fix
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_primary_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
primary_creds = creds.get_primary_creds()
self.assertEqual(primary_creds.username, 'fake_prim_user')
self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
# Verify IDs
self.assertEqual(primary_creds.tenant_id, '1234')
self.assertEqual(primary_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_roles('1234', 'admin')
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
user_mock = mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project')
user_mock.start()
self.addCleanup(user_mock.stop)
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock:
admin_creds = creds.get_admin_creds()
user_mock.assert_has_calls([
mock.call('1234', '1234', '1234')])
self.assertEqual(admin_creds.username, 'fake_admin_user')
self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
# Verify IDs
self.assertEqual(admin_creds.tenant_id, '1234')
self.assertEqual(admin_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_role_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_2_roles()
self._mock_user_create('1234', 'fake_role_user')
self._mock_tenant_create('1234', 'fake_role_tenant')
user_mock = mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project')
user_mock.start()
self.addCleanup(user_mock.stop)
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock:
role_creds = creds.get_creds_by_roles(
roles=['role1', 'role2'])
calls = user_mock.mock_calls
# Assert that the role creation is called with the 2 specified roles
self.assertEqual(len(calls), 2)
args = map(lambda x: x[1], calls)
args = list(args)
self.assertIn(('1234', '1234', '1234'), args)
self.assertIn(('1234', '1234', '12345'), args)
self.assertEqual(role_creds.username, 'fake_role_user')
self.assertEqual(role_creds.tenant_name, 'fake_role_tenant')
# Verify IDs
self.assertEqual(role_creds.tenant_id, '1234')
self.assertEqual(role_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_all_cred_cleanup(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
creds.get_primary_creds()
self._mock_tenant_create('12345', 'fake_alt_tenant')
self._mock_user_create('12345', 'fake_alt_user')
creds.get_alt_creds()
self._mock_tenant_create('123456', 'fake_admin_tenant')
self._mock_user_create('123456', 'fake_admin_user')
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
user_mock = self.patchobject(self.users_client.UsersClient,
'delete_user')
tenant_mock = self.patchobject(self.tenants_client_class,
self.delete_tenant)
creds.clear_creds()
# Verify user delete calls
calls = user_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify tenant delete calls
calls = tenant_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_alt_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
alt_creds = creds.get_alt_creds()
self.assertEqual(alt_creds.username, 'fake_alt_user')
self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
# Verify IDs
self.assertEqual(alt_creds.tenant_id, '1234')
self.assertEqual(alt_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_no_network_creation_with_config_set(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True, create_networks=False,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = mock.patch.object(creds.networks_admin_client,
'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client,
'delete_router')
router_mock = router.start()
primary_creds = creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertIsNone(network)
self.assertIsNone(subnet)
self.assertIsNone(router)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(creds, '1234', 'fake_net')
self._mock_subnet_create(creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
primary_creds = creds.get_primary_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_cleanup(self, MockRestClient):
def side_effect(**args):
return {"security_groups": [{"tenant_id": args['tenant_id'],
"name": args['name'],
"description": args['name'],
"security_group_rules": [],
"id": "sg-%s" % args['tenant_id']}]}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
# Create primary tenant and network
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(creds, '1234', 'fake_net')
self._mock_subnet_create(creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
creds.get_primary_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
router_interface_mock.reset_mock()
# Create alternate tenant and network
self._mock_user_create('12345', 'fake_alt_user')
self._mock_tenant_create('12345', 'fake_alt_tenant')
self._mock_network_create(creds, '12345', 'fake_alt_net')
self._mock_subnet_create(creds, '12345', 'fake_alt_subnet')
self._mock_router_create('12345', 'fake_alt_router')
creds.get_alt_creds()
router_interface_mock.assert_called_once_with('12345',
subnet_id='12345')
router_interface_mock.reset_mock()
# Create admin tenant and networks
self._mock_user_create('123456', 'fake_admin_user')
self._mock_tenant_create('123456', 'fake_admin_tenant')
self._mock_network_create(creds, '123456', 'fake_admin_net')
self._mock_subnet_create(creds, '123456', 'fake_admin_subnet')
self._mock_router_create('123456', 'fake_admin_router')
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
self.patchobject(self.users_client.UsersClient, 'delete_user')
self.patchobject(self.tenants_client_class, self.delete_tenant)
net = mock.patch.object(creds.networks_admin_client, 'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client, 'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client, 'delete_router')
router_mock = router.start()
remove_router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'remove_router_interface')
return_values = ({'status': 200}, {'ports': []})
port_list_mock = mock.patch.object(creds.ports_admin_client,
'list_ports',
return_value=return_values)
port_list_mock.start()
secgroup_list_mock = mock.patch.object(
creds.security_groups_admin_client,
'list_security_groups',
side_effect=side_effect)
secgroup_list_mock.start()
return_values = fake_http.fake_http_response({}, status=204), ''
remove_secgroup_mock = self.patch(
'tempest.lib.services.network.security_groups_client.'
'SecurityGroupsClient.delete', return_value=return_values)
creds.clear_creds()
# Verify default security group delete
calls = remove_secgroup_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('v2.0/security-groups/sg-1234', args)
self.assertIn('v2.0/security-groups/sg-12345', args)
self.assertIn('v2.0/security-groups/sg-123456', args)
# Verify remove router interface calls
calls = remove_router_interface_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: (x[1][0], x[2]), calls)
args = list(args)
self.assertIn(('1234', {'subnet_id': '1234'}), args)
self.assertIn(('12345', {'subnet_id': '12345'}), args)
self.assertIn(('123456', {'subnet_id': '123456'}), args)
# Verify network delete calls
calls = net_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify subnet delete calls
calls = subnet_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify router delete calls
calls = router_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_alt_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
self._mock_network_create(creds, '1234', 'fake_alt_net')
self._mock_subnet_create(creds, '1234', 'fake_alt_subnet')
self._mock_router_create('1234', 'fake_alt_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
alt_creds = creds.get_alt_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = alt_creds.network
subnet = alt_creds.subnet
router = alt_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_alt_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_alt_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_alt_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_admin_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
self._mock_network_create(creds, '1234', 'fake_admin_net')
self._mock_subnet_create(creds, '1234', 'fake_admin_subnet')
self._mock_router_create('1234', 'fake_admin_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
self._mock_list_roles('123456', 'admin')
admin_creds = creds.get_admin_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = admin_creds.network
subnet = admin_creds.subnet
router = admin_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_admin_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_admin_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_admin_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_no_network_resources(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = mock.patch.object(creds.networks_admin_client,
'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client,
'delete_router')
router_mock = router.start()
primary_creds = creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertIsNone(network)
self.assertIsNone(subnet)
self.assertIsNone(router)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_router_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': True,
'subnet': False,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_subnet_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': True,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_dhcp_without_subnet(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': True,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
class TestDynamicCredentialProviderV3(TestDynamicCredentialProvider):
fixed_params = {'name': 'test class',
'identity_version': 'v3',
'admin_role': 'admin',
'identity_uri': 'fake_uri'}
token_client = v3_token_client
iden_client = v3_iden_client
roles_client = v3_roles_client
tenants_client = v3_projects_client
users_client = v3_users_client
token_client_class = token_client.V3TokenClient
fake_response = fake_identity._fake_v3_response
tenants_client_class = tenants_client.ProjectsClient
delete_tenant = 'delete_project'
def setUp(self):
super(TestDynamicCredentialProviderV3, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.useFixture(fixtures.MockPatchObject(
domains_client.DomainsClient, 'list_domains',
return_value=dict(domains=[dict(id='default',
name='Default')])))
self.patchobject(self.roles_client.RolesClient,
'create_user_role_on_domain')
def _mock_list_ec2_credentials(self, user_id, tenant_id):
pass
def _mock_tenant_create(self, id, name):
project_fix = self.useFixture(fixtures.MockPatchObject(
self.tenants_client.ProjectsClient,
'create_project',
return_value=(rest_client.ResponseBody
(200, {'project': {'id': id, 'name': name}}))))
return project_fix
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_member_role_creation_with_duplicate(self, rest_client_mock):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
creds.creds_client = mock.MagicMock()
creds.creds_client.create_user_role.side_effect = lib_exc.Conflict
with mock.patch('tempest.lib.common.dynamic_creds.LOG') as log_mock:
creds._create_creds()
log_mock.warning.assert_called_once_with(
"Member role already exists, ignoring conflict.")
creds.creds_client.assign_user_role.assert_called_once_with(
mock.ANY, mock.ANY, 'Member')
|
|
#!/usr/bin/env python
import os
import re
from uuid import uuid4
from tabulate import tabulate
from multiprocessing.pool import ThreadPool as Pool
from multiprocessing import Lock
from virtbmc.models import VirtBMC, QemuVM
from virtbmc.template import gen_template_content
from virtbmc.clrlog import LOG
import virtbmc.utils as utils
import virtbmc.config as config
from virtbmc import procutils
RUNNING_STATUS = 'running'
STOP_STATUS = 'stop'
ERROR_STATUS = 'error'
class QemuBMCUnit(object):
def __init__(self, number, listen_addr, ipmi_port,
serial_port, telnet_port, qemu_program,
memory, ncpu, vncport, bridge, workspace,
image_size, ipmi_sim, ipmiusr, ipmipass,
uuid=None, **kwargs):
self.uuid = uuid or str(uuid4())
self.number = number
self.bmcname = 'bmc--{}--{}'.format(
self.uuid, self.number)
self.qemuname = 'vm--{}--{}'.format(
self.uuid, self.number
)
self.tmux_name = 'vbmc-{}--{}'.format(
self.number, self.uuid[:8]
)
self.listen_addr = listen_addr
self.ipmi_port = int(ipmi_port)
self.fake_ipmi_mac_port = str(ipmi_port)[:2] + ':' + str(ipmi_port)[2:]
self.serial_port = serial_port
self.telnet_port = telnet_port
self.path_prefix = '{}/{}--{}'.format(workspace, self.uuid, self.number)
self.lan_config_program = '{}/ipmi_sim_lancontrol'.format(self.path_prefix)
self.chassis_control_program = '{}/ipmi_sim_chassiscontrol'.format(self.path_prefix)
self.ipmi_sim = ipmi_sim
self.ipmi_config_file = '{}/lan.conf'.format(self.path_prefix)
self.bmc_env_file = '{}/gen-bmc-env'.format(self.path_prefix)
self.status_file = '{}/vbmc_qemu.status'.format(self.path_prefix)
self.ipmi_op_record_file = '{}/operate.record'.format(self.path_prefix)
self.ipmiusr = ipmiusr or 'root'
self.ipmipass = ipmipass or 'test'
self.qemu_program = qemu_program
self.memory = memory
self.ncpu = ncpu
self.image_size = image_size
self.disk = '{}/disks/{}.qcow2'.format(self.path_prefix, self.uuid)
self.ifup_script = '{}/qemu-ifup'.format(self.path_prefix)
self.ifdown_script = '{}/qemu-ifdown'.format(self.path_prefix)
self.qemu_pidfile = '{}/qemu.pid'.format(self.path_prefix)
self.controller_script = '{}/controller'.format(self.path_prefix)
self.ifmac = kwargs.get('ifmac') or utils.random_mac()
self.vncport = vncport
self.bridge = bridge
def _create_template_content(self, temfile, outfile):
gen_template_content(temfile, outfile, self.__dict__)
def gen_bmc_env(self, temfile):
utils.mkdir_of_file(self.bmc_env_file)
self._create_template_content(temfile, self.bmc_env_file)
utils.make_executable(self.bmc_env_file)
def gen_qemu_ifup(self, temfile):
utils.mkdir_of_file(self.ifup_script)
self._create_template_content(temfile, self.ifup_script)
utils.make_executable(self.ifup_script)
def gen_qemu_ifdown(self, temfile):
utils.mkdir_of_file(self.ifdown_script)
self._create_template_content(temfile, self.ifdown_script)
utils.make_executable(self.ifdown_script)
def gen_ipmi_sim_chassiscontrol(self, temfile):
utils.mkdir_of_file(self.chassis_control_program)
self._create_template_content(temfile, self.chassis_control_program)
utils.make_executable(self.chassis_control_program)
def gen_ipmi_lancontrol(self, temfile):
utils.mkdir_of_file(self.lan_config_program)
self._create_template_content(temfile, self.lan_config_program)
utils.make_executable(self.lan_config_program)
def gen_ipmi_config(self, temfile):
utils.mkdir_of_file(self.ipmi_config_file)
self._create_template_content(temfile, self.ipmi_config_file)
def gen_controller_script(self, temfile, tmux_cmd):
utils.mkdir_of_file(self.controller_script)
self._create_template_content(temfile, self.controller_script)
utils.make_executable(self.controller_script)
utils.cpto(tmux_cmd, self.path_prefix)
def gen_all_scripts(self, temdir):
self.gen_bmc_env('{}/{}'.format(temdir, 'gen-bmc-env.tem'))
self.gen_qemu_ifup('{}/{}'.format(temdir, 'qemu-ifup.tem'))
self.gen_qemu_ifdown('{}/{}'.format(temdir, 'qemu-ifdown.tem'))
self.gen_ipmi_sim_chassiscontrol('{}/{}'.format(temdir, 'ipmi_sim_chassiscontrol.tem'))
self.gen_ipmi_lancontrol('{}/{}'.format(temdir, 'ipmi_sim_lancontrol.tem'))
self.gen_ipmi_config('{}/{}'.format(temdir, 'lan.conf.tem'))
self.gen_controller_script('{}/{}'.format(temdir, 'controller.tem'),
'{}/{}'.format(temdir, 'tmux-cmd'))
def create_qemu_image(self):
utils.mkdir_of_file(self.disk)
cmd = ['qemu-img', 'create', '-f', 'qcow2', self.disk, self.image_size]
utils.run_cmd(cmd)
def run_bmc(self):
if not self.is_bmc_running():
cmd = [self.controller_script, 'start', self.ipmiusr, self.ipmipass]
utils.run_cmd(cmd)
LOG.info('Start BMC: {} DONE.'.format(self.bmcname))
else:
            LOG.warning('BMC: {} already started.'.format(self.bmcname))
def run_vm(self):
try:
if not self.is_vm_running():
cmd = [self.controller_script, 'startvm', self.ipmiusr, self.ipmipass]
utils.run_cmd(cmd)
LOG.info('Starting VM: {} DONE.'.format(self.qemuname))
else:
LOG.warning('VM: {} already started.'.format(self.qemuname))
except Exception as e:
LOG.error("Run VM error: {}".format(e))
def kill_qemu_by_pid(self):
with open(self.qemu_pidfile, 'r') as f:
pid = f.read().strip()
procutils.check_call_no_exception(['kill', '-9', pid])
def get_vm_status_byfile(self):
        info = {}
if not os.path.exists(self.status_file):
info['power'] = 'off'
info['bootdev'] = 'default'
return info
with open(self.status_file) as f:
for line in f.readlines():
power_m = re.search(r'^power: (.*)', line.strip())
bootdev_m = re.search(r'^bootdev: (.*)', line.strip())
if power_m:
info['power'] = power_m.group(1)
if bootdev_m:
info['bootdev'] = bootdev_m.group(1)
return info
def get_vm_status(self):
if not os.path.exists(self.status_file):
status = STOP_STATUS
else:
if self.get_vm_status_byfile().get('power', 'off') == 'off':
status = STOP_STATUS
else:
cmd = 'ipmitool -I lanplus -U {} -P {} -H {} -p {} chassis power status'.format(self.ipmiusr, self.ipmipass,
self.listen_addr, self.ipmi_port).split()
try:
output = utils.run_cmd(cmd)
if 'Power is on' in ''.join(output):
status = RUNNING_STATUS
else:
status = STOP_STATUS
                except Exception:
                    status = ERROR_STATUS
return status
def get_bmc_status(self):
if utils.is_port_open(self.ipmi_port):
return RUNNING_STATUS
else:
return STOP_STATUS
    def is_vm_running(self):
        return self.get_vm_status() == RUNNING_STATUS
    def is_bmc_running(self):
        return self.get_bmc_status() == RUNNING_STATUS
def stop_vm(self):
if self.is_vm_running():
cmd = [self.controller_script, 'stopvm', self.ipmiusr, self.ipmipass]
utils.run_cmd(cmd)
LOG.info('Stop VM: {} DONE.'.format(self.qemuname))
if os.path.exists(self.qemu_pidfile):
if self.get_vm_status() == ERROR_STATUS or procutils.check_pid_alive(self.qemu_pidfile, 'qemu'):
self.kill_qemu_by_pid()
LOG.warning("VM: {} be killed".format(self.qemuname))
else:
LOG.warning('VM: {} already stopped.'.format(self.qemuname))
    def stop_bmc(self):
        # NOTE: is_bmc_running must be called; referencing the bound method
        # itself is always truthy and would make the else branch dead code.
        if self.is_bmc_running():
            cmd = [self.controller_script, 'stop', self.ipmiusr, self.ipmipass]
            utils.run_cmd(cmd)
            LOG.info('Stop BMC: {} DONE.'.format(self.bmcname))
        else:
            LOG.warning('BMC: {} already stopped.'.format(self.bmcname))
def save_todb(self):
qemuvm = QemuVM(**self.__dict__)
vbmc = VirtBMC(vm=qemuvm, **self.__dict__)
qemuvm.save()
vbmc.save()
def cleanup(self):
self.stop_vm()
self.stop_bmc()
utils.rmdirs(self.path_prefix)
list_headers = ['Order', 'UUID', 'TmuxSession',
'ListenIp', 'IPMIPort', 'VncPort', 'VmMAC',
'IPMIUser', 'IPMIPassword', 'BMCStatus',
'VMStatus', 'BootDev']
def get_list_field(self):
return [
self.number,
self.uuid,
self.tmux_name,
self.listen_addr,
self.ipmi_port,
5900+self.vncport,
self.ifmac,
self.ipmiusr,
self.ipmipass,
self.get_bmc_status(),
self.get_vm_status(),
self.get_vm_status_byfile()['bootdev'],
]
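# Illustrative sketch (not wired into the CLI): constructing a unit by hand.
# All values below are hypothetical; in practice gen_config() assembles this
# dict from the command-line arguments and the free-port pools below.
def _example_manual_unit():
    unit = QemuBMCUnit(
        number=0, listen_addr='192.0.2.10', ipmi_port=9001,
        serial_port=9001, telnet_port=9002,
        qemu_program='qemu-system-x86_64', memory='2048', ncpu='2',
        vncport=1, bridge='br0', workspace='/tmp/virtbmc-demo',
        image_size='10G', ipmi_sim='ipmi_sim',
        ipmiusr='root', ipmipass='test')
    # The constructor only derives names and paths; nothing touches disk
    # until gen_all_scripts() and create_qemu_image() are called.
    return unit.bmcname, unit.tmux_name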
BMC_FREE_PORT = utils.get_free_port(9000, 9500)
VNC_FREE_PORT = utils.get_free_port(5900, 6000)
def gen_config(args, num):
global BMC_FREE_PORT
global VNC_FREE_PORT
db_items = VirtBMC.select().order_by(VirtBMC.number)
if len(db_items) != 0:
        used_bmc_port = [item.ipmi_port for item in db_items]
        used_bmc_port += [item.serial_port for item in db_items]
        used_bmc_port += [item.telnet_port for item in db_items]
used_vnc_port = [5900+item.vm.vncport for item in db_items]
        BMC_FREE_PORT = sorted(set(BMC_FREE_PORT) - set(used_bmc_port))
        VNC_FREE_PORT = sorted(set(VNC_FREE_PORT) - set(used_vnc_port))
res = {}
res['number'] = num
res['ipmi_sim'] = args.ipmi_sim
res['listen_addr'] = utils.get_netiface_ip(args.bridge)
ipmi_udp_port = BMC_FREE_PORT.pop(0)
serial_tcp_port = ipmi_udp_port
res['ipmi_port'] = ipmi_udp_port
res['serial_port'] = serial_tcp_port
res['telnet_port'] = BMC_FREE_PORT.pop(0)
res['vncport'] = VNC_FREE_PORT.pop(0) - 5900
res['qemu_program'] = args.qemu
res['memory'] = args.memory
res['ncpu'] = args.ncpu
res['bridge'] = args.bridge
res['workspace'] = config.WORKSPACE
res['image_size'] = args.image_size
res['ipmiusr'] = args.ipmi_user
res['ipmipass'] = args.ipmi_password
return res
def get_QemuBMC_unit(_uuid, **kwargs):
vbmc = VirtBMC.get(VirtBMC.uuid == _uuid)
vm = QemuVM.get(QemuVM.uuid == _uuid)
if kwargs.get('ipmi_user', False) and vbmc.ipmiusr != kwargs['ipmi_user']:
vbmc.ipmiusr = kwargs['ipmi_user']
vbmc.save()
if kwargs.get('ipmi_password', False) and vbmc.ipmipass != kwargs['ipmi_password']:
vbmc.ipmipass = kwargs['ipmi_password']
vbmc.save()
res = vbmc.__dict__['_data'].copy()
res.update(vm.__dict__['_data'])
res['workspace'] = config.WORKSPACE
return QemuBMCUnit(**res), vbmc, vm
def process_map(func, lst):
    if len(lst) == 0:
        LOG.info("Empty list..., skip")
        return
    pool = Pool(processes=len(lst))
    try:
        return pool.map(func, lst)
    finally:
        pool.close()
        pool.join()
def create(args):
def BMC_min_num():
db_items = VirtBMC.select().order_by(VirtBMC.number)
if len(db_items) == 0:
return 0
num_lst = [item.number for item in db_items]
min_con_range = list(utils.ranges(num_lst))[0]
if min_con_range[0] != 0:
return 0
else:
return min_con_range[-1] + 1
def _create(args, lock):
with lock:
res = gen_config(args, BMC_min_num())
unit = QemuBMCUnit(**res)
unit.save_todb()
LOG.info(unit.__dict__)
unit.gen_all_scripts(args.template)
unit.create_qemu_image()
lock = Lock()
process_map(lambda _: _create(args, lock), range(0, args.number))
def extract_ipmi_user_passwd(args):
ret = {}
if args.ipmi_user:
ret['ipmi_user'] = args.ipmi_user
if args.ipmi_password:
ret['ipmi_password'] = args.ipmi_password
return ret
def list_all(args):
uuids = [bmc.uuid for bmc in VirtBMC.select().order_by(VirtBMC.number)]
print_table(uuids, args.json)
def print_table(ids, json_output):
data_series = []
for _uuid in ids:
unit_item, _, _ = get_QemuBMC_unit(_uuid)
data_series.append(unit_item.get_list_field())
if json_output:
import json
output = [dict(zip(QemuBMCUnit.list_headers, item)) for item in data_series]
output = json.dumps(output, indent=4)
else:
output = tabulate(data_series, QemuBMCUnit.list_headers,
tablefmt="psql")
print(output)
def update(args):
def _update(uuid):
kwargs = extract_ipmi_user_passwd(args)
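        # get_QemuBMC_unit() persists any user/password change as a side
        # effect, so the returned unit is not used further here.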
unit_item, _, _ = get_QemuBMC_unit(uuid, **kwargs)
if args.id[0] == 'all':
update_list = [item.uuid for item in VirtBMC.select()]
else:
update_list = args.id
process_map(_update, update_list)
print_table(update_list, args.json)
def delete(args):
def _delete(uuid):
unit_item, bmc, vm = get_QemuBMC_unit(uuid)
unit_item.cleanup()
vm.delete_instance()
bmc.delete_instance()
if args.id[0] == 'all':
delete_list = [item.uuid for item in VirtBMC.select()]
else:
delete_list = args.id
process_map(_delete, delete_list)
def stop(args):
def _stop(uuid):
unit_item, bmc, vm = get_QemuBMC_unit(uuid)
unit_item.stop_vm()
unit_item.stop_bmc()
if args.id[0] == 'all':
stop_list = [item.uuid for item in VirtBMC.select()]
else:
stop_list = args.id
process_map(_stop, stop_list)
def start(args):
def _start(uuid):
unit_item, _, _ = get_QemuBMC_unit(uuid)
unit_item.run_bmc()
if args.autostart_vm:
unit_item.run_vm()
    if args.bmc[0] == 'all':
        bmc_list = [item.uuid for item in VirtBMC.select()]
    else:
        bmc_list = args.bmc
    process_map(_start, bmc_list)
|
|
#!/usr/bin/env python3
# transpiled with BefunCompile v1.3.0 (c) 2017
import gzip, base64
_g = ("Ah+LCAAAAAAABADt0OtLU3EcBvCfd7volAQp2OaKigpdYl5I54glEUqFUStZ6pBminlLnE7zSIG+8AZmC29bvskXRgsrZm257CALbGosXXI82+Skh3nZpsep03mW9qJ/"
+ "ot/z4sPzhe+rh90UDPbjD1ZmDcXYbd/6H0JxdH4eevNZ4akTI2EHf58cDJtdHrqfmHXGkdqOSNp/RksCH/TYpKG5ma8OdGYHK+3zH0LRp1Nvfem1htOqx8wvSsd6pjfK"
+ "GcZxTsbWbkU5b+yXlPV7Vs4lax8hfpJt7dsrAivn8t7pnrcHMHx8GUxfBuef6M6mI4B13J91sfnvlwAKhUKhUCgUCoVCoVAoFAqFQqHQ/948AGhZZaQyhi6LL5Y35XAT"
+ "hMYCr804ESkULhnKk6/1q5Ryl4RlUag1q5IajaW12603uOj3Ne8ON5poP1BvkXEmpTqpvUyj7UwkaJk5g69gFW2qMQvvKKjiEehGKY/W4GieVkW6kpDwdfEACZyIWM00"
+ "jVWYx4WYS2JGRLiuwrxYfqd3sc4zbQoCHoYd+SgKQXJCGvmiYmlcyTS2tLq2xj82wparCS2SP8QAqnk3yxMZa2ZWUsTZT9VdcpQa4W5oJhxmB91k8QceWyVamcVFyXSC"
+ "ZLdqxFkTmFQ9lsRqwC8YMKxNjxMRdAGyPWdYXUvReYsR2aHS7YUmUsTju2RdeqWcukpgaVYfa3d1BmWM8LjntIps0RWvasbdv4uvnJ/ls3+1LJXNtBq1urvXxwQmsk1P"
+ "TKW84dvzj2wAZ8tKjF5NWerobeeukf812oiX6noCbSULKr/PyfG7Gx0URqZV5yhzv4UCywvcLSgaj6PIzp0CqlRAj4662kIZD/nm78YqRdBW4WsGqI2kX86j6fhw8jix"
+ "nXhu2cUEw9zNvb2prepEYjCDcA3xWsYtvQlGnLs5wFakAnK1gxoxDecC6yOvf/jznuZb0WzwB1p9toPEGQAA")
g = base64.b64decode(_g)[1:]
for i in range(base64.b64decode(_g)[0]):
g = gzip.decompress(g)
g=list(g)
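# gr()/gw() read and write the 2017x1013 Befunge playfield with bounds
# checks; td()/tm() are the transpiler's division and modulo, defined here
# as 0 when the divisor is 0.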
def gr(x,y):
if(x>=0 and y>=0 and x<2017 and y<1013):
return g[y*2017 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<2017 and y<1013):
g[y*2017 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
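# The helpers below emulate the Befunge data stack: sp() pops (yielding 0 on
# an empty stack, per Befunge semantics), sa() pushes, sr() peeks.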
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(1,9,1000)
gw(2,9,1017)
gw(3,9,1000000)
gw(1,2,0)
gw(1,4,0)
gw(1,5,gr(3,9))
sa(gr(3,9)-1)
sa(0)
sa(gr(3,9)-1)
sa(gr(3,9)-1)
gw((td(gr(3,9)-1,gr(1,9)))+9,tm(gr(3,9)-1,gr(1,9)),0)
return 1
def _1():
sa(td(sp(),gr(1,9)))
sa(sp()+gr(2,9))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
return (28)if(sp()!=0)else(2)
def _2():
gw(1,3,1)
sp();
sa(1)
return 3
def _3():
sa(2)
sa(2*gr(1,3))
sa((1)if((2*gr(1,3))<gr(3,9))else(0))
return 4
def _4():
return (27)if(sp()!=0)else(5)
def _5():
sp();
sp();
sa(sp()+1)
return (6)if((sr()-gr(3,9))!=0)else(7)
def _6():
sa(sr());
gw(1,3,sp())
return 3
def _7():
gw(2,3,6)
sp();
sa(gr((td(6,gr(1,9)))+9,tm(6,gr(1,9))))
return 8
def _8():
return (9)if(sp()!=0)else(12)
def _9():
sa(gr(2,3)+1)
return (10)if(((gr(2,3)+1)-gr(3,9))!=0)else(11)
def _10():
sa(sr());
gw(2,3,sp())
sa((td(sr(),gr(1,9)))+9)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
sa(gr(sp(),v0))
return 8
def _11():
print(gr(1,5),end=" ",flush=True)
sp();
return 29
def _12():
global t0
sa(gr(2,3))
sa(gr(2,3))
gw(7,0,gr(2,3))
gw(1,2,1)
sa(td(sp(),gr(1,9)))
sa(sp()+gr(2,9))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
t0=gr(sp(),v0)
gw(2,2,t0)
return 13
def _13():
global t0
t0=gr(2,2)
return (14)if(gr(2,2)>0)else(9)
def _14():
global t0
return (15)if(t0<gr(3,9))else(9)
def _15():
global t0
return (16)if((t0-gr(2,3))!=0)else(9)
def _16():
gw(3,1,0)
gw(3,2,0)
sa(0)
sa(gr(7,0)-gr(2,2))
return 17
def _17():
return (23)if(sp()!=0)else(18)
def _18():
global t0
global t1
gw(3,1,1)
t0=gr(1,2)
sa(sr());
sa(t0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
sa(sp()-v0)
t1=sp()
gw(3,2,t1)
return (22)if(gr(3,2)>gr(1,4))else(19)
def _19():
sp();
return (9)if((gr(3,1))!=0)else(20)
def _20():
return (9)if((gr((td(gr(2,2),gr(1,9)))+9,tm(gr(2,2),gr(1,9))))!=0)else(21)
def _21():
sa(gr(2,2))
sa(gr(2,2))
sa(7)
sa(gr(1,2))
gw(1,2,gr(1,2)+1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa((td(sr(),gr(1,9)))+9)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(2,2,gr((td(gr(2,2),gr(1,9)))+gr(2,9),tm(gr(2,2),gr(1,9))))
return 13
def _22():
global t0
gw(1,4,gr(3,2))
sa(sr());
sa(7)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
t0=gr(sp(),v0)
gw(1,5,t0)
return 23
def _23():
global t0
sa(sr());
sa(7)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
t0=gr(sp(),v0)
t0=(1)if(t0<gr(1,5))else(0)
t0=t0*gr(3,1)
return (26)if((t0)!=0)else(24)
def _24():
sa(sp()+1)
return (25)if((sr()-gr(1,2))!=0)else(19)
def _25():
sa(sr());
sa(7)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
sa(gr(sp(),v0))
sa(sp()-gr(2,2))
return 17
def _26():
global t0
sa(sr());
sa(7)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
t0=gr(sp(),v0)
gw(1,5,t0)
return 24
def _27():
sa(sr());
sa((td(sr(),gr(1,9)))+gr(2,9))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
sa(gr(sp(),v0))
sa(sp()+gr(1,3))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa((td(sr(),gr(1,9)))+gr(2,9))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()+1)
sa(sr()*gr(1,3))
sa((1)if(sr()<gr(3,9))else(0))
return 4
def _28():
sa(sp()-1)
sa(sr());
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa((td(sr(),gr(1,9)))+9)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sp(),gr(1,9)))
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28]
c=0
while c<29:
c=m[c]()
|
|
import django
from django.contrib.contenttypes.generic import GenericRelation, GenericRel
from django.contrib.contenttypes.models import ContentType
from django.db import DEFAULT_DB_ALIAS
from django.db.models.query_utils import Q
from django.utils.functional import lazy
from django.utils.text import capfirst
from fluent_contents import appsettings
from fluent_contents.forms.fields import PlaceholderFormField
from fluent_contents.models import Placeholder, ContentItem
__all__ = (
'PlaceholderRelation', 'ContentItemRelation',
'PlaceholderField',
)
# The PlaceholderField is inspired by Django CMS
# Yet it uses a different methodology to access the fields.
#
# In Django CMS it's a ForeignKey to Placeholder.
# Here, the Placeholder has a GenericForeignKey to the parent - hence it will be deleted when the parent is removed -
# so the PlaceholderField is merely a reverse GenericRelation.
#
# In the admin, the logic of the PlaceholderEditor code can be reused.
class PlaceholderRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
    is expected to be referenced by a :class:`~fluent_contents.models.Placeholder`. For example:
.. code-block:: python
class Page(models.Model):
placeholder_set = PlaceholderRelation()
"""
def __init__(self, **kwargs):
defaults = {
'limit_choices_to': Q(
parent_type=lazy(lambda: ContentType.objects.get_for_model(Placeholder), ContentType)()
)
}
defaults.update(kwargs)
super(PlaceholderRelation, self).__init__(to=Placeholder,
object_id_field='parent_id', content_type_field='parent_type', **defaults)
class ContentItemRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
is expected to be referenced by the :class:`~fluent_contents.models.ContentItem` classes. For example:
.. code-block:: python
class Page(models.Model):
contentitem_set = ContentItemRelation()
Adding this relation also causes the admin delete page to list the
:class:`~fluent_contents.models.ContentItem` objects which will be deleted.
"""
def __init__(self, **kwargs):
super(ContentItemRelation, self).__init__(to=ContentItem,
object_id_field='parent_id', content_type_field='parent_type', **kwargs)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
# Fix delete screen. Workaround for https://github.com/chrisglass/django_polymorphic/issues/34
return super(ContentItemRelation, self).bulk_related_objects(objs).non_polymorphic()
class PlaceholderRel(GenericRel):
"""
The internal :class:`~django.contrib.contenttypes.generic.GenericRel`
that is used by the :class:`PlaceholderField` to support queries.
"""
def __init__(self, field):
limit_choices_to = Q(
parent_type=lazy(lambda: ContentType.objects.get_for_model(Placeholder), ContentType)(),
slot=field.slot,
)
# TODO: make sure reverse queries work properly
if django.VERSION >= (1, 6, 0):
super(PlaceholderRel, self).__init__(
field=field,
to=Placeholder,
related_name=None, # NOTE: must be unique for app/model/slot.
limit_choices_to=limit_choices_to
)
else:
super(PlaceholderRel, self).__init__(
to=Placeholder,
related_name=None, # NOTE: must be unique for app/model/slot.
limit_choices_to=limit_choices_to
)
class PlaceholderFieldDescriptor(object):
"""
This descriptor is placed on the PlaceholderField model instance
by the :func:`~PlaceholderField.contribute_to_class` function.
This causes ``instance.field`` to return a :class:`~fluent_contents.models.Placeholder` object.
"""
def __init__(self, slot):
"""Set the slot this descriptor is created for."""
self.slot = slot
def __get__(self, instance, instance_type=None):
"""Return the placeholder by slot."""
if instance is None:
return self
try:
placeholder = Placeholder.objects.get_by_slot(instance, self.slot)
except Placeholder.DoesNotExist:
raise Placeholder.DoesNotExist("Placeholder does not exist for parent {0} (type_id: {1}, parent_id: {2}), slot: '{3}'".format(
repr(instance),
ContentType.objects.get_for_model(instance).pk,
instance.pk,
self.slot
))
else:
placeholder.parent = instance # fill the reverse cache
return placeholder
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Descriptor must be accessed via instance")
if value == "-DUMMY-":
return
raise NotImplementedError("Setting Placeholder value is not supported, use Placeholder.objects.create_for_object() instead.")
class PlaceholderField(PlaceholderRelation):
"""
The model field to add :class:`~fluent_contents.models.ContentItem` objects to a model.
:param slot: A programmatic name to identify the placeholder.
:param plugins: Optional, define which plugins are allowed to be used. This can be a list of names, or :class:`~fluent_contents.extensions.ContentPlugin` references.
:type slot: str
:type plugins: list
This class provides the form fields for the field. Use this class in a model to use it:
.. code-block:: python
class Article(models.Model):
contents = PlaceholderField("article_contents")
The data itself is stored as reverse relation in the :class:`~fluent_contents.models.ContentItem` object.
Hence, all contents will be cleaned up properly when the parent model is deleted.
The placeholder will be displayed in the admin:
.. image:: /images/admin/placeholderfieldadmin1.png
:width: 770px
:height: 562px
:alt: django-fluent-contents placeholder field preview
"""
def __init__(self, slot, plugins=None, **kwargs):
"""
Initialize the placeholder field.
"""
super(PlaceholderField, self).__init__(**kwargs)
self.slot = slot
# See if a plugin configuration is defined in the settings
self._slot_config = appsettings.FLUENT_CONTENTS_PLACEHOLDER_CONFIG.get(slot) or {}
self._plugins = plugins or self._slot_config.get('plugins') or None
# Overwrite some hardcoded defaults from the base class.
self.editable = True
self.blank = True # TODO: support blank: False to enforce adding at least one plugin.
        self.rel = PlaceholderRel(self)  # This supports queries
def formfield(self, **kwargs):
"""
Returns a :class:`PlaceholderFormField` instance for this database Field.
"""
defaults = {
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
'required': not self.blank,
}
defaults.update(kwargs)
return PlaceholderFormField(slot=self.slot, plugins=self._plugins, **defaults)
def contribute_to_class(self, cls, name):
"""
Internal Django method to associate the field with the Model; it assigns the descriptor.
"""
super(PlaceholderField, self).contribute_to_class(cls, name)
# overwrites what instance.<colname> returns; give direct access to the placeholder
setattr(cls, name, PlaceholderFieldDescriptor(self.slot))
# Make placeholder fields easy to find
# Can't assign this to cls._meta because that gets overwritten by every level of model inheritance.
if not hasattr(cls, '_meta_placeholder_fields'):
cls._meta_placeholder_fields = {}
cls._meta_placeholder_fields[name] = self
        # Configure the reverse relation if possible.
# TODO: make sure reverse queries work properly
if self.rel.related_name is None:
            # Make unique for model (multiple models can use the same slot name)
self.rel.related_name = '{app}_{model}_{slot}_FIXME'.format(
app=cls._meta.app_label,
model=cls._meta.object_name.lower(),
slot=self.slot
)
        # The reverse attribute must exist on the Placeholder model for the delete page. Currently it's not actively used.
# The regular ForeignKey assigns a ForeignRelatedObjectsDescriptor to it for example.
# In this case, the PlaceholderRelation is already the reverse relation.
# Being able to move forward from the Placeholder to the derived models does not have that much value.
setattr(self.rel.to, self.rel.related_name, None)
@property
def plugins(self):
"""
Get the set of plugins that this field may display.
"""
from fluent_contents import extensions
if self._plugins is None:
return extensions.plugin_pool.get_plugins()
else:
try:
return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
except extensions.PluginNotFound as e:
raise extensions.PluginNotFound(str(e) + " Update the plugin list of '{0}.{1}' field or FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{2}'] setting.".format(self.model._meta.object_name, self.name, self.slot))
def value_from_object(self, obj):
"""
Internal Django method, used to return the placeholder ID when exporting the model instance.
"""
try:
# not using self.attname, access the descriptor instead.
placeholder = getattr(obj, self.name)
except Placeholder.DoesNotExist:
return None # Still allow ModelForm / admin to open and create a new Placeholder if the table was truncated.
return placeholder.id if placeholder else None # Be consistent with other fields, like ForeignKey
try:
from south.modelsinspector import add_ignored_fields
except ImportError:
pass
else:
# South 0.7.x ignores GenericRelation fields but doesn't ignore subclasses.
# Taking the same fix as applied in http://south.aeracode.org/ticket/414
_name_re = "^" + __name__.replace(".", "\.")
add_ignored_fields((
_name_re + "\.PlaceholderField",
_name_re + "\.PlaceholderRelation",
_name_re + "\.ContentItemRelation",
))
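# Illustrative sketch (hypothetical model, not part of this module): the
# relations above are typically combined on one parent model. The
# PlaceholderField descriptor returns the Placeholder instance, while
# ContentItemRelation makes the admin delete page list the child items:
#
#     class Article(models.Model):
#         title = models.CharField(max_length=200)
#         contents = PlaceholderField("article_contents")
#         contentitem_set = ContentItemRelation()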
|
|
#!/usr/bin/env python
"""Example code of DDPG on OpenAI Gym environments.
For DDPG, see: https://arxiv.org/abs/1509.02971
"""
from __future__ import print_function
from __future__ import division
import argparse
import collections
import copy
import random
import gym
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
class QFunction(chainer.Chain):
"""Q-function represented by a MLP."""
def __init__(self, obs_size, action_size, n_units=100):
super(QFunction, self).__init__()
with self.init_scope():
self.l0 = L.Linear(obs_size + action_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, 1,
initialW=chainer.initializers.HeNormal(1e-3))
def __call__(self, obs, action):
"""Compute Q-values for given state-action pairs."""
x = F.concat((obs, action), axis=1)
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return self.l2(h)
def squash(x, low, high):
"""Squash values to fit [low, high] via tanh."""
center = (high + low) / 2
scale = (high - low) / 2
return F.tanh(x) * scale + center
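# For example, with low=-2.0 and high=2.0 the outputs stay within (-2, 2),
# and a pre-activation of 0 maps to the interval midpoint 0.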
class Policy(chainer.Chain):
"""Policy represented by a MLP."""
def __init__(self, obs_size, action_size, action_low, action_high,
n_units=100):
super(Policy, self).__init__()
self.action_high = action_high
self.action_low = action_low
with self.init_scope():
self.l0 = L.Linear(obs_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, action_size,
initialW=chainer.initializers.HeNormal(1e-3))
def __call__(self, x):
"""Compute actions for given observations."""
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return squash(self.l2(h),
self.xp.asarray(self.action_low),
self.xp.asarray(self.action_high))
def get_action(policy, obs):
"""Get an action by evaluating a given policy."""
obs = policy.xp.asarray(obs[None], dtype=np.float32)
with chainer.no_backprop_mode():
action = policy(obs).data[0]
return chainer.backends.cuda.to_cpu(action)
def update(Q, target_Q, policy, target_policy, opt_Q, opt_policy,
samples, gamma=0.99):
"""Update a Q-function and a policy."""
xp = Q.xp
obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)
action = xp.asarray([sample[1] for sample in samples], dtype=np.float32)
reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)
done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)
obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)
def update_Q():
# Predicted values: Q(s,a)
y = F.squeeze(Q(obs, action), axis=1)
# Target values: r + gamma * Q(s,policy(s))
with chainer.no_backprop_mode():
next_q = F.squeeze(target_Q(obs_next, target_policy(obs_next)),
axis=1)
target = reward + gamma * (1 - done) * next_q
loss = F.mean_squared_error(y, target)
Q.cleargrads()
loss.backward()
opt_Q.update()
def update_policy():
# Maximize Q(s,policy(s))
q = Q(obs, policy(obs))
q = q[:] # Avoid https://github.com/chainer/chainer/issues/2744
loss = - F.mean(q)
policy.cleargrads()
loss.backward()
opt_policy.update()
update_Q()
update_policy()
def soft_copy_params(source, target, tau):
"""Make the parameters of a link close to the ones of another link.
Making tau close to 0 slows the pace of updates, and close to 1 might lead
to faster, but more volatile updates.
"""
# Sort params by name
source_params = [param for _, param in sorted(source.namedparams())]
target_params = [param for _, param in sorted(target.namedparams())]
for s, t in zip(source_params, target_params):
t.data[:] += tau * (s.data - t.data)
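    # Equivalently, each target parameter tracks an exponential moving
    # average of its source parameter: t <- (1 - tau) * t + tau * s.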
def main():
parser = argparse.ArgumentParser(description='Chainer example: DDPG')
parser.add_argument('--env', type=str, default='Pendulum-v0',
help='Name of the OpenAI Gym environment')
parser.add_argument('--batch-size', '-b', type=int, default=64,
help='Number of transitions in each mini-batch')
parser.add_argument('--episodes', '-e', type=int, default=1000,
help='Number of episodes to run')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='ddpg_result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--reward-scale', type=float, default=1e-3,
help='Reward scale factor')
parser.add_argument('--replay-start-size', type=int, default=500,
help=('Number of iterations after which replay is '
'started'))
parser.add_argument('--tau', type=float, default=1e-2,
help='Softness of soft target update (0, 1]')
parser.add_argument('--noise-scale', type=float, default=0.4,
help='Scale of additive Gaussian noises')
parser.add_argument('--record', action='store_true', default=True,
help='Record performance')
parser.add_argument('--no-record', action='store_false', dest='record')
args = parser.parse_args()
# Initialize an environment
env = gym.make(args.env)
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Box)
obs_size = env.observation_space.low.size
action_size = env.action_space.low.size
if args.record:
env = gym.wrappers.Monitor(env, args.out, force=True)
reward_threshold = env.spec.reward_threshold
if reward_threshold is not None:
print('{} defines "solving" as getting average reward of {} over 100 '
'consecutive trials.'.format(args.env, reward_threshold))
else:
print('{} is an unsolved environment, which means it does not have a '
'specified reward threshold at which it\'s considered '
'solved.'.format(args.env))
# Initialize variables
D = collections.deque(maxlen=10 ** 6) # Replay buffer
Rs = collections.deque(maxlen=100) # History of returns
iteration = 0
# Initialize models and optimizers
Q = QFunction(obs_size, action_size, n_units=args.unit)
policy = Policy(obs_size, action_size,
env.action_space.low, env.action_space.high,
n_units=args.unit)
if args.gpu >= 0:
chainer.backends.cuda.get_device_from_id(args.gpu).use()
Q.to_gpu(args.gpu)
policy.to_gpu(args.gpu)
target_Q = copy.deepcopy(Q)
target_policy = copy.deepcopy(policy)
opt_Q = optimizers.Adam()
opt_Q.setup(Q)
opt_policy = optimizers.Adam(alpha=1e-4)
opt_policy.setup(policy)
for episode in range(args.episodes):
obs = env.reset()
done = False
R = 0.0 # Return (sum of rewards obtained in an episode)
timestep = 0
while not done and timestep < env.spec.timestep_limit:
# Select an action with additive noises for exploration
action = (get_action(policy, obs) +
np.random.normal(scale=args.noise_scale))
# Execute an action
new_obs, reward, done, _ = env.step(
np.clip(action, env.action_space.low, env.action_space.high))
R += reward
# Store a transition
D.append((obs, action, reward * args.reward_scale, done, new_obs))
obs = new_obs
# Sample a random minibatch of transitions and replay
if len(D) >= args.replay_start_size:
sample_indices = random.sample(range(len(D)), args.batch_size)
samples = [D[i] for i in sample_indices]
update(Q, target_Q, policy, target_policy,
opt_Q, opt_policy, samples)
# Soft update of the target networks
soft_copy_params(Q, target_Q, args.tau)
soft_copy_params(policy, target_policy, args.tau)
iteration += 1
timestep += 1
Rs.append(R)
average_R = np.mean(Rs)
print('episode: {} iteration: {} R:{} average_R:{}'.format(
episode, iteration, R, average_R))
if reward_threshold is not None and average_R >= reward_threshold:
print('Solved {} by getting average reward of '
'{} >= {} over 100 consecutive episodes.'.format(
args.env, average_R, reward_threshold))
break
if __name__ == '__main__':
main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
import textwrap
import pretend
import pytest
from cryptography import utils
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends.interfaces import EllipticCurveBackend
from cryptography.hazmat.backends.openssl.backend import (
Backend, backend
)
from cryptography.hazmat.backends.openssl.ec import _sn_to_elliptic_curve
from cryptography.hazmat.primitives import hashes, interfaces
from cryptography.hazmat.primitives.asymmetric import dsa, ec, padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, CTR
from cryptography.hazmat.primitives.interfaces import BlockCipherAlgorithm
from ..primitives.fixtures_rsa import RSA_KEY_512
from ..primitives.test_ec import _skip_curve_unsupported
from ...utils import load_vectors_from_file, raises_unsupported_algorithm
@utils.register_interface(interfaces.Mode)
class DummyMode(object):
name = "dummy-mode"
def validate_for_algorithm(self, algorithm):
pass
@utils.register_interface(interfaces.CipherAlgorithm)
class DummyCipher(object):
name = "dummy-cipher"
@utils.register_interface(interfaces.AsymmetricPadding)
class DummyPadding(object):
name = "dummy-cipher"
@utils.register_interface(interfaces.HashAlgorithm)
class DummyHash(object):
name = "dummy-hash"
class DummyMGF(object):
_salt_length = 0
class TestOpenSSL(object):
def test_backend_exists(self):
assert backend
def test_openssl_version_text(self):
"""
This test checks the value of OPENSSL_VERSION_TEXT.
Unfortunately, this define does not appear to have a
formal content definition, so for now we'll test to see
if it starts with OpenSSL as that appears to be true
for every OpenSSL.
"""
assert backend.openssl_version_text().startswith("OpenSSL")
def test_supports_cipher(self):
assert backend.cipher_supported(None, None) is False
def test_aes_ctr_always_available(self):
# AES CTR should always be available in both 0.9.8 and 1.0.0+
assert backend.cipher_supported(AES(b"\x00" * 16),
CTR(b"\x00" * 16)) is True
def test_register_duplicate_cipher_adapter(self):
with pytest.raises(ValueError):
backend.register_cipher_adapter(AES, CBC, None)
@pytest.mark.parametrize("mode", [DummyMode(), None])
def test_nonexistent_cipher(self, mode):
b = Backend()
b.register_cipher_adapter(
DummyCipher,
type(mode),
lambda backend, cipher, mode: backend._ffi.NULL
)
cipher = Cipher(
DummyCipher(), mode, backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
def test_consume_errors(self):
for i in range(10):
backend._lib.ERR_put_error(backend._lib.ERR_LIB_EVP, 0, 0,
b"test_openssl.py", -1)
assert backend._lib.ERR_peek_error() != 0
errors = backend._consume_errors()
assert backend._lib.ERR_peek_error() == 0
assert len(errors) == 10
def test_openssl_error_string(self):
backend._lib.ERR_put_error(
backend._lib.ERR_LIB_EVP,
backend._lib.EVP_F_EVP_DECRYPTFINAL_EX,
0,
b"test_openssl.py",
-1
)
errors = backend._consume_errors()
exc = backend._unknown_error(errors[0])
assert (
"digital envelope routines:"
"EVP_DecryptFinal_ex:digital envelope routines" in str(exc)
)
def test_ssl_ciphers_registered(self):
meth = backend._lib.TLSv1_method()
ctx = backend._lib.SSL_CTX_new(meth)
assert ctx != backend._ffi.NULL
backend._lib.SSL_CTX_free(ctx)
def test_evp_ciphers_registered(self):
cipher = backend._lib.EVP_get_cipherbyname(b"aes-256-cbc")
assert cipher != backend._ffi.NULL
def test_error_strings_loaded(self):
# returns a value in a static buffer
err = backend._lib.ERR_error_string(101183626, backend._ffi.NULL)
assert backend._ffi.string(err) == (
b"error:0607F08A:digital envelope routines:EVP_EncryptFinal_ex:"
b"data not multiple of block length"
)
def test_unknown_error_in_cipher_finalize(self):
cipher = Cipher(AES(b"\0" * 16), CBC(b"\0" * 16), backend=backend)
enc = cipher.encryptor()
enc.update(b"\0")
backend._lib.ERR_put_error(0, 0, 1,
b"test_openssl.py", -1)
with pytest.raises(InternalError):
enc.finalize()
def test_derive_pbkdf2_raises_unsupported_on_old_openssl(self):
if backend.pbkdf2_hmac_supported(hashes.SHA256()):
pytest.skip("Requires an older OpenSSL")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
backend.derive_pbkdf2_hmac(hashes.SHA256(), 10, b"", 1000, b"")
@pytest.mark.skipif(
backend._lib.OPENSSL_VERSION_NUMBER >= 0x1000000f,
reason="Requires an older OpenSSL. Must be < 1.0.0"
)
def test_large_key_size_on_old_openssl(self):
with pytest.raises(ValueError):
dsa.generate_parameters(2048, backend=backend)
with pytest.raises(ValueError):
dsa.generate_parameters(3072, backend=backend)
@pytest.mark.skipif(
backend._lib.OPENSSL_VERSION_NUMBER < 0x1000000f,
reason="Requires a newer OpenSSL. Must be >= 1.0.0"
)
def test_large_key_size_on_new_openssl(self):
parameters = dsa.generate_parameters(2048, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 2048
parameters = dsa.generate_parameters(3072, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 3072
def test_int_to_bn(self):
value = (2 ** 4242) - 4242
bn = backend._int_to_bn(value)
assert bn != backend._ffi.NULL
bn = backend._ffi.gc(bn, backend._lib.BN_free)
assert bn
assert backend._bn_to_int(bn) == value
def test_int_to_bn_inplace(self):
value = (2 ** 4242) - 4242
bn_ptr = backend._lib.BN_new()
assert bn_ptr != backend._ffi.NULL
bn_ptr = backend._ffi.gc(bn_ptr, backend._lib.BN_free)
bn = backend._int_to_bn(value, bn_ptr)
assert bn == bn_ptr
assert backend._bn_to_int(bn_ptr) == value
class TestOpenSSLRandomEngine(object):
def teardown_method(self, method):
# we need to reset state to being default. backend is a shared global
# for all these tests.
backend.activate_osrandom_engine()
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._lib.Cryptography_osrandom_engine_name
def test_osrandom_engine_is_default(self, tmpdir):
engine_printer = textwrap.dedent(
"""
import sys
from cryptography.hazmat.backends.openssl.backend import backend
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
sys.stdout.write(backend._ffi.string(name).decode('ascii'))
res = backend._lib.ENGINE_free(e)
assert res == 1
"""
)
engine_name = tmpdir.join('engine_name')
with engine_name.open('w') as out:
subprocess.check_call(
[sys.executable, "-c", engine_printer],
stdout=out
)
osrandom_engine_name = backend._ffi.string(
backend._lib.Cryptography_osrandom_engine_name
)
assert engine_name.read().encode('ascii') == osrandom_engine_name
def test_osrandom_sanity_check(self):
# This test serves as a check against catastrophic failure.
buf = backend._ffi.new("char[]", 500)
res = backend._lib.RAND_bytes(buf, 500)
assert res == 1
        assert backend._ffi.buffer(buf)[:] != b"\x00" * 500
def test_activate_osrandom_already_default(self):
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
def test_activate_osrandom_no_default(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
def test_activate_builtin_random(self):
e = backend._lib.ENGINE_get_default_RAND()
assert e != backend._ffi.NULL
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_activate_builtin_random_already_active(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
class TestOpenSSLRSA(object):
def test_generate_rsa_parameters_supported(self):
assert backend.generate_rsa_parameters_supported(1, 1024) is False
assert backend.generate_rsa_parameters_supported(4, 1024) is False
assert backend.generate_rsa_parameters_supported(3, 1024) is True
assert backend.generate_rsa_parameters_supported(3, 511) is False
def test_generate_bad_public_exponent(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=1, key_size=2048)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=4, key_size=2048)
def test_cant_generate_insecure_tiny_key(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=511)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=256)
@pytest.mark.skipif(
backend._lib.OPENSSL_VERSION_NUMBER >= 0x1000100f,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_non_sha1_pss_mgf1_hash_algorithm_on_old_openssl(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA256(),
),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
public_key.verifier(
b"sig",
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA256(),
),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
def test_rsa_padding_unsupported_pss_mgf1_hash(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(DummyHash()), salt_length=0)
) is False
def test_rsa_padding_unsupported(self):
assert backend.rsa_padding_supported(DummyPadding()) is False
def test_rsa_padding_supported_pkcs1v15(self):
assert backend.rsa_padding_supported(padding.PKCS1v15()) is True
def test_rsa_padding_supported_pss(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
) is True
def test_rsa_padding_supported_oaep(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
),
) is True
def test_rsa_padding_unsupported_mgf(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=DummyMGF(),
algorithm=hashes.SHA1(),
label=None
),
) is False
assert backend.rsa_padding_supported(
padding.PSS(mgf=DummyMGF(), salt_length=0)
) is False
def test_unsupported_mgf1_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA1(),
label=None
)
)
def test_unsupported_oaep_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA256(),
label=None
)
)
def test_unsupported_oaep_label_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=b"label"
)
)
@pytest.mark.skipif(
backend._lib.OPENSSL_VERSION_NUMBER <= 0x10001000,
reason="Requires an OpenSSL version >= 1.0.1"
)
class TestOpenSSLCMAC(object):
def test_unsupported_cipher(self):
@utils.register_interface(BlockCipherAlgorithm)
class FakeAlgorithm(object):
def __init__(self):
self.block_size = 64
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
backend.create_cmac_ctx(FakeAlgorithm())
class TestOpenSSLSerialisationWithOpenSSL(object):
def test_pem_password_cb_buffer_too_small(self):
ffi_cb, cb = backend._pem_password_cb(b"aa")
assert cb(None, 1, False, None) == 0
def test_unsupported_evp_pkey_type(self):
key = pretend.stub(type="unsupported")
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_private_key(key)
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_public_key(key)
def test_very_long_pem_serialization_password(self):
password = "x" * 1024
with pytest.raises(ValueError):
load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization",
"key1.pem"
),
lambda pemfile: (
backend.load_traditional_openssl_pem_private_key(
pemfile.read().encode(), password
)
)
)
class TestOpenSSLEllipticCurve(object):
def test_elliptic_curve_supported(self, monkeypatch):
monkeypatch.setattr(backend._lib, "Cryptography_HAS_EC", 0)
assert backend.elliptic_curve_supported(None) is False
def test_elliptic_curve_signature_algorithm_supported(self, monkeypatch):
monkeypatch.setattr(backend._lib, "Cryptography_HAS_EC", 0)
assert backend.elliptic_curve_signature_algorithm_supported(
None, None
) is False
def test_sn_to_elliptic_curve_not_supported(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
_sn_to_elliptic_curve(backend, b"fake")
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
class TestDeprecatedECBackendMethods(object):
def test_elliptic_curve_private_key_from_numbers(self):
d = 5634846038258869671139984276180670841223409490498798721258
y = 4131560123026307384858369684985976479488628761329758810693
x = 3402090428547195623222463880060959356423657484435591627791
curve = ec.SECP192R1()
_skip_curve_unsupported(backend, curve)
pub_numbers = ec.EllipticCurvePublicNumbers(
x=x,
y=y,
curve=curve
)
numbers = ec.EllipticCurvePrivateNumbers(
private_value=d,
public_numbers=pub_numbers
)
pytest.deprecated_call(
backend.elliptic_curve_private_key_from_numbers,
numbers
)
def test_elliptic_curve_public_key_from_numbers(self):
y = 4131560123026307384858369684985976479488628761329758810693
x = 3402090428547195623222463880060959356423657484435591627791
curve = ec.SECP192R1()
_skip_curve_unsupported(backend, curve)
pub_numbers = ec.EllipticCurvePublicNumbers(
x=x,
y=y,
curve=curve
)
pytest.deprecated_call(
backend.elliptic_curve_public_key_from_numbers,
pub_numbers
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The concept here is that mappings from the resource tree and arguments go
# to specific python class signatures. The intent is to require
# plugin authors to come here if they *really* think they need new 'commands'
# and hopefully curtail deviation by each plugin author.
# We have to specify a standard place for cfg selection of *which* plugin,
# as well as a standard to map api requests to python functions
# e.g. <nodeelement>/power/state maps to some plugin
# HardwareManager.get_power/set_power selected by hardwaremanagement.method
# plugins can advertise a set of names if there is a desire for readable things
# exceptions to handle os images
# endpoints point to a class... usually, the class should have:
# -create
# -retrieve
# -update
# -delete
# functions. Console is special and just gets passed through
# see API.txt
import confluent.alerts as alerts
import confluent.config.attributes as attrscheme
import confluent.interface.console as console
import confluent.exceptions as exc
import confluent.messages as msg
import confluent.noderange as noderange
try:
import confluent.shellmodule as shellmodule
except ImportError:
pass
import itertools
import os
import sys
pluginmap = {}
def seek_element(currplace, currkey):
try:
return currplace[currkey]
except TypeError:
if isinstance(currplace, PluginCollection):
# we hit a plugin curated collection, all children
# are up to the plugin to comprehend
return currplace
raise
def nested_lookup(nestdict, key):
try:
return reduce(seek_element, key, nestdict)
except TypeError:
raise exc.NotFoundException("Invalid element requested")
def load_plugins():
# To know our plugins directory, we get the parent path of 'bin'
path = os.path.dirname(os.path.realpath(__file__))
plugintop = os.path.realpath(os.path.join(path, 'plugins'))
plugins = set()
for plugindir in os.listdir(plugintop):
plugindir = os.path.join(plugintop, plugindir)
if not os.path.isdir(plugindir):
continue
sys.path.append(plugindir)
# two passes, to avoid adding both py and pyc files
for plugin in os.listdir(plugindir):
if plugin.startswith('.'):
continue
(plugin, plugtype) = os.path.splitext(plugin)
if plugtype == '.sh':
pluginmap[plugin] = shellmodule.Plugin(
os.path.join(plugindir, plugin + '.sh'))
elif "__init__" not in plugin:
plugins.add(plugin)
for plugin in plugins:
tmpmod = __import__(plugin)
if 'plugin_names' in tmpmod.__dict__:
for name in tmpmod.plugin_names:
pluginmap[name] = tmpmod
else:
pluginmap[plugin] = tmpmod
rootcollections = ['noderange/', 'nodes/', 'nodegroups/', 'users/', 'events/']
class PluginRoute(object):
def __init__(self, routedict):
self.routeinfo = routedict
class PluginCollection(object):
def __init__(self, routedict):
self.routeinfo = routedict
# _ prefix indicates internal use (e.g. special console scheme) and should not
# be enumerated in any collection
noderesources = {
'attributes': {
'all': PluginRoute({'handler': 'attributes'}),
'current': PluginRoute({'handler': 'attributes'}),
},
'boot': {
'nextdevice': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'configuration': {
'management_controller': {
'alerts': {
'destinations': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'users': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'net_interfaces': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'reset': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'identifier': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'domain_name': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'ntp': {
'enabled': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'servers': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
}
},
'_console': {
'session': PluginRoute({
'pluginattrs': ['console.method'],
}),
},
'console': {
# this is a dummy value, http or socket must handle special
'session': PluginRoute({}),
'license': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'events': {
'hardware': {
'log': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'decode': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
},
'health': {
'hardware': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'identify': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'inventory': {
'hardware': {
'all': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'firmware': {
'all': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
},
'power': {
'state': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
'sensors': {
'hardware': {
'all': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'temperature': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'power': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'fans': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'leds': PluginCollection({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
},
}
nodegroupresources = {
'attributes': {
'all': PluginRoute({'handler': 'attributes'}),
'current': PluginRoute({'handler': 'attributes'}),
},
}
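# --- Illustrative sketch (not part of the original module) ---
# nested_lookup walks these routing dicts one path component at a time, so
# a path like /nodes/n1/power/state resolves to a PluginRoute leaf, while a
# shorter path resolves to a dict (a collection). The helper below is only
# a demonstration and is never called.
def _example_route_lookup():
    spec = nested_lookup(noderesources, ['power', 'state'])
    assert isinstance(spec, PluginRoute)
    assert spec.routeinfo['default'] == 'ipmi'
    # a dict result means the path names a collection, not a leaf
    assert isinstance(nested_lookup(noderesources, ['sensors', 'hardware']), dict)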
def create_user(inputdata, configmanager):
try:
username = inputdata['name']
del inputdata['name']
except (KeyError, ValueError):
raise exc.InvalidArgumentException()
configmanager.create_user(username, attributemap=inputdata)
def update_user(name, attribmap, configmanager):
try:
configmanager.set_user(name, attribmap)
except ValueError:
raise exc.InvalidArgumentException()
def show_user(name, configmanager):
userobj = configmanager.get_user(name)
rv = {}
for attr in attrscheme.user.iterkeys():
rv[attr] = None
if attr == 'password':
if 'cryptpass' in userobj:
rv['password'] = {'cryptvalue': True}
yield msg.CryptedAttributes(kv={'password': rv['password']},
desc=attrscheme.user[attr][
'description'])
else:
if attr in userobj:
rv[attr] = userobj[attr]
yield msg.Attributes(kv={attr: rv[attr]},
desc=attrscheme.user[attr]['description'])
def stripnode(iterablersp, node):
for i in iterablersp:
if i is None:
raise exc.NotImplementedException("Not Implemented")
i.strip_node(node)
yield i
def iterate_collections(iterable, forcecollection=True):
for coll in iterable:
if forcecollection and coll[-1] != '/':
coll += '/'
yield msg.ChildCollection(coll, candelete=True)
def iterate_resources(fancydict):
for resource in fancydict.iterkeys():
if resource.startswith("_"):
continue
if not isinstance(fancydict[resource], PluginRoute):  # a collection, not a leaf route
resource += '/'
yield msg.ChildCollection(resource)
def delete_user(user, configmanager):
configmanager.del_user(user)
yield msg.DeletedResource(user)
def delete_nodegroup_collection(collectionpath, configmanager):
if len(collectionpath) == 2: # just the nodegroup
group = collectionpath[-1]
configmanager.del_groups([group])
yield msg.DeletedResource(group)
else:
raise Exception("Not implemented")
def delete_node_collection(collectionpath, configmanager):
if len(collectionpath) == 2: # just node
node = collectionpath[-1]
configmanager.del_nodes([node])
yield msg.DeletedResource(node)
else:
raise Exception("Not implemented")
def enumerate_nodegroup_collection(collectionpath, configmanager):
nodegroup = collectionpath[1]
if not configmanager.is_nodegroup(nodegroup):
raise exc.NotFoundException("Invalid element requested")
del collectionpath[0:2]
collection = nested_lookup(nodegroupresources, collectionpath)
return iterate_resources(collection)
def enumerate_node_collection(collectionpath, configmanager):
if collectionpath == ['nodes']: # it is just '/node/', need to list nodes
allnodes = list(configmanager.list_nodes())
try:
allnodes.sort(key=noderange.humanify_nodename)
except TypeError:
allnodes.sort()
return iterate_collections(allnodes)
nodeorrange = collectionpath[1]
if collectionpath[0] == 'nodes' and not configmanager.is_node(nodeorrange):
raise exc.NotFoundException("Invalid element requested")
collection = nested_lookup(noderesources, collectionpath[2:])
if len(collectionpath) == 2 and collectionpath[0] == 'noderange':
collection['nodes'] = {}
if not isinstance(collection, dict):
raise exc.NotFoundException("Invalid element requested")
return iterate_resources(collection)
def create_group(inputdata, configmanager):
try:
groupname = inputdata['name']
del inputdata['name']
attribmap = {groupname: inputdata}
except KeyError:
raise exc.InvalidArgumentException()
try:
configmanager.add_group_attributes(attribmap)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
def create_node(inputdata, configmanager):
try:
nodename = inputdata['name']
del inputdata['name']
attribmap = {nodename: inputdata}
except KeyError:
raise exc.InvalidArgumentException('name not specified')
try:
configmanager.add_node_attributes(attribmap)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
def enumerate_collections(collections):
for collection in collections:
yield msg.ChildCollection(collection)
def handle_nodegroup_request(configmanager, inputdata,
pathcomponents, operation):
iscollection = False
routespec = None
if len(pathcomponents) < 2:
if operation == "create":
inputdata = msg.InputAttributes(pathcomponents, inputdata)
create_group(inputdata.attribs, configmanager)
allgroups = list(configmanager.get_groups())
try:
allgroups.sort(key=noderange.humanify_nodename)
except TypeError:
allgroups.sort()
return iterate_collections(allgroups)
elif len(pathcomponents) == 2:
iscollection = True
else:
try:
routespec = nested_lookup(nodegroupresources, pathcomponents[2:])
if isinstance(routespec, dict):
iscollection = True
elif isinstance(routespec, PluginCollection):
iscollection = False # it is a collection, but plugin defined
except KeyError:
raise exc.NotFoundException("Invalid element requested")
if iscollection:
if operation == "delete":
return delete_nodegroup_collection(pathcomponents,
configmanager)
elif operation == "retrieve":
return enumerate_nodegroup_collection(pathcomponents,
configmanager)
else:
raise Exception("TODO")
plugroute = routespec.routeinfo
inputdata = msg.get_input_message(
pathcomponents[2:], operation, inputdata)
if 'handler' in plugroute: # fixed handler definition
hfunc = getattr(pluginmap[plugroute['handler']], operation)
return hfunc(
nodes=None, element=pathcomponents,
configmanager=configmanager,
inputdata=inputdata)
raise Exception("unknown case encountered")
def handle_node_request(configmanager, inputdata, operation,
pathcomponents, autostrip=True):
iscollection = False
routespec = None
if pathcomponents[0] == 'noderange':
if len(pathcomponents) > 3 and pathcomponents[2] == 'nodes':
# transform into a normal looking node request
# this does mean we don't see if it is a valid
# child, but that's not a goal for the noderange
# facility anyway
isnoderange = False
pathcomponents = pathcomponents[2:]
else:
isnoderange = True
else:
isnoderange = False
try:
nodeorrange = pathcomponents[1]
if not isnoderange and not configmanager.is_node(nodeorrange):
raise exc.NotFoundException("Invalid Node")
if isnoderange:
try:
nodes = noderange.NodeRange(nodeorrange, configmanager).nodes
except Exception as e:
raise exc.NotFoundException("Invalid Noderange: " + str(e))
else:
nodes = (nodeorrange,)
except IndexError: # doesn't actually have a long enough path
# this is enumerating a list of nodes or just empty noderange
if isnoderange and operation == "retrieve":
return iterate_collections([])
elif isnoderange or operation == "delete":
raise exc.InvalidArgumentException()
if operation == "create":
inputdata = msg.InputAttributes(pathcomponents, inputdata)
create_node(inputdata.attribs, configmanager)
allnodes = list(configmanager.list_nodes())
try:
allnodes.sort(key=noderange.humanify_nodename)
except TypeError:
allnodes.sort()
return iterate_collections(allnodes)
if (isnoderange and len(pathcomponents) == 3 and
pathcomponents[2] == 'nodes'):
# this means that it's a list of relevant nodes
nodes = list(nodes)
try:
nodes.sort(key=noderange.humanify_nodename)
except TypeError:
nodes.sort()
return iterate_collections(nodes)
if len(pathcomponents) == 2:
iscollection = True
else:
try:
routespec = nested_lookup(noderesources, pathcomponents[2:])
except KeyError:
raise exc.NotFoundException("Invalid element requested")
if isinstance(routespec, dict):
iscollection = True
elif isinstance(routespec, PluginCollection):
iscollection = False # it is a collection, but plugin defined
if iscollection:
if operation == "delete":
return delete_node_collection(pathcomponents, configmanager)
elif operation == "retrieve":
return enumerate_node_collection(pathcomponents, configmanager)
else:
raise Exception("TODO here")
del pathcomponents[0:2]
passvalues = []
plugroute = routespec.routeinfo
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata, nodes, isnoderange)
if 'handler' in plugroute: # fixed handler definition, easy enough
hfunc = getattr(pluginmap[plugroute['handler']], operation)
passvalue = hfunc(
nodes=nodes, element=pathcomponents,
configmanager=configmanager,
inputdata=inputdata)
if isnoderange:
return passvalue
else:
return stripnode(passvalue, nodes[0])
elif 'pluginattrs' in plugroute:
nodeattr = configmanager.get_node_attributes(
nodes, plugroute['pluginattrs'])
plugpath = None
if 'default' in plugroute:
plugpath = plugroute['default']
nodesbyhandler = {}
for node in nodes:
for attrname in plugroute['pluginattrs']:
if attrname in nodeattr[node]:
plugpath = nodeattr[node][attrname]['value']
if plugpath is not None:
hfunc = getattr(pluginmap[plugpath], operation)
if hfunc in nodesbyhandler:
nodesbyhandler[hfunc].append(node)
else:
nodesbyhandler[hfunc] = [node]
for hfunc in nodesbyhandler:
passvalues.append(hfunc(
nodes=nodesbyhandler[hfunc], element=pathcomponents,
configmanager=configmanager,
inputdata=inputdata))
if isnoderange or not autostrip:
return itertools.chain(*passvalues)
elif isinstance(passvalues[0], console.Console):
return passvalues[0]
else:
return stripnode(passvalues[0], nodes[0])
def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
"""Given a full path request, return an object.
The plugins should generally return some sort of iterator.
An exception is made for console/session, which should return
a class with connect(), read(), write(bytes), and close()
"""
pathcomponents = path.split('/')
del pathcomponents[0] # discard the value from leading /
if pathcomponents[-1] == '':
del pathcomponents[-1]
if not pathcomponents: # root collection list
return enumerate_collections(rootcollections)
elif pathcomponents[0] == 'noderange':
return handle_node_request(configmanager, inputdata, operation,
pathcomponents, autostrip)
elif pathcomponents[0] == 'nodegroups':
return handle_nodegroup_request(configmanager, inputdata,
pathcomponents,
operation)
elif pathcomponents[0] == 'nodes':
# single node request of some sort
return handle_node_request(configmanager, inputdata,
operation, pathcomponents, autostrip)
elif pathcomponents[0] == 'users':
# TODO: when non-administrator accounts exist,
# they must only be allowed to see their own user
try:
user = pathcomponents[1]
except IndexError: # it's just users/
if operation == 'create':
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata)
create_user(inputdata.attribs, configmanager)
return iterate_collections(configmanager.list_users(),
forcecollection=False)
if user not in configmanager.list_users():
raise exc.NotFoundException("Invalid user %s" % user)
if operation == 'retrieve':
return show_user(user, configmanager)
elif operation == 'delete':
return delete_user(user, configmanager)
elif operation == 'update':
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata)
update_user(user, inputdata.attribs, configmanager)
return show_user(user, configmanager)
elif pathcomponents[0] == 'events':
try:
element = pathcomponents[1]
except IndexError:
if operation != 'retrieve':
raise exc.InvalidArgumentException('Target is read-only')
return (msg.ChildCollection('decode'),)
if element != 'decode':
raise exc.NotFoundException()
if operation == 'update':
return alerts.decode_alert(inputdata, configmanager)
else:
raise exc.NotFoundException()
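# --- Illustrative sketch (not part of the original module) ---
# handle_path is the single entry point the HTTP and socket layers use.
# The configmanager argument below is a stand-in for a real configuration
# manager instance; the helper is never called here.
def _example_handle_path(configmanager):
    # enumerate the root collections ('noderange/', 'nodes/', ...)
    for item in handle_path('/', 'retrieve', configmanager):
        print(item)  # msg.ChildCollection instances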
|
|
# Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume_actions API.
"""
import copy
from cinder.api.validation import parameter_types
container_format = parameter_types.description
extend = {
'type': 'object',
'properties': {
'os-extend': {
'type': 'object',
'properties': {
'new_size': parameter_types.volume_size,
},
'required': ['new_size'],
'additionalProperties': False,
},
},
'required': ['os-extend'],
'additionalProperties': False,
}
attach = {
'type': 'object',
'properties': {
'os-attach': {
'type': 'object',
'properties': {
'instance_uuid': parameter_types.uuid,
'mountpoint': {
'type': 'string', 'minLength': 1,
'maxLength': 255
},
'host_name': {'type': 'string', 'maxLength': 255},
'mode': {'type': 'string', 'enum': ['rw', 'ro']}
},
'required': ['mountpoint'],
'anyOf': [{'required': ['instance_uuid']},
{'required': ['host_name']}],
'additionalProperties': False,
},
},
'required': ['os-attach'],
'additionalProperties': False,
}
detach = {
'type': 'object',
'properties': {
'os-detach': {
'type': ['object', 'null'],
'properties': {
# NOTE(mriedem): This allows null for backward compatibility.
'attachment_id': parameter_types.uuid_allow_null,
},
'additionalProperties': False,
},
},
'required': ['os-detach'],
'additionalProperties': False,
}
retype = {
'type': 'object',
'properties': {
'os-retype': {
'type': 'object',
'properties': {
'new_type': {'type': 'string'},
'migration_policy': {
'type': ['string', 'null'],
'enum': ['on-demand', 'never']},
},
'required': ['new_type'],
'additionalProperties': False,
},
},
'required': ['os-retype'],
'additionalProperties': False,
}
set_bootable = {
'type': 'object',
'properties': {
'os-set_bootable': {
'type': 'object',
'properties': {
'bootable': parameter_types.boolean
},
'required': ['bootable'],
'additionalProperties': False,
},
},
'required': ['os-set_bootable'],
'additionalProperties': False,
}
volume_upload_image = {
'type': 'object',
'properties': {
'os-volume_upload_image': {
'type': 'object',
'properties': {
'image_name': {
'type': 'string', 'minLength': 1, 'maxLength': 255
},
'force': parameter_types.boolean,
'disk_format': {
'type': 'string',
'enum': ['raw', 'vmdk', 'vdi', 'qcow2',
'vhd', 'vhdx', 'ploop']
},
'container_format': container_format
},
'required': ['image_name'],
'additionalProperties': False,
},
},
'required': ['os-volume_upload_image'],
'additionalProperties': False,
}
volume_upload_image_v31 = copy.deepcopy(volume_upload_image)
volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][
'visibility'] = {'type': 'string',
'enum': ['community', 'public', 'private', 'shared']}
volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][
'protected'] = parameter_types.boolean
initialize_connection = {
'type': 'object',
'properties': {
'os-initialize_connection': {
'type': 'object',
'properties': {
'connector': {'type': ['object', 'string']},
},
'required': ['connector'],
'additionalProperties': False,
},
},
'required': ['os-initialize_connection'],
'additionalProperties': False,
}
terminate_connection = {
'type': 'object',
'properties': {
'os-terminate_connection': {
'type': 'object',
'properties': {
'connector': {'type': ['string', 'object', 'null']},
},
'required': ['connector'],
'additionalProperties': False,
},
},
'required': ['os-terminate_connection'],
'additionalProperties': False,
}
volume_readonly_update = {
'type': 'object',
'properties': {
'os-update_readonly_flag': {
'type': 'object',
'properties': {
'readonly': parameter_types.boolean
},
'required': ['readonly'],
'additionalProperties': False,
},
},
'required': ['os-update_readonly_flag'],
'additionalProperties': False,
}
reimage = {
'type': 'object',
'properties': {
'os-reimage': {
'type': 'object',
'properties': {
'image_id': parameter_types.uuid,
'reimage_reserved': parameter_types.boolean,
},
'required': ['image_id'],
'additionalProperties': False,
},
},
'required': ['os-reimage'],
'additionalProperties': False,
}
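# --- Illustrative sketch (not part of the original module) ---
# Cinder normally wires these documents into its request-body validation
# decorator, but they are plain JSON Schema and can be exercised directly
# with the jsonschema package (assuming parameter_types resolves as usual):
#
#   import jsonschema
#   jsonschema.validate({'os-extend': {'new_size': 2}}, extend)    # passes
#   jsonschema.validate({'os-extend': {}}, extend)  # ValidationError: new_size required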
|
|
from pathlib import Path
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def plot_history(out, history, metric='loss', val=True, title=None, width=8, height=6):
title = title or 'model {}'.format(metric)
val_metric = 'val_{}'.format(metric)
plt.figure(figsize=(width, height))
plt.plot(history.history[metric], marker='o')
if val:
plt.plot(history.history[val_metric], marker='d')
plt.title(title)
plt.ylabel(metric)
plt.xlabel('epoch')
if val:
plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')
else:
plt.legend(['train_{}'.format(metric)], loc='upper center')
png = '{}.plot.{}.png'.format(out, metric)
plt.savefig(png, bbox_inches='tight')
plt.close()
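# --- Illustrative usage sketch (assumes a Keras-style History object) ---
# Any object exposing a .history dict keyed by metric name works, e.g.:
#
#   class _FakeHistory(object):
#       history = {'loss': [0.9, 0.5, 0.3], 'val_loss': [1.0, 0.6, 0.4]}
#   plot_history('run1', _FakeHistory())   # writes run1.plot.loss.png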
def plot_scatter(data, classes, out, width=10, height=8):
cmap = plt.cm.get_cmap('gist_rainbow')
plt.figure(figsize=(width, height))
plt.scatter(data[:, 0], data[:, 1], c=classes, cmap=cmap, lw=0.5, edgecolor='black', alpha=0.7)
plt.colorbar()
png = '{}.png'.format(out)
plt.savefig(png, bbox_inches='tight')
plt.close()
def plot_error(y_true, y_pred, batch, file_ext, file_pre='output_dir', subsample=1000):
if batch % 10:
return
total = len(y_true)
if subsample and subsample < total:
usecols = np.random.choice(total, size=subsample, replace=False)
y_true = y_true[usecols]
y_pred = y_pred[usecols]
y_true = y_true * 100
y_pred = y_pred * 100
diffs = y_pred - y_true
bins = np.linspace(-200, 200, 100)
if batch == 0:
y_shuf = np.random.permutation(y_true)
plt.hist(y_shuf - y_true, bins, alpha=0.5, label='Random')
plt.hist(diffs, bins, alpha=0.3, label='Epoch {}'.format(batch + 1))
plt.title("Histogram of errors in percentage growth")
plt.legend(loc='upper right')
plt.savefig(file_pre + '.histogram' + file_ext + '.b' + str(batch) + '.png')
plt.close()
# Plot measured vs. predicted values
fig, ax = plt.subplots()
plt.grid('on')
ax.scatter(y_true, y_pred, color='red', s=10)
ax.plot([y_true.min(), y_true.max()],
[y_true.min(), y_true.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.savefig(file_pre + '.diff' + file_ext + '.b' + str(batch) + '.png')
plt.close()
def plot_array(nparray, xlabel, ylabel, title, fname):
plt.figure()
plt.plot(nparray, lw=3.)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(fname, bbox_inches='tight')
plt.close()
# UTILS for UQ / CALIBRATION VISUALIZATION
from matplotlib.colors import LogNorm
def plot_density_observed_vs_predicted(Ytest, Ypred, pred_name=None, figprefix=None):
"""Functionality to plot a 2D histogram of the distribution of observed (ground truth)
values vs. predicted values. The plot generated is stored in a png file.
Parameters
----------
Ytest : numpy array
Array with (true) observed values
Ypred : numpy array
Array with predicted values.
pred_name : string
Name of data column or quantity predicted (e.g. growth, AUC, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_density_predictions.png' string will be appended to the
figprefix given.
"""
xbins = 51
plt.figure(figsize=(24, 18)) # (30, 16)
ax = plt.gca()
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
ax.plot([Ytest.min(), Ytest.max()], [Ytest.min(), Ytest.max()], 'r--', lw=4.)
plt.hist2d(Ytest, Ypred, bins=xbins, norm=LogNorm())
cb = plt.colorbar()
ax.set_xlabel('Observed ' + pred_name, fontsize=38, labelpad=15.)
ax.set_ylabel('Mean ' + pred_name + ' Predicted', fontsize=38, labelpad=15.)
ax.axis([Ytest.min() * 0.98, Ytest.max() * 1.02, Ytest.min() * 0.98, Ytest.max() * 1.02])
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=28)
plt.grid(True)
plt.savefig(figprefix + '_density_predictions.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_density_predictions.png')
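# --- Illustrative usage sketch (synthetic data, hypothetical names) ---
#
#   Ytest = np.random.randn(1000)
#   Ypred = Ytest + 0.1 * np.random.randn(1000)
#   plot_density_observed_vs_predicted(Ytest, Ypred, pred_name='growth',
#                                      figprefix='demo')
#   # writes demo_density_predictions.png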
def plot_2d_density_sigma_vs_error(sigma, yerror, method=None, figprefix=None):
"""Functionality to plot a 2D histogram of the distribution of
the standard deviations computed for the predictions vs. the
computed errors (i.e. values of observed - predicted).
The plot generated is stored in a png file.
Parameters
----------
sigma : numpy array
Array with standard deviations computed.
yerror : numpy array
Array with errors computed (observed - predicted).
method : string
Method used to compute the standard deviations (e.g. dropout,
heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_density_std_error.png' string will be appended to the
figprefix given.
"""
xbins = 51
ybins = 31
plt.figure(figsize=(24, 18)) # (30, 16)
ax = plt.gca()
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.hist2d(sigma, yerror, bins=[xbins, ybins], norm=LogNorm())
cb = plt.colorbar()
ax.set_xlabel('Standard Deviation (' + method + ')', fontsize=38, labelpad=15.)
ax.set_ylabel('Error: Observed - Mean Predicted', fontsize=38, labelpad=15.)
ax.axis([sigma.min() * 0.98, sigma.max() * 1.02, -yerror.max(), yerror.max()])
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=28)
plt.grid(True)
plt.savefig(figprefix + '_density_std_error.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_density_std_error.png')
def plot_histogram_error_per_sigma(sigma, yerror, method=None, figprefix=None):
"""Functionality to plot a 1D histogram of the distribution of
computed errors (i.e. values of observed - predicted) observed
for specific values of the computed standard deviations. The range of
computed standard deviations is split into xbins bins, and the 1D
histograms of the error distributions for the smallest
standard-deviation bins are plotted.
The plot generated is stored in a png file.
Parameters
----------
sigma : numpy array
Array with standard deviations computed.
yerror : numpy array
Array with errors computed (observed - predicted).
method : string
Method used to compute the standard deviations (e.g. dropout,
heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_histogram_error_per_std.png' string will be appended to
the figprefix given.
"""
xbins = 21
ybins = 31
H, xedges, yedges, img = plt.hist2d(sigma, yerror, # normed=True,
bins=[xbins, ybins])
plt.figure(figsize=(18, 24))
legend = []
for ii in range(4): # (H.shape[0]):
if ii != 1:
plt.plot(yedges[0:H.shape[1]], H[ii, :] / np.sum(H[ii, :]),
marker='o', markersize=12, lw=6.)
legend.append(str((xedges[ii] + xedges[ii + 1]) / 2))
plt.legend(legend, fontsize=28)
ax = plt.gca()
plt.title('Error Dist. per Standard Deviation for ' + method, fontsize=40)
ax.set_xlabel('Error: Observed - Mean Predicted', fontsize=38, labelpad=15.)
ax.set_ylabel('Density', fontsize=38, labelpad=15.)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid(True)
plt.savefig(figprefix + '_histogram_error_per_std.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_histogram_error_per_std.png')
def plot_decile_predictions(Ypred, Ypred_Lp, Ypred_Hp, decile_list, pred_name=None, figprefix=None):
"""Functionality to plot the mean of the deciles predicted.
The plot generated is stored in a png file.
Parameters
----------
Ypred : numpy array
Array with median predicted values.
Ypred_Lp : numpy array
Array with low decile predicted values.
Ypred_Hp : numpy array
Array with high decile predicted values.
decile_list : string list
List of deciles predicted (e.g. '1st', '9th', etc.)
pred_name : string
Name of data column or quantity predicted (e.g. growth, AUC, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_decile_predictions.png' string will be appended to the
figprefix given.
"""
index_ = np.argsort(Ypred)
plt.figure(figsize=(24, 18))
plt.scatter(range(index_.shape[0]), Ypred[index_])
plt.scatter(range(index_.shape[0]), Ypred_Lp[index_])
plt.scatter(range(index_.shape[0]), Ypred_Hp[index_])
plt.legend(decile_list, fontsize=28)
plt.xlabel('Index', fontsize=38.)
plt.ylabel(pred_name, fontsize=38.)
plt.title('Predicted ' + pred_name + ' Deciles', fontsize=40)
plt.grid()
ax = plt.gca()
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.savefig(figprefix + '_decile_predictions.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_decile_predictions.png')
def plot_calibration_interpolation(mean_sigma, error, splineobj1, splineobj2, method='', figprefix=None, steps=False):
"""Functionality to plot empirical calibration curves
estimated by interpolation of the computed
standard deviations and errors. Since the estimations
are very noisy, two levels of smoothing are used. Both
can be plotted independently, if requested.
The plot(s) generated is(are) stored in png file(s).
Parameters
----------
mean_sigma : numpy array
Array with the mean standard deviations computed in inference.
error : numpy array
Array with the errors computed from the means predicted in inference.
splineobj1 : scipy.interpolate python object
A python object from scipy.interpolate that computes a
cubic Hermite spline (PchipInterpolator) to express
the interpolation after the first smoothing. This
spline is a partial result generated during the empirical
calibration procedure.
splineobj2 : scipy.interpolate python object
A python object from scipy.interpolate that computes a
cubic Hermite spline (PchipInterpolator) to express
the mapping from standard deviation to error. This
spline is generated for interpolating the predictions
after a process of smoothing-interpolation-smoothing
computed during the empirical calibration procedure.
method : string
Method used to compute the standard deviations (e.g. dropout,
heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_empirical_calibration_interpolation.png' string will be appended to
the figprefix given.
steps : boolean
Besides the complete empirical calibration (including the
interpolating spline), also generate a partial plot with only the
spline after the first smoothing level (smooth1).
"""
xmax = np.max(mean_sigma)
xmin = np.min(mean_sigma)
xp23 = np.linspace(xmin, xmax, 200)
yp23 = splineobj2(xp23)
if steps:
# Plot first smoothing
yp23_1 = splineobj1(xp23)
fig = plt.figure(figsize=(24, 18))
ax = plt.gca()
ax.plot(mean_sigma, error, 'kx')
ax.plot(xp23, yp23_1, 'gx', ms=20)
plt.legend(['True', 'Cubic Spline'], fontsize=28)
plt.xlabel('Standard Deviation Predicted (' + method + ')', fontsize=38.)
plt.ylabel('Error: ABS Observed - Mean Predicted', fontsize=38.)
plt.title('Calibration (by Interpolation)', fontsize=40)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid()
fig.tight_layout()
plt.savefig(figprefix + '_empirical_calibration_interp_smooth1.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_empirical_calibration_interp_smooth1.png')
fig = plt.figure(figsize=(24, 18))
ax = plt.gca()
ax.plot(mean_sigma, error, 'kx')
ax.plot(xp23, yp23, 'rx', ms=20)
plt.legend(['True', 'Cubic Spline'], fontsize=28)
plt.xlabel('Standard Deviation Predicted (' + method + ')', fontsize=38.)
plt.ylabel('Error: ABS Observed - Mean Predicted', fontsize=38.)
plt.title('Calibration (by Interpolation)', fontsize=40)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid()
fig.tight_layout()
plt.savefig(figprefix + '_empirical_calibration_interpolation.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_empirical_calibration_interpolation.png')
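# --- Illustrative sketch (assumes scipy; names are hypothetical) ---
# The spline arguments are typically PchipInterpolator objects fit to
# (sigma, error) pairs with unique, sorted sigmas, e.g.:
#
#   from scipy.interpolate import PchipInterpolator
#   order = np.argsort(mean_sigma)
#   spl = PchipInterpolator(mean_sigma[order], error[order])
#   plot_calibration_interpolation(mean_sigma, error, spl, spl,
#                                  method='dropout', figprefix='demo')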
def plot_calibrated_std(y_test, y_pred, std_calibrated, thresC, pred_name=None, figprefix=None):
"""Functionality to plot values in testing set after calibration. An estimation of the lower-confidence samples is made. The plot generated is stored in a png file.
Parameters
----------
y_test : numpy array
Array with (true) observed values.
y_pred : numpy array
Array with predicted values.
std_calibrated : numpy array
Array with standard deviation values after calibration.
thresC : float
Threshold to label low confidence predictions (low
confidence predictions are the ones with std > thresC).
pred_name : string
Name of data column or quantity predicted (e.g. growth, AUC, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_calibrated.png' string will be appended to the
figprefix given.
"""
N = y_test.shape[0]
index = np.argsort(y_pred)
x = np.array(range(N))
indexC = std_calibrated > thresC
alphafill = 0.5
if N > 2000:
alphafill = 0.7
scale = 120
fig = plt.figure(figsize=(24, 18))
ax = plt.gca()
ax.scatter(x, y_test[index], color='red', s=scale, alpha=0.5)
plt.fill_between(x, y_pred[index] - 1.28 * std_calibrated[index],
y_pred[index] + 1.28 * std_calibrated[index],
color='gray', alpha=alphafill)
plt.scatter(x, y_pred[index], color='orange', s=scale)
plt.scatter(x[indexC], y_test[indexC], color='green', s=scale, alpha=0.5)
plt.legend(['True', '1.28 Std', 'Pred', 'Low conf'], fontsize=28)
plt.xlabel('Index', fontsize=38.)
plt.ylabel(pred_name + ' Predicted', fontsize=38.)
plt.title('Calibrated Standard Deviation', fontsize=40)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid()
fig.tight_layout()
plt.savefig(figprefix + '_calibrated.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_calibrated.png')
def plot_contamination(y_true, y_pred, sigma, T=None, thresC=0.1, pred_name=None, figprefix=None):
"""Functionality to plot results for the contamination model.
This includes the latent variables T if they are given (i.e.
if the results provided correspond to training results). Global
parameters for the normal distribution are used for shading the
80% confidence interval.
If results for training (i.e. T available), samples determined to
be outliers (i.e. samples whose probability of membership to the
heavy tailed distribution (Cauchy) is greater than the threshold
given) are highlighted.
The plot(s) generated is(are) stored in a png file.
Parameters
----------
y_true : numpy array
Array with observed values.
y_pred : numpy array
Array with predicted values.
sigma : float
Standard deviation of the normal distribution.
T : numpy array
Array with latent variables (i.e. membership in the normal and
heavy-tailed distributions). In testing, T is not available (i.e. None).
thresC : float
Threshold to label outliers (outliers are the ones
with probability of membership to heavy-tailed distribution,
i.e. T[:,1] > thresC).
pred_name : string
Name of data column or quantity predicted (e.g. growth, AUC, etc.).
figprefix : string
String to prefix the filename to store the figures generated.
A '_contamination.png' string will be appended to the
figprefix given.
"""
N = y_true.shape[0]
index = np.argsort(y_pred)
x = np.array(range(N))
if T is not None:
indexG = T[:, 0] > (1. - thresC)
indexC = T[:, 1] > thresC
ss = sigma * indexG
prefig = '_outTrain'
else:
ss = sigma
prefig = '_outTest'
auxGh = y_pred + 1.28 * ss
auxGl = y_pred - 1.28 * ss
# Plotting Outliers
scale = 120
fig = plt.figure(figsize=(24, 18))
ax = plt.gca()
ax.scatter(x, y_true[index], color='red', s=scale)
if T is not None:
plt.scatter(x[indexC], y_true[indexC], color='green', s=scale) # , alpha=0.8)
plt.scatter(x, y_pred[index], color='orange', s=scale)
plt.fill_between(x, auxGl[index], auxGh[index], color='gray', alpha=0.5)
if T is not None:
plt.legend(['True', 'Outlier', 'Pred', '1.28 Std'], fontsize=28)
else:
plt.legend(['True', 'Pred', '1.28 Std'], fontsize=28)
plt.xlabel('Index', fontsize=38.)
plt.ylabel(pred_name + ' Predicted', fontsize=38.)
plt.title('Contamination Results', fontsize=40)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid()
fig.tight_layout()
plt.savefig(figprefix + prefig + '_contamination.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + prefig + '_contamination.png')
if T is not None:
# Plotting Latent Variables vs error
error = np.abs(y_true - y_pred)
fig = plt.figure(figsize=(24, 18))
ax = plt.gca()
ax.scatter(error, T[:, 0], color='blue', s=scale)
ax.scatter(error, T[:, 1], color='orange', s=scale)
plt.legend(['Normal', 'Heavy-Tailed'], fontsize=28)
plt.xlabel('ABS Error', fontsize=38.)
plt.ylabel('Membership Probability', fontsize=38.)
plt.title('Contamination: Latent Variables', fontsize=40)
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
plt.grid()
fig.tight_layout()
plt.savefig(figprefix + '_T_contamination.png', bbox_inches='tight')
plt.close()
print('Generated plot: ', figprefix + '_T_contamination.png')
# plot training and validation metrics together and generate one chart per metric
def plot_metrics(history, title=None, skip_ep=0, outdir='.', add_lr=False):
""" Plots keras training curves history.
Args:
skip_ep: number of epochs to skip when plotting metrics
add_lr: add curve of learning rate progression over epochs
"""
def capitalize_metric(met):
return ' '.join(s.capitalize() for s in met.split('_'))
all_metrics = list(history.history.keys())
pr_metrics = ['_'.join(m.split('_')[1:]) for m in all_metrics if 'val' in m]
epochs = np.asarray(history.epoch) + 1
if len(epochs) <= skip_ep:
skip_ep = 0
eps = epochs[skip_ep:]
hh = history.history
for p, m in enumerate(pr_metrics):
metric_name = m
metric_name_val = 'val_' + m
y_tr = hh[metric_name][skip_ep:]
y_vl = hh[metric_name_val][skip_ep:]
ymin = min(set(y_tr).union(y_vl))
ymax = max(set(y_tr).union(y_vl))
lim = (ymax - ymin) * 0.1
ymin, ymax = ymin - lim, ymax + lim
# Start figure
fig, ax1 = plt.subplots()
# Plot metrics
ax1.plot(eps, y_tr, color='b', marker='.', linestyle='-', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name))
ax1.plot(eps, y_vl, color='r', marker='.', linestyle='--', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name_val))
ax1.set_xlabel('Epoch')
ax1.set_ylabel(capitalize_metric(metric_name))
ax1.set_xlim([min(eps) - 1, max(eps) + 1])
ax1.set_ylim([ymin, ymax])
ax1.tick_params('y', colors='k')
# Add learning rate
if (add_lr is True) and ('lr' in hh):
ax2 = ax1.twinx()
ax2.plot(eps, hh['lr'][skip_ep:], color='g', marker='.', linestyle=':', linewidth=1,
alpha=0.6, markersize=5, label='LR')
ax2.set_ylabel('Learning rate', color='g', fontsize=12)
ax2.set_yscale('log')
ax2.tick_params('y', colors='g')
ax1.grid(True)
legend = ax1.legend(loc='best', prop={'size': 10})
frame = legend.get_frame()
frame.set_facecolor('0.95')
if title is not None:
plt.title(title)
figpath = Path(outdir) / (metric_name + '.png')
plt.savefig(figpath, bbox_inches='tight')
plt.close()
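# --- Illustrative usage sketch (assumes a Keras-style History object) ---
# history.history holds {'loss': [...], 'val_loss': [...], 'lr': [...]}
# and history.epoch holds [0, 1, 2, ...]; one chart is written per metric
# that has a 'val_' counterpart, e.g.:
#
#   plot_metrics(history, title='run', skip_ep=1, outdir='plots', add_lr=True)
#   # writes plots/loss.png, one png per metric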
|
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Simple distributed kmeans implementation. Relies on an abstraction
for the training matrix that can be sharded over several machines.
"""
import os
import sys
import argparse
import numpy as np
import faiss
from multiprocessing.dummy import Pool as ThreadPool
from faiss.contrib import rpc
from faiss.contrib.datasets import SyntheticDataset
from faiss.contrib.vecs_io import bvecs_mmap, fvecs_mmap
from faiss.contrib.clustering import DatasetAssign, DatasetAssignGPU, kmeans
class DatasetAssignDispatch:
"""dispatches to several other DatasetAssigns and combines the
results"""
def __init__(self, xes, in_parallel):
self.xes = xes
self.d = xes[0].dim()
if not in_parallel:
self.imap = map
else:
self.pool = ThreadPool(len(self.xes))
self.imap = self.pool.imap
self.sizes = list(map(lambda x: x.count(), self.xes))
self.cs = np.cumsum([0] + self.sizes)
def count(self):
return self.cs[-1]
def dim(self):
return self.d
def get_subset(self, indices):
res = np.zeros((len(indices), self.d), dtype='float32')
nos = np.searchsorted(self.cs[1:], indices, side='right')
def handle(i):
mask = nos == i
sub_indices = indices[mask] - self.cs[i]
subset = self.xes[i].get_subset(sub_indices)
res[mask] = subset
list(self.imap(handle, range(len(self.xes))))
return res
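# Worked example of the shard routing above: with two shards of sizes
# [3, 4], self.cs == [0, 3, 7]. For global indices [1, 5]:
#   np.searchsorted(cs[1:], [1, 5], side='right') -> [0, 1]
# so index 1 stays on shard 0 and index 5 maps to local index 5 - 3 = 2
# on shard 1.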
def assign_to(self, centroids, weights=None):
src = self.imap(
lambda x: x.assign_to(centroids, weights),
self.xes
)
I = []
D = []
sum_per_centroid = None
for Ii, Di, sum_per_centroid_i in src:
I.append(Ii)
D.append(Di)
if sum_per_centroid is None:
sum_per_centroid = sum_per_centroid_i
else:
sum_per_centroid += sum_per_centroid_i
return np.hstack(I), np.hstack(D), sum_per_centroid
class AssignServer(rpc.Server):
""" Assign version that can be exposed via RPC """
def __init__(self, s, assign, log_prefix=''):
rpc.Server.__init__(self, s, log_prefix=log_prefix)
self.assign = assign
def __getattr__(self, f):
return getattr(self.assign, f)
def do_test(todo):
testdata = '/datasets01_101/simsearch/041218/bigann/bigann_learn.bvecs'
if os.path.exists(testdata):
x = bvecs_mmap(testdata)
else:
print("using synthetic dataset")
ds = SyntheticDataset(128, 100000, 0, 0)
x = ds.get_train()
# bad distribution to stress-test split code
xx = x[:100000].copy()
xx[:50000] = x[0]
if "0" in todo:
# reference C++ run
km = faiss.Kmeans(x.shape[1], 1000, niter=20, verbose=True)
km.train(xx.astype('float32'))
if "1" in todo:
# using the Faiss c++ implementation
data = DatasetAssign(xx)
kmeans(1000, data, 20)
if "2" in todo:
# use the dispatch object (on local datasets)
data = DatasetAssignDispatch([
DatasetAssign(xx[20000 * i : 20000 * (i + 1)])
for i in range(5)
], False
)
kmeans(1000, data, 20)
if "3" in todo:
# same, with GPU
ngpu = faiss.get_num_gpus()
print('using %d GPUs' % ngpu)
data = DatasetAssignDispatch([
DatasetAssignGPU(xx[100000 * i // ngpu: 100000 * (i + 1) // ngpu], i)
for i in range(ngpu)
], True
)
kmeans(1000, data, 20)
def main():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('general options')
aa('--test', default='', help='perform tests (comma-separated numbers)')
aa('--k', default=0, type=int, help='nb centroids')
aa('--seed', default=1234, type=int, help='random seed')
aa('--niter', default=20, type=int, help='nb iterations')
aa('--gpu', default=-2, type=int, help='GPU to use (-2:none, -1: all)')
group = parser.add_argument_group('I/O options')
aa('--indata', default='',
help='data file to load (supported formats fvecs, bvecs, npy)')
aa('--i0', default=0, type=int, help='first vector to keep')
aa('--i1', default=-1, type=int, help='last vec to keep + 1')
aa('--out', default='', help='file to store centroids')
aa('--store_each_iteration', default=False, action='store_true',
help='store centroid checkpoints')
group = parser.add_argument_group('server options')
aa('--server', action='store_true', default=False, help='run server')
aa('--port', default=12345, type=int, help='server port')
aa('--when_ready', default=None, help='store host:port to this file when ready')
aa('--ipv4', default=False, action='store_true', help='force ipv4')
group = parser.add_argument_group('client options')
aa('--client', action='store_true', default=False, help='run client')
aa('--servers', default='', help='list of server:port separated by spaces')
args = parser.parse_args()
if args.test:
do_test(args.test.split(','))
return
# prepare data matrix (either local or remote)
if args.indata:
print('loading ', args.indata)
if args.indata.endswith('.bvecs'):
x = bvecs_mmap(args.indata)
elif args.indata.endswith('.fvecs'):
x = fvecs_mmap(args.indata)
elif args.indata.endswith('.npy'):
x = np.load(args.indata, mmap_mode='r')
else:
raise AssertionError
if args.i1 == -1:
args.i1 = len(x)
x = x[args.i0:args.i1]
if args.gpu == -2:
data = DatasetAssign(x)
else:
print('moving to GPU')
data = DatasetAssignGPU(x, args.gpu)
elif args.client:
print('connecting to servers')
def connect_client(hostport):
host, port = hostport.split(':')
port = int(port)
print('connecting %s:%d' % (host, port))
client = rpc.Client(host, port, v6=not args.ipv4)
print('client %s:%d ready' % (host, port))
return client
hostports = args.servers.strip().split(' ')
# pool = ThreadPool(len(hostports))
data = DatasetAssignDispatch(
list(map(connect_client, hostports)),
True
)
else:
raise AssertionError
if args.server:
print('starting server')
log_prefix = f"{rpc.socket.gethostname()}:{args.port}"
rpc.run_server(
lambda s: AssignServer(s, data, log_prefix=log_prefix),
args.port, report_to_file=args.when_ready,
v6=not args.ipv4)
else:
print('running kmeans')
centroids = kmeans(args.k, data, niter=args.niter, seed=args.seed,
checkpoint=args.out if args.store_each_iteration else None)
if args.out != '':
print('writing centroids to', args.out)
np.save(args.out, centroids)
if __name__ == '__main__':
main()
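# --- Illustrative invocations (hypothetical paths and hosts) ---
# single-machine run on a local .npy matrix:
#   python distributed_kmeans.py --indata data.npy --k 1000 --out cent.npy
# one server per data shard, then a client that aggregates them:
#   python distributed_kmeans.py --indata shard0.npy --server --port 12345
#   python distributed_kmeans.py --client \
#       --servers "host0:12345 host1:12345" --k 1000 --out cent.npy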
|
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import os
import warnings
import six
from six.moves import configparser
from six import StringIO
from sdict import adict
class Parser(configparser.ConfigParser):
"""ConfigParser subclass that doesn't strictly require section
headers.
"""
def _read(self, fp, fpname):
"""Read the configuration from the given file.
If the file lacks any section header, add a [general] section
header that encompasses the whole thing.
"""
# Attempt to read the file using the superclass implementation.
#
# If it doesn't work because there's no section header, then
# create a section header and call the superclass implementation
# again.
try:
return configparser.ConfigParser._read(self, fp, fpname)
except configparser.MissingSectionHeaderError:
fp.seek(0)
string = '[general]\n%s' % fp.read()
flo = StringIO(string) # flo == file-like object
return configparser.ConfigParser._read(self, flo, fpname)
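# --- Illustrative sketch (not part of the original module) ---
# A config file containing only "host = tower.example.com" (no section
# header) would normally raise MissingSectionHeaderError; this subclass
# rewinds the file and re-reads it as if it began with "[general]", so
# the value becomes available as parser.get('general', 'host').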
class Settings(object):
"""An object that understands permanent configuration provided to
tower-cli through configuration files or command-line arguments.
The order of precedence for settings, from least to greatest, is:
- defaults provided in this class
- `/etc/awx/tower_cli.cfg`
- `~/.tower_cli.cfg`
- command line arguments
"""
_parser_names = ['runtime', 'local', 'user', 'global', 'defaults']
def __init__(self):
"""Create the settings object, and read from appropriate files as
well as from `sys.argv`.
"""
self._cache = {}
# Initialize the data dictionary for the default level
# precedence (that is, the bottom of the totem pole).
defaults = {
'color': 'true',
'format': 'human',
'host': '127.0.0.1',
'password': '',
'username': '',
'verbose': 'false',
}
self._defaults = Parser(defaults=defaults)
self._defaults.add_section('general')
# If there is a global settings file, initialize it.
self._global = Parser()
self._global.add_section('general')
if os.path.isdir('/etc/awx/'):
# Sanity check: Try to actually get a list of files in `/etc/awx/`.
#
# The default Tower installation caused `/etc/awx/` to have
# extremely restrictive permissions, since it has its own user
# and group and has a chmod of 0750.
#
# This makes it very easy for a user to fall into the mistake
# of writing a config file under sudo which they then cannot read,
# which could lead to difficult-to-troubleshoot situations.
#
# Therefore, check for that particular problem and give a warning
# if we're in that situation.
try:
global_settings = 'tower_cli.cfg' in os.listdir('/etc/awx/')
except OSError:
warnings.warn('/etc/awx/ is present, but not readable with '
'current permissions. Any settings defined in '
'/etc/awx/tower_cli.cfg will not be honored.',
RuntimeWarning)
# If there is a global settings file for Tower CLI, read in its
# contents.
self._global.read('/etc/awx/tower_cli.cfg')
# Initialize a parser for the user settings file.
self._user = Parser()
self._user.add_section('general')
# If there is a user settings file, read it into the parser object.
user_filename = os.path.expanduser('~/.tower_cli.cfg')
self._user.read(user_filename)
# Initialize a parser for the local settings file.
self._local = Parser()
self._local.add_section('general')
# If there is a local settings file in the current working directory
# or any parent, read it into the parser object.
#
# As a first step, we need to get each of the parents.
cwd = os.getcwd()
local_dirs = []
for i in range(0, len(cwd.split('/'))):
local_dir = '/'.join(cwd.split('/')[0:i + 1])
if len(local_dir) == 0:
local_dir = '/'
# Sanity check: if this directory corresponds to our global or
# user directory, skip it.
if local_dir in (os.path.expanduser('~'), '/etc/awx'):
continue
# Add this directory to the list.
local_dirs.append(local_dir)
# Iterate over each potential local config file and attempt to read
# it (most won't exist, which is fine).
for local_dir in local_dirs:
local_filename = '%s/.tower_cli.cfg' % local_dir
self._local.read(local_filename)
# Put a stubbed runtime parser in.
self._runtime = Parser()
self._runtime.add_section('general')
def __getattr__(self, key):
"""Return the approprate value, intelligently type-casted in the
case of numbers or booleans.
"""
# Sanity check: Have I cached this value? If so, return that.
if key in self._cache:
return self._cache[key]
# Run through each of the parsers and check for a value. Whenever
# we actually find a value, try to determine the correct type for it
# and cache and return a value of that type.
for parser in self._parsers:
# Get the value from this parser; if it's None, then this
# key isn't present and we move on to the next one.
try:
value = parser.get('general', key)
except configparser.NoOptionError:
continue
# We have a value; it may or may not be a string, though, so
# try to return it as an int, float, or boolean (in that order)
# before falling back to the string value.
type_method = ('getint', 'getfloat', 'getboolean')
for tm in type_method:
try:
value = getattr(parser, tm)('general', key)
break
except ValueError:
pass
# Write the value to the cache, so we don't have to do this lookup
# logic on subsequent requests.
self._cache[key] = value
return self._cache[key]
# If we got here, that means that the attribute wasn't found, and
# also that there is no default; raise an exception.
raise AttributeError('No setting exists: %s.' % key.lower())
@property
def _parsers(self):
"""Return a tuple of all parsers, in order.
This is referenced at runtime, to avoid gleefully ignoring the
`runtime_values` context manager.
"""
return tuple([getattr(self, '_%s' % i) for i in self._parser_names])
@contextlib.contextmanager
def runtime_values(self, **kwargs):
"""Temporarily override the runtime settings, which exist at the
highest precedence level.
"""
# Coerce all values to strings (to be coerced back by configparser
# later) and defenestrate any None values.
for k, v in copy.copy(kwargs).items():
# If the value is None, just get rid of it.
if v is None:
kwargs.pop(k)
continue
# Remove these keys from the cache, if they are present.
self._cache.pop(k, None)
# Coerce values to strings.
kwargs[k] = six.text_type(v)
# Replace the `self._runtime` INI parser with a new one, using
# the context manager's kwargs as the "defaults" (there can never
# be anything other than defaults, but that isn't a problem for our
# purposes because we're using our own precedence system).
#
# Ensure that everything is put back to rights at the end of the
# context manager call.
old_runtime_parser = self._runtime
try:
self._runtime = Parser(defaults=kwargs)
self._runtime.add_section('general')
yield self
finally:
# Revert the runtime configparser object.
self._runtime = old_runtime_parser
# Remove the keys from the cache again, since the settings
# have been reverted.
for key in kwargs:
self._cache.pop(key, None)
# The primary way to interact with settings is to simply hit the
# already constructed settings object.
settings = Settings()
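# --- Illustrative usage sketch ---
#
#   settings.host                        # resolved through the parser chain
#   with settings.runtime_values(verbose=True):
#       settings.verbose                 # True inside the block only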
|
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import mock
from django.test import TestCase
from rest_framework.test import APIRequestFactory, APITestCase
from readthedocs.builds.constants import BRANCH, LATEST, TAG
from readthedocs.builds.models import Version
from readthedocs.core.middleware import FooterNoSessionMiddleware
from readthedocs.projects.models import Project
from readthedocs.restapi.views.footer_views import (
footer_html, get_version_compare_data)
from readthedocs.rtd_tests.mocks.paths import fake_paths_by_regex
class Testmaker(APITestCase):
fixtures = ['test_data']
url = '/api/v2/footer_html/?project=pip&version=latest&page=index'
factory = APIRequestFactory()
@classmethod
def setUpTestData(cls):
cls.pip = Project.objects.get(slug='pip')
cls.latest = cls.pip.versions.create_latest()
def render(self):
request = self.factory.get(self.url)
response = footer_html(request)
response.render()
return response
def test_footer(self):
r = self.client.get(self.url)
self.assertTrue(r.data['version_active'])
self.assertTrue(r.data['version_compare']['is_highest'])
self.assertTrue(r.data['version_supported'])
self.assertFalse(r.data['show_version_warning'])
self.assertEqual(r.context['main_project'], self.pip)
self.assertEqual(r.status_code, 200)
self.latest.active = False
self.latest.save()
r = self.render()
self.assertFalse(r.data['version_active'])
self.assertEqual(r.status_code, 200)
def test_footer_uses_version_compare(self):
version_compare = 'readthedocs.restapi.views.footer_views.get_version_compare_data' # noqa
with mock.patch(version_compare) as get_version_compare_data:
get_version_compare_data.return_value = {
'MOCKED': True,
}
r = self.render()
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data['version_compare'], {'MOCKED': True})
def test_pdf_build_mentioned_in_footer(self):
with fake_paths_by_regex('\.pdf$'):
response = self.render()
self.assertIn('pdf', response.data['html'])
def test_pdf_not_mentioned_in_footer_when_build_is_disabled(self):
self.pip.enable_pdf_build = False
self.pip.save()
with fake_paths_by_regex('\.pdf$'):
response = self.render()
self.assertNotIn('pdf', response.data['html'])
def test_epub_build_mentioned_in_footer(self):
with fake_paths_by_regex('\.epub$'):
response = self.render()
self.assertIn('epub', response.data['html'])
def test_epub_not_mentioned_in_footer_when_build_is_disabled(self):
self.pip.enable_epub_build = False
self.pip.save()
with fake_paths_by_regex('\.epub$'):
response = self.render()
self.assertNotIn('epub', response.data['html'])
def test_no_session_logged_out(self):
mid = FooterNoSessionMiddleware()
# Null session here
request = self.factory.get('/api/v2/footer_html/')
mid.process_request(request)
self.assertEqual(request.session, {})
# Proper session here
home_request = self.factory.get('/')
mid.process_request(home_request)
self.assertEqual(home_request.session.TEST_COOKIE_NAME, 'testcookie')
def test_show_version_warning(self):
self.pip.show_version_warning = True
self.pip.save()
response = self.render()
self.assertTrue(response.data['show_version_warning'])
class TestVersionCompareFooter(TestCase):
fixtures = ['test_data']
def setUp(self):
self.pip = Project.objects.get(slug='pip')
def test_highest_version_from_stable(self):
base_version = self.pip.get_stable_version()
valid_data = {
'project': 'Version 0.8.1 of Pip (19)',
'url': '/dashboard/pip/version/0.8.1/',
'slug': '0.8.1',
'version': '0.8.1',
'is_highest': True,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
def test_highest_version_from_lower(self):
base_version = self.pip.versions.get(slug='0.8')
valid_data = {
'project': 'Version 0.8.1 of Pip (19)',
'url': '/dashboard/pip/version/0.8.1/',
'slug': '0.8.1',
'version': '0.8.1',
'is_highest': False,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
def test_highest_version_from_latest(self):
Version.objects.create_latest(project=self.pip)
base_version = self.pip.versions.get(slug=LATEST)
valid_data = {
'project': 'Version 0.8.1 of Pip (19)',
'url': '/dashboard/pip/version/0.8.1/',
'slug': '0.8.1',
'version': '0.8.1',
'is_highest': True,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
def test_highest_version_over_branches(self):
Version.objects.create(
project=self.pip,
verbose_name='2.0.0',
identifier='2.0.0',
type=BRANCH,
active=True,
)
version = Version.objects.create(
project=self.pip,
verbose_name='1.0.0',
identifier='1.0.0',
type=TAG,
active=True,
)
base_version = self.pip.versions.get(slug='0.8.1')
valid_data = {
'project': 'Version 1.0.0 of Pip ({})'.format(version.pk),
'url': '/dashboard/pip/version/1.0.0/',
'slug': '1.0.0',
'version': '1.0.0',
'is_highest': False,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
def test_highest_version_without_tags(self):
self.pip.versions.filter(type=TAG).update(type=BRANCH)
base_version = self.pip.versions.get(slug='0.8.1')
valid_data = {
'project': 'Version 0.8.1 of Pip (19)',
'url': '/dashboard/pip/version/0.8.1/',
'slug': '0.8.1',
'version': '0.8.1',
'is_highest': True,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
base_version = self.pip.versions.get(slug='0.8')
valid_data = {
'project': 'Version 0.8.1 of Pip (19)',
'url': '/dashboard/pip/version/0.8.1/',
'slug': '0.8.1',
'version': '0.8.1',
'is_highest': False,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
version = Version.objects.create(
project=self.pip,
verbose_name='2.0.0',
identifier='2.0.0',
type=BRANCH,
active=True,
)
valid_data = {
'project': 'Version 2.0.0 of Pip ({})'.format(version.pk),
'url': '/dashboard/pip/version/2.0.0/',
'slug': '2.0.0',
'version': '2.0.0',
'is_highest': False,
}
returned_data = get_version_compare_data(self.pip, base_version)
self.assertDictEqual(valid_data, returned_data)
|
|
import fastlmm.inference.lmm_cov as lmm
import numpy as np
import fastlmm.util.stats.chi2mixture as c2
import fastlmm.association as association
import scipy.stats as st
import tests_util as tu
class lrt(association.varcomp_test):
__slots__ = ["lmm","lrt","forcefullrank","nullModel","altModel","G0","K0","__testGcalled","model0","model1"]
def __init__(self, Y, X=None, appendbias=False, forcefullrank=False, G0=None, K0=None, nullModel=None,altModel=None):
association.varcomp_test.__init__(self,Y=Y,X=X,appendbias=appendbias)
N = self.Y.shape[0]
self.forcefullrank=forcefullrank
self.nullModel = nullModel
self.altModel = altModel
self.G0=G0
self.K0=K0
self.__testGcalled=False
self.lmm = lmm.LMM(forcefullrank=self.forcefullrank, X=self.X, linreg=None, Y=self.Y[:,np.newaxis], G=self.G0, K=self.K0, regressX=True)
self.model0 = self.lmm.findH2()# The null model only has a single kernel and only needs to find h2
self.model1=None
@property
def _testGcalled(self):
return self.__testGcalled
def testG(self, G1, type=None,i_exclude=None,G_exclude=None):
self.__testGcalled=True
#compute the alternative likelihood
(lik1,stat,alteqnull) = self._altModelMixedEffectLinear(G1,i_exclude=i_exclude,G_exclude=G_exclude)
#due to optimization, the alternative log-likelihood might be about 1E-6 worse than the null log-likelihood
pvreg = (st.chi2.sf(stat,1.0)) #standard way to compute p-value when no boundary conditions
if np.isnan(pvreg) or pvreg>1.0:
pvreg=1.0
pv = 0.5*pvreg #conservative 50/50 estimate
if alteqnull: pv=1.0 #chi_0 component
test={
'pv':pv,
'stat':stat,
'lik1':lik1,
'lik0':self.model0,
'alteqnull':alteqnull
}
return test
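# The statistic above is the standard likelihood ratio
#   stat = 2 * (logLik(alt) - logLik(null)) = 2 * (nLL_null - nLL_alt)
# and, because the variance component is tested on the boundary of its
# parameter space, the null distribution is the 50/50 chi-bar-squared
# mixture, giving p = 0.5 * P(chi2_1 >= stat) (the chi2_0 mass is the
# alteqnull case, which forces p = 1).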
def _altModelMixedEffectLinear(self, G1,tol=0.0,i_exclude=None,G_exclude=None):
lik0=self.model0
G, i_G1, n_exclude = tu.set_Gexclude(G_exclude, G1, i_exclude)
UGup,UUGup = self.lmm.rotate(G)
i_up=~i_G1
#update null model if SNPs are excluded:
if n_exclude:
if UUGup is not None:
UUGup_=UUGup[:,0:n_exclude]
else:
UUGup_=None
lik0 = self.lmm.findH2_2K(nGridH2=100, minH2 = 0.0, maxH2 = 0.99999, i_up=i_up[0:n_exclude], i_G1=i_G1[0:n_exclude], UW=UGup[:,0:n_exclude], UUW=UUGup_)#the updated null model also has two kernels and needs to find both a2 and h2
#build indicator for test SNPs (i_G1) and excluded SNPs (i_up)
#we currently don't account for exclusion of snps in G1 (low rank update could be even more low rank)
#alternative model likelihood:
lik1 = self.lmm.findH2_2K(nGridH2=100, minH2 = 0.0, maxH2 = 0.99999, i_up=i_up, i_G1=i_G1, UW=UGup, UUW=UUGup)#The alternative model has two kernels and needs to find both a2 and h2
try:
alteqnull=lik1['h2_1'][0]<=(0.0+tol)
        except (IndexError, TypeError):
alteqnull=lik1['h2_1']<=(0.0+tol)
stat = 2.0*(lik0['nLL'][0] - lik1['nLL'][0])
self.model1=lik1
return (lik1,stat,alteqnull)
class LRT_up(object):
    """Likelihood-ratio test wrapper that supports low-rank kernel updates."""
    __slots__ = ["model0","model1","lrt","forcefullrank","nullModel","altModel","G0","__testGcalled"]
def check_nperm(self,nperm):
return nperm #permutations are fine, so just return
def __str__(self):
return "lrt_up"
def construct(self, Y, X=None, forcefullrank = False, SNPs0 = None, i_exclude=None, nullModel = None, altModel = None,
scoring = None, greater_is_better = None):
G0,K0=tu.set_snps0(SNPs0=SNPs0,sample_size=Y.shape[0],i_exclude=i_exclude)
print "constructing LMM - this should only happen once."
return lrt(Y, X=X, forcefullrank=forcefullrank, G0=G0, K0=K0, nullModel=nullModel,altModel=altModel)
    def pv(self, squaredform, expectationsqform, varsqform, GPG):
        raise Exception("'pv' doesn't apply to lrt, only to davies")
@property
def npvals(self):
return 1 # return only 1 type of p-value
def w2(self, G0, result):
if G0 is not None:
return result.h2_1
else:
raise NotImplementedError("only with backgr. K")
def lrt(self, result):
return result.stat
def pv_adj_from_result(self, result):
'''
If local aUD exists, take that, if not, take the raw local.
'''
        if "pv-local-aUD" in result.test and not np.isnan(result.test["pv-local-aUD"]):
            return result.test["pv-local-aUD"]
        elif "pv-local" in result.test:
            return result.test["pv-local"]
else:
return np.nan
def pv_adj_and_ind(self, nperm, pv_adj, nullfit, lrt, lrtperm,
alteqnull, alteqnullperm, qmax, nullfitfile, nlocalperm):
if nlocalperm>0: #don't do the fitting
ind = pv_adj.argsort()
return pv_adj, ind
from fastlmm.association.tests import Cv
return Cv.pv_adj_and_ind(nperm, pv_adj, nullfit, lrt, lrtperm,
alteqnull, alteqnullperm, qmax, nullfitfile, nlocalperm) # call the shared version of this method
def write(self, fp,ind, result_dict, pv_adj, detailed_table, signal_ratio=True):
        if "pv-local-aUD" in result_dict[0].test:
            # For p_adj we use pv-local-aUD if it exists, and otherwise
            # pv-local, so the "P-value adjusted" column alone doesn't say
            # which one was used. To disambiguate, also print "pv-local" here.
colnames = ["SetId", "LogLikeAlt", "LogLikeNull", "P-value_adjusted","P-value-local",
"P-value(50/50)", "#SNPs_in_Set", "#ExcludedSNPs", "chrm", "pos. range"]
else:
colnames = ["SetId", "LogLikeAlt", "LogLikeNull", "P-value_adjusted",
"P-value(50/50)", "#SNPs_in_Set", "#ExcludedSNPs", "chrm", "pos. range"]
if signal_ratio:
colnames.append("Alt_h2")
colnames.append("Alt_h2_1")
head = "\t".join(colnames)
if detailed_table:
lik1Info = result_dict[0].lik1Details
lik0Info = result_dict[0].lik0Details
altNames = lik1Info.keys()
altIndices = sorted(range(len(altNames)), key=lambda k: altNames[k])
altNames.sort()
altNames = ['Alt'+t for t in altNames]
head += "\t" + "\t".join( altNames )
nullNames = lik0Info.keys()
nullIndices = sorted(range(len(nullNames)), key=lambda k: nullNames[k])
nullNames.sort()
nullNames = ['Null'+t for t in nullNames]
head += "\t" + "\t".join( nullNames )
head += "\n"
fp.write(head)
for i in xrange(len(ind)):
ii = ind[i]
result = result_dict[ii]
ll0=str( -(result.stat/2.0+result.test['lik1']['nLL'][0]) )
            if "pv-local-aUD" in result_dict[0].test:
rowvals = [result.setname, str(-result.test['lik1']['nLL'][0]), ll0,
str(pv_adj[ii]),str(result.test['pv-local']),str(result.pv), str(result.setsize),
str(result.nexclude), result.ichrm, result.iposrange]
else:
rowvals = [result.setname, str(-result.test['lik1']['nLL'][0]), ll0,
str(pv_adj[ii]), str(result.pv), str(result.setsize),
str(result.nexclude), result.ichrm, result.iposrange]
if signal_ratio:
rowvals.append(str(result.h2))
rowvals.append(str(result.h2_1))
row = "\t".join(rowvals)
if detailed_table:
lik1Info = result.lik1Details
lik0Info = result.lik0Details
vals = lik1Info.values()
vals = [vals[j] for j in altIndices]
row += "\t" + "\t".join([str(v) for v in vals])
vals = lik0Info.values()
vals = [vals[j] for j in nullIndices]
row += "\t" + "\t".join([str(v) for v in vals])
row += "\n"
fp.write(row)
def pv_etc(self, filenull, G0_to_use, G1, y, x, null_model, varcomp_test, forcefullrank):
        if filenull is not None:
return lr.twokerneltest(G0=G0_to_use, G1=G1, y=y, covar=x, appendbias=False,lik0=null_model,forcefullrank = forcefullrank)
else:
return lr.onekerneltest(G1=G1, y=y, covar=x, appendbias=False,lik0=varcomp_test,forcefullrank = self.forcefullrank)
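# --- Illustrative sketch (not part of the original module) ---
# The 50/50 p-value used in lrt.testG comes from testing a variance
# component that sits on the boundary of its parameter space: under the
# null the LRT statistic is asymptotically a 0.5*chi2_0 + 0.5*chi2_1
# mixture, so the chi2_1 survival function is halved. A minimal
# standalone check, assuming only scipy:
if __name__ == "__main__":
    import scipy.stats as _st
    _stat = 3.84  # hypothetical LRT statistic
    print "50/50 mixture p-value for stat=%.2f: %.4f" % (_stat, 0.5 * _st.chi2.sf(_stat, 1.0))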
|
|
from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Optional, Set, Text, Tuple
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
from django.http import HttpResponse
import django.contrib.auth
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib.actions import do_create_user
from zerver.models import UserProfile, Realm, get_user_profile_by_id, \
get_user_profile_by_email, remote_user_to_email, email_to_username, \
get_realm_by_email_domain
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
from social.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
from social.exceptions import AuthFailed
from django.contrib.auth import authenticate
from zerver.lib.utils import check_subdomain, get_subdomain
def pad_method_dict(method_dict):
# type: (Dict[Text, bool]) -> Dict[Text, bool]
"""Pads an authentication methods dict to contain all auth backends
supported by the software, regardless of whether they are
configured on this server"""
for key in AUTH_BACKEND_NAME_MAP:
if key not in method_dict:
method_dict[key] = False
return method_dict
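# Example (hypothetical input): with the six backends registered in
# AUTH_BACKEND_NAME_MAP below, pad_method_dict({u'Email': True}) returns a
# dict containing all six keys, where every backend except Email is False.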
def auth_enabled_helper(backends_to_check, realm):
# type: (List[Text], Optional[Realm]) -> bool
if realm is not None:
enabled_method_dict = realm.authentication_methods_dict()
pad_method_dict(enabled_method_dict)
else:
enabled_method_dict = dict((method, True) for method in Realm.AUTHENTICATION_FLAGS)
pad_method_dict(enabled_method_dict)
for supported_backend in django.contrib.auth.get_backends():
for backend_name in backends_to_check:
backend = AUTH_BACKEND_NAME_MAP[backend_name]
if enabled_method_dict[backend_name] and isinstance(supported_backend, backend):
return True
return False
def ldap_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'LDAP'], realm)
def email_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Email'], realm)
def password_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return ldap_auth_enabled(realm) or email_auth_enabled(realm)
def dev_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Dev'], realm)
def google_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Google'], realm)
def github_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'GitHub'], realm)
def common_get_active_user_by_email(email, return_data=None):
# type: (Text, Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return None
if not user_profile.is_active:
if return_data is not None:
return_data['inactive_user'] = True
return None
if user_profile.realm.deactivated:
if return_data is not None:
return_data['inactive_realm'] = True
return None
return user_profile
class ZulipAuthMixin(object):
def get_user(self, user_profile_id):
# type: (int) -> Optional[UserProfile]
""" Get a UserProfile object from the user_profile_id. """
try:
return get_user_profile_by_id(user_profile_id)
except UserProfile.DoesNotExist:
return None
class SocialAuthMixin(ZulipAuthMixin):
auth_backend_name = None # type: Text
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def authenticate(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[UserProfile]
return_data = kwargs.get('return_data', {})
email_address = self.get_email_address(*args, **kwargs)
if not email_address:
return None
try:
user_profile = get_user_profile_by_email(email_address)
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(kwargs.get("realm_subdomain"),
user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not auth_enabled_helper([self.auth_backend_name], user_profile.realm):
return_data["auth_backend_disabled"] = True
return None
return user_profile
def process_do_auth(self, user_profile, *args, **kwargs):
# type: (UserProfile, *Any, **Any) -> Optional[HttpResponse]
# This function needs to be imported from here due to the cyclic
# dependency.
from zerver.views.auth import (login_or_register_remote_user,
redirect_to_subdomain_login_url)
from zerver.views import redirect_and_log_into_subdomain
return_data = kwargs.get('return_data', {})
inactive_user = return_data.get('inactive_user')
inactive_realm = return_data.get('inactive_realm')
invalid_subdomain = return_data.get('invalid_subdomain')
if inactive_user or inactive_realm:
return None
strategy = self.strategy # type: ignore # This comes from Python Social Auth.
request = strategy.request
email_address = self.get_email_address(*args, **kwargs)
full_name = self.get_full_name(*args, **kwargs)
subdomain = strategy.session_get('subdomain')
if not subdomain:
return login_or_register_remote_user(request, email_address,
user_profile, full_name,
bool(invalid_subdomain))
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist:
return redirect_to_subdomain_login_url()
return redirect_and_log_into_subdomain(realm, full_name, email_address)
class ZulipDummyBackend(ZulipAuthMixin):
"""
Used when we want to log you in but we don't know which backend to use.
"""
def authenticate(self, username=None, realm_subdomain=None, use_dummy_backend=False,
return_data=None):
# type: (Optional[Text], Optional[Text], bool, Optional[Dict[str, Any]]) -> Optional[UserProfile]
if use_dummy_backend:
user_profile = common_get_active_user_by_email(username)
if user_profile is None:
return None
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
return user_profile
return None
class EmailAuthBackend(ZulipAuthMixin):
"""
Email Authentication Backend
Allows a user to sign in using an email/password pair rather than
a username/password pair.
"""
def authenticate(self, username=None, password=None, realm_subdomain=None, return_data=None):
# type: (Optional[Text], Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
""" Authenticate a user based on email address as the user name. """
if username is None or password is None:
# Return immediately. Otherwise we will look for a SQL row with
# NULL username. While that's probably harmless, it's needless
# exposure.
return None
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not password_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['password_auth_disabled'] = True
return None
if not email_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['email_auth_disabled'] = True
return None
if user_profile.check_password(password):
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
return user_profile
return None
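# Typical use of EmailAuthBackend is via Django's authenticate()
# (hypothetical values shown; return_data collects failure reasons):
#   return_data = {}
#   user = authenticate(username=u'hamlet@example.com', password='secret',
#                       realm_subdomain=u'lear', return_data=return_data)
#   # Either user is a UserProfile, or it is None and return_data says why.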
class GoogleMobileOauth2Backend(ZulipAuthMixin):
"""
Google Apps authentication for mobile devices
Allows a user to sign in using a Google-issued OAuth2 token.
Ref:
https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
"""
def authenticate(self, google_oauth2_token=None, realm_subdomain=None, return_data={}):
# type: (Optional[str], Optional[Text], Dict[str, Any]) -> Optional[UserProfile]
try:
token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
except AppIdentityError:
return None
if token_payload["email_verified"] in (True, "true"):
try:
user_profile = get_user_profile_by_email(token_payload["email"])
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not google_auth_enabled(realm=user_profile.realm):
return_data["google_auth_disabled"] = True
return None
return user_profile
        else:
            return_data["valid_attestation"] = False
            return None
class ZulipRemoteUserBackend(RemoteUserBackend):
create_unknown_user = False
def authenticate(self, remote_user, realm_subdomain=None):
# type: (str, Optional[Text]) -> Optional[UserProfile]
if not remote_user:
return None
email = remote_user_to_email(remote_user)
user_profile = common_get_active_user_by_email(email)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
if not auth_enabled_helper([u"RemoteUser"], user_profile.realm):
return None
return user_profile
class ZulipLDAPException(Exception):
pass
class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
# Don't use Django LDAP's permissions functions
def has_perm(self, user, perm, obj=None):
# type: (UserProfile, Any, Any) -> bool
# Using Any type is safe because we are not doing anything with
# the arguments.
return False
def has_module_perms(self, user, app_label):
# type: (UserProfile, str) -> bool
return False
def get_all_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def get_group_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def django_to_ldap_username(self, username):
# type: (Text) -> Text
if settings.LDAP_APPEND_DOMAIN:
if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN):
raise ZulipLDAPException("Username does not match LDAP domain.")
return email_to_username(username)
return username
def ldap_to_django_username(self, username):
# type: (str) -> str
if settings.LDAP_APPEND_DOMAIN:
return "@".join((username, settings.LDAP_APPEND_DOMAIN))
return username
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None, return_data=None):
# type: (Text, str, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
username = self.django_to_ldap_username(username)
user_profile = ZulipLDAPAuthBackendBase.authenticate(self, username, password)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
return user_profile
except Realm.DoesNotExist:
return None
except ZulipLDAPException:
return None
def get_or_create_user(self, username, ldap_user):
# type: (str, _LDAPUser) -> Tuple[UserProfile, bool]
try:
user_profile = get_user_profile_by_email(username)
if not user_profile.is_active or user_profile.realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
if not ldap_auth_enabled(user_profile.realm):
raise ZulipLDAPException("LDAP Authentication is not enabled")
return user_profile, False
except UserProfile.DoesNotExist:
realm = get_realm_by_email_domain(username)
# No need to check for an inactive user since they don't exist yet
if realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"]
short_name = full_name = ldap_user.attrs[full_name_attr][0]
if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"]
short_name = ldap_user.attrs[short_name_attr][0]
user_profile = do_create_user(username, None, realm, full_name, short_name)
return user_profile, True
# Just like ZulipLDAPAuthBackend, but doesn't let you log in.
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None):
# type: (Text, str, Optional[Text]) -> None
return None
class DevAuthBackend(ZulipAuthMixin):
# Allow logging in as any user without a password.
# This is used for convenience when developing Zulip.
def authenticate(self, username, realm_subdomain=None, return_data=None):
# type: (Text, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not dev_auth_enabled(user_profile.realm):
return None
return user_profile
class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2):
auth_backend_name = u"GitHub"
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Text]
try:
return kwargs['response']['email']
except KeyError:
return None
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
try:
return kwargs['response']['name']
except KeyError:
return ''
def do_auth(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[HttpResponse]
kwargs['return_data'] = {}
request = self.strategy.request
kwargs['realm_subdomain'] = get_subdomain(request)
user_profile = None
team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID
org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME
if (team_id is None and org_name is None):
user_profile = GithubOAuth2.do_auth(self, *args, **kwargs)
elif (team_id):
backend = GithubTeamOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User profile not member of team.")
user_profile = None
elif (org_name):
backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User profile not member of organisation.")
user_profile = None
return self.process_do_auth(user_profile, *args, **kwargs)
AUTH_BACKEND_NAME_MAP = {
u'Dev': DevAuthBackend,
u'Email': EmailAuthBackend,
u'GitHub': GitHubAuthBackend,
u'Google': GoogleMobileOauth2Backend,
u'LDAP': ZulipLDAPAuthBackend,
u'RemoteUser': ZulipRemoteUserBackend,
} # type: Dict[Text, Any]
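# Example (hypothetical realm): if realm.authentication_methods_dict()
# returns {u'Email': True, u'GitHub': False}, then after padding,
# auth_enabled_helper([u'GitHub'], realm) is False even when
# GitHubAuthBackend is installed, while email_auth_enabled(realm) is True
# provided EmailAuthBackend appears in django.contrib.auth.get_backends().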
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
import mox
from oslo.serialization import jsonutils
from nova.compute import vm_states
from nova import db
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.virt import hardware
NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(
0, set([1, 2]), 512),
hardware.VirtNUMATopologyCellUsage(
1, set([3, 4]), 512)])
COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0, updated_at=None,
service=dict(host='host1', disabled=False),
hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0, updated_at=None,
service=dict(host='host2', disabled=True),
hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0, updated_at=None,
service=dict(host='host3', disabled=False),
hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY.to_json()),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0, updated_at=None,
service=dict(host='host4', disabled=False),
hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
COMPUTE_NODES_METRICS = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0, updated_at=None,
service=dict(host='host1', disabled=False),
hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 512,
'timestamp': None,
'source': 'host1'
},
{'name': 'bar',
'value': 1.0,
'timestamp': None,
'source': 'host1'
},
])),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0, updated_at=None,
service=dict(host='host2', disabled=True),
hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 1024,
'timestamp': None,
'source': 'host2'
},
{'name': 'bar',
'value': 2.0,
'timestamp': None,
'source': 'host2'
},
])),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0, updated_at=None,
service=dict(host='host3', disabled=False),
hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 3072,
'timestamp': None,
'source': 'host3'
},
{'name': 'bar',
'value': 1.0,
'timestamp': None,
'source': 'host3'
},
])),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8192, local_gb_used=0, updated_at=None,
service=dict(host='host4', disabled=False),
hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 8192,
'timestamp': None,
'source': 'host4'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host4'
},
])),
dict(id=5, local_gb=768, memory_mb=768, vcpus=8,
disk_available_least=768, free_ram_mb=768, vcpus_used=0,
free_disk_gb=768, local_gb_used=0, updated_at=None,
service=dict(host='host5', disabled=False),
hypervisor_hostname='node5', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 768,
'timestamp': None,
'source': 'host5'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host5'
},
{'name': 'zot',
'value': 1,
'timestamp': None,
'source': 'host5'
},
])),
dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8,
disk_available_least=2048, free_ram_mb=2048, vcpus_used=0,
free_disk_gb=2048, local_gb_used=0, updated_at=None,
service=dict(host='host6', disabled=False),
hypervisor_hostname='node6', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 2048,
'timestamp': None,
'source': 'host6'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host6'
},
{'name': 'zot',
'value': 2,
'timestamp': None,
'source': 'host6'
},
])),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host5', node='node5'),
]
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
"""host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
host4: free_ram_mb=8192 free_disk_gb=8192
"""
def __init__(self):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
},
'host2': {
'compute': {'host_memory_free': 2147483648},
},
'host3': {
'compute': {'host_memory_free': 3221225472},
},
'host4': {
'compute': {'host_memory_free': 999999999},
},
}
class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict):
super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
class FakeInstance(object):
def __init__(self, context=None, params=None):
"""Create a test instance. Returns uuid."""
self.context = context
i = self._create_fake_instance(params=params)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = 2
inst['ami_launch_index'] = 0
inst.update(params)
return db.instance_create(self.context, inst)
class FakeComputeAPI(object):
def create_db_entry_for_new_instance(self, *args, **kwargs):
pass
def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
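# --- Illustrative usage sketch (not part of the original fakes) ---
# A scheduler test typically wires these helpers together like:
#   m = mox.Mox()
#   mox_host_manager_db_calls(m, context)
#   m.ReplayAll()
#   sched = FakeFilterScheduler()
#   # ... exercise sched.host_manager against COMPUTE_NODES ...
#   m.VerifyAll()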
|
|
"""The tests for the demo climate component."""
import pytest
import voluptuous as vol
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT, ATTR_CURRENT_HUMIDITY, ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE, ATTR_HUMIDITY, ATTR_HVAC_ACTIONS, ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY, ATTR_MAX_TEMP, ATTR_MIN_HUMIDITY, ATTR_MIN_TEMP,
ATTR_PRESET_MODE, ATTR_SWING_MODE, ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW, CURRENT_HVAC_COOL, DOMAIN, HVAC_MODE_COOL,
HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_AWAY, PRESET_ECO)
from homeassistant.const import ATTR_TEMPERATURE, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from tests.components.climate import common
ENTITY_CLIMATE = 'climate.hvac'
ENTITY_ECOBEE = 'climate.ecobee'
ENTITY_HEATPUMP = 'climate.heatpump'
@pytest.fixture(autouse=True)
async def setup_demo_climate(hass):
"""Initialize setup demo climate."""
hass.config.units = METRIC_SYSTEM
assert await async_setup_component(hass, DOMAIN, {
'climate': {
'platform': 'demo',
}
})
def test_setup_params(hass):
"""Test the initial parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
assert 21 == state.attributes.get(ATTR_TEMPERATURE)
assert 22 == state.attributes.get(ATTR_CURRENT_TEMPERATURE)
assert "On High" == state.attributes.get(ATTR_FAN_MODE)
assert 67 == state.attributes.get(ATTR_HUMIDITY)
assert 54 == state.attributes.get(ATTR_CURRENT_HUMIDITY)
assert "Off" == state.attributes.get(ATTR_SWING_MODE)
assert STATE_OFF == state.attributes.get(ATTR_AUX_HEAT)
assert state.attributes.get(ATTR_HVAC_MODES) == \
['off', 'heat', 'cool', 'auto', 'dry', 'fan_only']
def test_default_setup_params(hass):
"""Test the setup with default parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert 7 == state.attributes.get(ATTR_MIN_TEMP)
assert 35 == state.attributes.get(ATTR_MAX_TEMP)
assert 30 == state.attributes.get(ATTR_MIN_HUMIDITY)
assert 99 == state.attributes.get(ATTR_MAX_HUMIDITY)
async def test_set_only_target_temp_bad_attr(hass):
"""Test setting the target temperature without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert 21 == state.attributes.get(ATTR_TEMPERATURE)
with pytest.raises(vol.Invalid):
await common.async_set_temperature(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
assert 21 == state.attributes.get(ATTR_TEMPERATURE)
async def test_set_only_target_temp(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_CLIMATE)
assert 21 == state.attributes.get(ATTR_TEMPERATURE)
await common.async_set_temperature(hass, 30, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert 30.0 == state.attributes.get(ATTR_TEMPERATURE)
async def test_set_only_target_temp_with_convert(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_HEATPUMP)
assert 20 == state.attributes.get(ATTR_TEMPERATURE)
await common.async_set_temperature(hass, 21, ENTITY_HEATPUMP)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_HEATPUMP)
assert 21.0 == state.attributes.get(ATTR_TEMPERATURE)
async def test_set_target_temp_range(hass):
"""Test the setting of the target temperature with range."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert 21.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)
assert 24.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)
await common.async_set_temperature(
hass, target_temp_high=25, target_temp_low=20, entity_id=ENTITY_ECOBEE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert 20.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)
assert 25.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)
async def test_set_target_temp_range_bad_attr(hass):
"""Test setting the target temperature range without attribute."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert 21.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)
assert 24.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)
with pytest.raises(vol.Invalid):
await common.async_set_temperature(
hass, temperature=None, entity_id=ENTITY_ECOBEE,
target_temp_low=None, target_temp_high=None)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert 21.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)
assert 24.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)
async def test_set_target_humidity_bad_attr(hass):
"""Test setting the target humidity without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert 67 == state.attributes.get(ATTR_HUMIDITY)
with pytest.raises(vol.Invalid):
await common.async_set_humidity(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert 67 == state.attributes.get(ATTR_HUMIDITY)
async def test_set_target_humidity(hass):
"""Test the setting of the target humidity."""
state = hass.states.get(ENTITY_CLIMATE)
assert 67 == state.attributes.get(ATTR_HUMIDITY)
await common.async_set_humidity(hass, 64, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert 64.0 == state.attributes.get(ATTR_HUMIDITY)
async def test_set_fan_mode_bad_attr(hass):
"""Test setting fan mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert "On High" == state.attributes.get(ATTR_FAN_MODE)
with pytest.raises(vol.Invalid):
await common.async_set_fan_mode(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert "On High" == state.attributes.get(ATTR_FAN_MODE)
async def test_set_fan_mode(hass):
"""Test setting of new fan mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert "On High" == state.attributes.get(ATTR_FAN_MODE)
await common.async_set_fan_mode(hass, "On Low", ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert "On Low" == state.attributes.get(ATTR_FAN_MODE)
async def test_set_swing_mode_bad_attr(hass):
"""Test setting swing mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert "Off" == state.attributes.get(ATTR_SWING_MODE)
with pytest.raises(vol.Invalid):
await common.async_set_swing_mode(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert "Off" == state.attributes.get(ATTR_SWING_MODE)
async def test_set_swing(hass):
"""Test setting of new swing mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert "Off" == state.attributes.get(ATTR_SWING_MODE)
await common.async_set_swing_mode(hass, "Auto", ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert "Auto" == state.attributes.get(ATTR_SWING_MODE)
async def test_set_hvac_bad_attr_and_state(hass):
"""Test setting hvac mode without required attribute.
Also check the state.
"""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTIONS) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
with pytest.raises(vol.Invalid):
await common.async_set_hvac_mode(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTIONS) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
async def test_set_hvac(hass):
"""Test setting of new hvac mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_set_hold_mode_away(hass):
"""Test setting the hold mode away."""
await common.async_set_preset_mode(hass, PRESET_AWAY, ENTITY_ECOBEE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY
async def test_set_hold_mode_eco(hass):
"""Test setting the hold mode eco."""
await common.async_set_preset_mode(hass, PRESET_ECO, ENTITY_ECOBEE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
async def test_set_aux_heat_bad_attr(hass):
"""Test setting the auxiliary heater without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
with pytest.raises(vol.Invalid):
await common.async_set_aux_heat(hass, None, ENTITY_CLIMATE)
await hass.async_block_till_done()
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_set_aux_heat_on(hass):
"""Test setting the axillary heater on/true."""
await common.async_set_aux_heat(hass, True, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_ON
async def test_set_aux_heat_off(hass):
"""Test setting the auxiliary heater off/false."""
await common.async_set_aux_heat(hass, False, ENTITY_CLIMATE)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_turn_on(hass):
"""Test turn on device."""
await common.async_set_hvac_mode(hass, HVAC_MODE_OFF, ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
await common.async_turn_on(hass, ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_turn_off(hass):
"""Test turn on device."""
await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT, ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
await common.async_turn_off(hass, ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
|
|
colors = {
'Pink': {
'50': 'fce4ec',
'100': 'f8bbd0',
'200': 'f48fb1',
'300': 'f06292',
'400': 'ec407a',
'500': 'e91e63',
'600': 'd81b60',
'700': 'C2185B',
'800': 'ad1457',
        '900': '880e4f',
'A100': 'ff80ab',
'A400': 'F50057',
'A700': 'c51162',
'A200': 'ff4081'
},
'Blue': {
'200': '90caf9',
'900': '0D47A1',
'600': '1e88e5',
'A100': '82b1ff',
'300': '64b5f6',
'A400': '2979ff',
'700': '1976d2',
'50': 'e3f2fd',
'A700': '2962ff',
'400': '42a5f5',
'100': 'bbdefb',
'800': '1565c0',
'A200': '448aff',
'500': '2196f3'
},
'Indigo': {
'200': '9fa8da',
'900': '1a237e',
'600': '3949ab',
'A100': '8c9eff',
'300': '7986cb',
'A400': '3d5afe',
'700': '303f9f',
'50': 'e8eaf6',
'A700': '304ffe',
'400': '5c6bc0',
'100': 'c5cae9',
'800': '283593',
'A200': '536dfe',
'500': '3f51b5'
},
'BlueGrey': {
'200': 'b0bec5',
'900': '263238',
'600': '546e7a',
'300': '90a4ae',
'700': '455a64',
'50': 'eceff1',
'400': '78909c',
'100': 'cfd8dc',
'800': '37474f',
'500': '607d8b'
},
'Brown': {
'200': 'bcaaa4',
'900': '3e2723',
'600': '6d4c41',
'300': 'a1887f',
'700': '5d4037',
'50': 'efebe9',
'400': '8d6e63',
'100': 'd7ccc8',
'800': '4e342e',
'500': '795548'
},
'LightBlue': {
'200': '81d4fa',
'900': '01579B',
'600': '039BE5',
'A100': '80d8ff',
'300': '4fc3f7',
'A400': '00B0FF',
'700': '0288D1',
'50': 'e1f5fe',
'A700': '0091EA',
'400': '29b6f6',
'100': 'b3e5fc',
'800': '0277BD',
'A200': '40c4ff',
'500': '03A9F4'
},
'Purple': {
'200': 'ce93d8',
'900': '4a148c',
'600': '8e24aa',
'A100': 'ea80fc',
'300': 'ba68c8',
'A400': 'D500F9',
'700': '7b1fa2',
'50': 'f3e5f5',
'A700': 'AA00FF',
'400': 'ab47bc',
'100': 'e1bee7',
'800': '6a1b9a',
'A200': 'e040fb',
'500': '9c27b0'
},
'Grey': {
'200': 'eeeeee',
'900': '212121',
'600': '757575',
'300': 'e0e0e0',
'700': '616161',
'50': 'fafafa',
'400': 'bdbdbd',
'100': 'f5f5f5',
'800': '424242',
'500': '9e9e9e'
},
'Yellow': {
'200': 'fff59d',
'900': 'f57f17',
'600': 'fdd835',
'A100': 'ffff8d',
'300': 'fff176',
'A400': 'FFEA00',
'700': 'fbc02d',
'50': 'fffde7',
'A700': 'FFD600',
'400': 'ffee58',
'100': 'fff9c4',
'800': 'f9a825',
'A200': 'FFFF00',
'500': 'ffeb3b'
},
'LightGreen': {
'200': 'c5e1a5',
'900': '33691e',
'600': '7cb342',
'A100': 'ccff90',
'300': 'aed581',
'A400': '76FF03',
'700': '689f38',
'50': 'f1f8e9',
'A700': '64dd17',
'400': '9ccc65',
'100': 'dcedc8',
'800': '558b2f',
'A200': 'b2ff59',
'500': '8bc34a'
},
'DeepOrange': {
'200': 'ffab91',
        '900': 'bf360c',
'600': 'f4511e',
'A100': 'ff9e80',
'300': 'ff8a65',
'A400': 'FF3D00',
'700': 'e64a19',
'50': 'fbe9e7',
'A700': 'DD2C00',
'400': 'ff7043',
'100': 'ffccbc',
'800': 'd84315',
'A200': 'ff6e40',
'500': 'ff5722'
},
'Green': {
'200': 'a5d6a7',
'900': '1b5e20',
'600': '43a047',
'A100': 'b9f6ca',
'300': '81c784',
'A400': '00E676',
'700': '388e3c',
'50': 'e8f5e9',
'A700': '00C853',
'400': '66bb6a',
'100': 'c8e6c9',
'800': '2e7d32',
'A200': '69f0ae',
'500': '4caf50'
},
'Red': {
'200': 'ef9a9a',
'900': 'b71c1c',
'600': 'e53935',
'A100': 'ff8a80',
'300': 'e57373',
'A400': 'ff1744',
'700': 'd32f2f',
'50': 'ffebee',
'A700': 'd50000',
'400': 'ef5350',
'100': 'ffcdd2',
'800': 'c62828',
'A200': 'ff5252',
'500': 'f44336'
},
'Teal': {
'200': '80cbc4',
'900': '004D40',
'600': '00897B',
'A100': 'a7ffeb',
'300': '4db6ac',
'A400': '1de9b6',
'700': '00796B',
'50': 'e0f2f1',
'A700': '00BFA5',
'400': '26a69a',
'100': 'b2dfdb',
'800': '00695C',
'A200': '64ffda',
'500': '009688'
},
'Orange': {
'200': 'ffcc80',
'900': 'E65100',
'600': 'FB8C00',
'A100': 'ffd180',
'300': 'ffb74d',
'A400': 'FF9100',
'700': 'F57C00',
'50': 'fff3e0',
'A700': 'FF6D00',
'400': 'ffa726',
'100': 'ffe0b2',
'800': 'EF6C00',
'A200': 'ffab40',
'500': 'FF9800'
},
'Cyan': {
'200': '80deea',
'900': '006064',
'600': '00ACC1',
'A100': '84ffff',
'300': '4dd0e1',
'A400': '00E5FF',
'700': '0097A7',
'50': 'e0f7fa',
'A700': '00B8D4',
'400': '26c6da',
'100': 'b2ebf2',
'800': '00838F',
'A200': '18ffff',
'500': '00BCD4'
},
'Amber': {
'200': 'ffe082',
'900': 'FF6F00',
'600': 'FFB300',
'A100': 'ffe57f',
'300': 'ffd54f',
'A400': 'FFC400',
'700': 'FFA000',
'50': 'fff8e1',
'A700': 'FFAB00',
'400': 'ffca28',
'100': 'ffecb3',
'800': 'FF8F00',
'A200': 'ffd740',
'500': 'FFC107'
},
'DeepPurple': {
'200': 'b39ddb',
'900': '311b92',
'600': '5e35b1',
'A100': 'b388ff',
'300': '9575cd',
'A400': '651fff',
'700': '512da8',
'50': 'ede7f6',
'A700': '6200EA',
'400': '7e57c2',
'100': 'd1c4e9',
'800': '4527a0',
'A200': '7c4dff',
'500': '673ab7'
},
'Lime': {
'200': 'e6ee9c',
'900': '827717',
'600': 'c0ca33',
'A100': 'f4ff81',
'300': 'dce775',
'A400': 'C6FF00',
'700': 'afb42b',
'50': 'f9fbe7',
'A700': 'AEEA00',
'400': 'd4e157',
'100': 'f0f4c3',
'800': '9e9d24',
'A200': 'eeff41',
'500': 'cddc39'
},
'Light': {
'StatusBar': 'E0E0E0',
'AppBar': 'F5F5F5',
'Background': 'FAFAFA',
'CardsDialogs': 'FFFFFF',
'FlatButtonDown': 'cccccc'
},
'Dark': {
'StatusBar': '000000',
'AppBar': '212121',
'Background': '303030',
'CardsDialogs': '424242',
'FlatButtonDown': '999999'
}
}
light_colors = {
    'Pink': ['50', '100', '200', 'A100'],
    'Blue': ['50', '100', '200', '300', '400', 'A100'],
    'Indigo': ['50', '100', '200', 'A100'],
    'BlueGrey': ['50', '100', '200', '300'],
    'Brown': ['50', '100', '200'],
    'LightBlue': ['50', '100', '200', '300', '400', '500', 'A100', 'A200', 'A400'],
    'Purple': ['50', '100', '200', 'A100'],
    'Grey': ['50', '100', '200', '300', '400', '500'],
    'Yellow': ['50', '100', '200', '300', '400', '500', '600', '700', '800', '900', 'A100', 'A200', 'A400', 'A700'],
    'LightGreen': ['50', '100', '200', '300', '400', '500', '600', 'A100', 'A200', 'A400', 'A700'],
    'DeepOrange': ['50', '100', '200', '300', '400', 'A100', 'A200'],
    'Green': ['50', '100', '200', '300', '400', '500', 'A100', 'A200', 'A400', 'A700'],
    'Red': ['50', '100', '200', '300', 'A100'],
    'Teal': ['50', '100', '200', '300', '400', 'A100', 'A200', 'A400', 'A700'],
    'Orange': ['50', '100', '200', '300', '400', '500', '600', '700', 'A100', 'A200', 'A400', 'A700'],
    'Cyan': ['50', '100', '200', '300', '400', '500', '600', 'A100', 'A200', 'A400', 'A700'],
    'Amber': ['50', '100', '200', '300', '400', '500', '600', '700', '800', '900', 'A100', 'A200', 'A400', 'A700'],
    'DeepPurple': ['50', '100', '200', 'A100'],
    'Lime': ['50', '100', '200', '300', '400', '500', '600', '700', '800', 'A100', 'A200', 'A400', 'A700'],
'Dark': [],
'Light': ['White', 'MainBackground', 'DialogBackground']
}
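# --- Illustrative helper (not part of the original palette data) ---
# The palette stores bare hex strings; consumers usually need RGB tuples.
# A minimal sketch:
def _hex_to_rgb(hex_str):
    """Convert a 6-digit hex color string to an (r, g, b) tuple."""
    return tuple(int(hex_str[i:i + 2], 16) for i in range(0, 6, 2))
if __name__ == '__main__':
    print(_hex_to_rgb(colors['Red']['500']))  # -> (244, 67, 54)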
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# xfsslower Trace slow XFS operations.
# For Linux, uses BCC, eBPF.
#
# USAGE: xfsslower [-h] [-j] [-p PID] [min_ms]
#
# This script traces common XFS file operations: reads, writes, opens, and
# syncs. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these XFS operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; eg, 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# begin to become significant.
#
# By default, a minimum millisecond threshold of 10 is used.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 11-Feb-2016 Brendan Gregg Created this.
# 16-Oct-2016 Dina Goldshtein -p to filter by process ID.
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
# arguments
examples = """examples:
./xfsslower # trace operations slower than 10 ms (default)
./xfsslower 1 # trace operations slower than 1 ms
./xfsslower -j 1 # ... 1 ms, parsable output (csv)
./xfsslower 0 # trace all operations (warning: verbose)
./xfsslower -p 185 # trace PID 185 only
"""
parser = argparse.ArgumentParser(
description="Trace common XFS file operations slower than a threshold",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("min_ms", nargs="?", default='10',
help="minimum I/O duration to trace, in ms (default 10)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = int(args.min_ms)
pid = args.pid
csv = args.csv
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>
// XXX: switch these to char's when supported
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_FSYNC 3
struct val_t {
u64 ts;
u64 offset;
struct file *fp;
};
struct data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 type;
u64 size;
u64 offset;
u64 delta_us;
u64 pid;
char task[TASK_COMM_LEN];
char file[DNAME_INLINE_LEN];
};
BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);
//
// Store timestamp and size on entry
//
// xfs_file_read_iter(), xfs_file_write_iter():
int trace_rw_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = iocb->ki_filp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// xfs_file_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// xfs_file_fsync():
int trace_fsync_entry(struct pt_regs *ctx, struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
//
// Output
//
static int trace_return(struct pt_regs *ctx, int type)
{
struct val_t *valp;
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
valp = entryinfo.lookup(&id);
if (valp == 0) {
// missed tracing issue or filtered
return 0;
}
// calculate delta
u64 ts = bpf_ktime_get_ns();
u64 delta_us = ts - valp->ts;
entryinfo.delete(&id);
// Skip entries with backwards time: temp workaround for #728
if ((s64) delta_us < 0)
return 0;
delta_us /= 1000;
if (FILTER_US)
return 0;
// populate output struct
u32 size = PT_REGS_RC(ctx);
struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
.pid = pid};
data.ts_us = ts / 1000;
data.offset = valp->offset;
bpf_get_current_comm(&data.task, sizeof(data.task));
// workaround (rewriter should handle file to d_name in one step):
struct qstr qs = valp->fp->f_path.dentry->d_name;
if (qs.len == 0)
return 0;
bpf_probe_read_kernel(&data.file, sizeof(data.file), (void *)qs.name);
// output
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_READ);
}
int trace_write_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_WRITE);
}
int trace_open_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_OPEN);
}
int trace_fsync_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_FSYNC);
}
"""
if min_ms == 0:
bpf_text = bpf_text.replace('FILTER_US', '0')
else:
bpf_text = bpf_text.replace('FILTER_US',
'delta_us <= %s' % str(min_ms * 1000))
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
bpf_text = bpf_text.replace('FILTER_PID', '0')
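# e.g. with the default threshold (min_ms=10) and '-p 185', the
# placeholders above become 'delta_us <= 10000' and 'pid != 185'
# in the generated C program.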
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
type = 'R'
if event.type == 1:
type = 'W'
elif event.type == 2:
type = 'O'
elif event.type == 3:
type = 'S'
if (csv):
print("%d,%s,%d,%s,%d,%d,%d,%s" % (
event.ts_us, event.task, event.pid, type, event.size,
event.offset, event.delta_us, event.file))
return
print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
event.task, event.pid, type, event.size, event.offset / 1024,
float(event.delta_us) / 1000, event.file))
# initialize BPF
b = BPF(text=bpf_text)
# common file functions
b.attach_kprobe(event="xfs_file_read_iter", fn_name="trace_rw_entry")
b.attach_kprobe(event="xfs_file_write_iter", fn_name="trace_rw_entry")
b.attach_kprobe(event="xfs_file_open", fn_name="trace_open_entry")
b.attach_kprobe(event="xfs_file_fsync", fn_name="trace_fsync_entry")
b.attach_kretprobe(event="xfs_file_read_iter", fn_name="trace_read_return")
b.attach_kretprobe(event="xfs_file_write_iter", fn_name="trace_write_return")
b.attach_kretprobe(event="xfs_file_open", fn_name="trace_open_return")
b.attach_kretprobe(event="xfs_file_fsync", fn_name="trace_fsync_return")
# header
if (csv):
print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
if min_ms == 0:
print("Tracing XFS operations")
else:
print("Tracing XFS operations slower than %d ms" % min_ms)
print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
"BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))
# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
from data_set import DataSet
from flowstats import cluster
import numpy as np
import pandas as pd
import multiprocessing
def bem_cluster(input_dict):
model = cluster.DPMixtureModel(
input_dict['component_count'],
input_dict['iteration_count'],
burn_in=0,
model='bem'
)
bem_results = model.fit(
input_dict['data'],
0,
seed=input_dict['random_seed'],
munkres_id=False,
verbose=True
)
dp_mixture_iter = bem_results.get_iteration(0)
log_like = dp_mixture_iter.log_likelihood(input_dict['data'])
print log_like
true_comp_count = np.sum(bem_results.pis > 0.0001)
return {
'comp': input_dict['component_count'],
'true_comp': true_comp_count,
'seed': input_dict['random_seed'],
'log_like': log_like
}
class DataCollection(object):
"""
A collection of DataSet objects
"""
def __init__(self):
self._parameter_count = None
self.data_sets = []
@property
def data_set_count(self):
return len(self.data_sets)
def add_data_set(self, data_set):
if not isinstance(data_set, DataSet):
raise TypeError("data_set must be of type DataSet")
if self._parameter_count is None:
self._parameter_count = data_set.parameter_count
        if self._parameter_count != data_set.parameter_count:
            raise ValueError(
                "Data set parameter count must match the existing data sets"
            )
        self.data_sets.append(data_set)
def reset_results(self):
for ds in self.data_sets:
ds.results = None
ds.raw_results = None
def estimate_initial_conditions(self, max_comp=128, max_iter=5000):
# now run bem on the combined data set to get initial conditions
max_log_like = None # the highest value for all runs
converged = False
component_count = max_comp
iteration_count = max_iter
results = [] # will be a list of dicts to convert to a DataFrame
cpu_count = multiprocessing.cpu_count()
bem_pool = multiprocessing.Pool(processes=cpu_count)
data = np.vstack(
[np.vstack(ds.blobs.values()) for ds in self.data_sets]
)
while not converged:
print component_count
new_comp_counts = []
# set of dictionaries for this comp run, one for each seed
input_dicts = [
{
'data': data,
'component_count': component_count,
'iteration_count': iteration_count,
'random_seed': seed
} for seed in range(1, 17)
]
tmp_results_list = bem_pool.map(bem_cluster, input_dicts)
for r in tmp_results_list:
                if max_log_like is None or r['log_like'] > max_log_like:
max_log_like = r['log_like']
for r in tmp_results_list:
# if the new log_like is close to the max (within 1%),
# see if there are any empty components (pi < 0.0001)
if abs(max_log_like - r['log_like']) < abs(max_log_like * 0.01):
new_comp_counts.append(r['true_comp'])
# save good run to our results
results.append(r)
if len(new_comp_counts) > 0:
if int(np.mean(new_comp_counts)) < component_count:
component_count = int(np.min(new_comp_counts))
else:
converged = True
else:
converged = True
results_df = pd.DataFrame(
results,
columns=['comp', 'true_comp', 'seed', 'log_like']
)
min_comp = results_df.comp.min()
best_index = results_df[results_df.comp == min_comp].log_like.argmax()
best_run = results[best_index]
# create a data set that's the combination of all data sets
prelim_ds = DataSet(parameter_count=self._parameter_count)
for i, ds in enumerate(self.data_sets):
# start blob labels at 1 (i + 1)
prelim_ds.add_blob(i + 1, np.vstack(ds.blobs.values()))
prelim_ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=best_run['seed'],
model='bem'
)
log_like = prelim_ds.get_log_likelihood_trace()[0]
print log_like
# get classifications to calculate weights for each data set
pis = []
for label in sorted(prelim_ds.labels):
label_classes = prelim_ds.get_classifications(0, [label])
ds_pis = []
for c in range(best_run['comp']):
ds_pis.append(np.sum(label_classes == c) / float(len(label_classes)))
pis.append(ds_pis) # list of lists
# convert LoL pis to numpy array
pis = np.array(pis)
prelim_ds.plot_classifications(0)
# Re-run a chain using the initial conditions from the last iteration
last_iter = prelim_ds.raw_results.get_iteration(0)
initial_conditions = {
'pis': pis,
'mus': last_iter.mus,
'sigmas': last_iter.sigmas
}
return best_run['comp'], initial_conditions
def cluster(
self,
component_count,
burn_in,
iteration_count,
random_seed,
initial_conditions=None
):
# local 'data_sets' holds the raw data values for each DataSet
data_sets = list()
for ds in self.data_sets:
data = np.vstack(ds.blobs.values())
if data.size == 0:
raise ValueError("Found an empty data set")
data_sets.append(data)
if len(data_sets) < 2:
# nothing for us to do
raise ValueError("HDP needs at least 2 data sets")
model = cluster.HDPMixtureModel(
component_count,
iteration_count,
burn_in
)
if initial_conditions is not None:
# should check keys of initial values, the
# shapes & values should be taken care of in FlowStats
initial_weights = initial_conditions['pis']
model.load_mu(initial_conditions['mus'])
model.load_sigma(initial_conditions['sigmas'])
else:
initial_weights = None
fitted_results = model.fit(
data_sets,
0,
seed=random_seed,
munkres_id=False,
verbose=True,
initial_weights=initial_weights
)
# save results for each DataSet
for i, ds in enumerate(self.data_sets):
ds.add_results(fitted_results[i])
return fitted_results
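# --- Illustrative usage sketch (hypothetical data; not part of the module) ---
# dc = DataCollection()
# dc.add_data_set(ds_a)   # ds_a, ds_b: DataSet instances with equal
# dc.add_data_set(ds_b)   # parameter_count
# comp, init = dc.estimate_initial_conditions(max_comp=64, max_iter=2000)
# results = dc.cluster(comp, burn_in=100, iteration_count=1000,
#                      random_seed=1, initial_conditions=init)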
|
|
import sys
import asyncio
import pickle
import itertools
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import random
from ray.actor import ActorHandle
from ray.serve.common import RunningReplicaInfo
from ray.serve.long_poll import LongPollClient, LongPollNamespace
from ray.serve.utils import compute_iterable_delta, logger
import ray
from ray.util import metrics
@dataclass
class RequestMetadata:
request_id: str
endpoint: str
call_method: str = "__call__"
# This flag will be set to true if the input argument is manually pickled
# and it needs to be deserialized by the replica.
http_arg_is_pickled: bool = False
@dataclass
class Query:
args: List[Any]
kwargs: Dict[Any, Any]
metadata: RequestMetadata
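# Illustrative sketch (hypothetical values): a Query pairs the caller's
# arguments with routing metadata, e.g.
#   meta = RequestMetadata(request_id="req-1", endpoint="my_endpoint")
#   query = Query(args=[payload], kwargs={}, metadata=meta)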
class ReplicaSet:
"""Data structure representing a set of replica actor handles"""
def __init__(
self,
deployment_name,
event_loop: asyncio.AbstractEventLoop,
):
self.deployment_name = deployment_name
self.in_flight_queries: Dict[RunningReplicaInfo, set] = dict()
        # The iterator used for load balancing among replicas. Using
        # itertools.cycle, we implement a round-robin policy that skips
        # overloaded replicas.
# NOTE(simon): We can make this more pluggable and consider different
# policies like: min load, pick min of two replicas, pick replicas on
# the same node.
self.replica_iterator = itertools.cycle(self.in_flight_queries.keys())
        # Used to unblock this replica set when it is waiting for free
        # replicas. A newly added replica or an updated
        # max_concurrent_queries value means a query waiting for a free
        # replica might become unblocked.
        # Python 3.8 deprecated the 'loop' parameter, and Python 3.10
        # removed it altogether. Call accordingly.
if sys.version_info.major >= 3 and sys.version_info.minor >= 10:
self.config_updated_event = asyncio.Event()
else:
self.config_updated_event = asyncio.Event(loop=event_loop)
self.num_queued_queries = 0
self.num_queued_queries_gauge = metrics.Gauge(
"serve_deployment_queued_queries",
description=(
"The current number of queries to this deployment waiting"
" to be assigned to a replica."
),
tag_keys=("deployment", "endpoint"),
)
self.num_queued_queries_gauge.set_default_tags(
{"deployment": self.deployment_name}
)
def update_running_replicas(self, running_replicas: List[RunningReplicaInfo]):
added, removed, _ = compute_iterable_delta(
self.in_flight_queries.keys(), running_replicas
)
for new_replica in added:
self.in_flight_queries[new_replica] = set()
for removed_replica in removed:
# Delete it directly because shutdown is processed by controller.
del self.in_flight_queries[removed_replica]
if len(added) > 0 or len(removed) > 0:
# Shuffle the keys to avoid synchronization across clients.
replicas = list(self.in_flight_queries.keys())
random.shuffle(replicas)
self.replica_iterator = itertools.cycle(replicas)
logger.debug(f"ReplicaSet: +{len(added)}, -{len(removed)} replicas.")
self.config_updated_event.set()
def _try_assign_replica(self, query: Query) -> Optional[ray.ObjectRef]:
"""Try to assign query to a replica, return the object ref if succeeded
or return None if it can't assign this query to any replicas.
"""
for _ in range(len(self.in_flight_queries.keys())):
replica = next(self.replica_iterator)
if len(self.in_flight_queries[replica]) >= replica.max_concurrent_queries:
# This replica is overloaded, try next one
continue
logger.debug(
f"Assigned query {query.metadata.request_id} "
f"to replica {replica.replica_tag}."
)
# Directly passing args because it might contain an ObjectRef.
tracker_ref, user_ref = replica.actor_handle.handle_request.remote(
pickle.dumps(query.metadata), *query.args, **query.kwargs
)
self.in_flight_queries[replica].add(tracker_ref)
return user_ref
return None
@property
def _all_query_refs(self):
return list(itertools.chain.from_iterable(self.in_flight_queries.values()))
def _drain_completed_object_refs(self) -> int:
refs = self._all_query_refs
done, _ = ray.wait(refs, num_returns=len(refs), timeout=0)
for replica_in_flight_queries in self.in_flight_queries.values():
replica_in_flight_queries.difference_update(done)
return len(done)
async def assign_replica(self, query: Query) -> ray.ObjectRef:
"""Given a query, submit it to a replica and return the object ref.
        This method keeps track of the in-flight queries for each replica
        and only sends a query to available replicas (determined by the
        max_concurrent_queries value).
"""
endpoint = query.metadata.endpoint
self.num_queued_queries += 1
self.num_queued_queries_gauge.set(
self.num_queued_queries, tags={"endpoint": endpoint}
)
assigned_ref = self._try_assign_replica(query)
while assigned_ref is None: # Can't assign a replica right now.
logger.debug(
"Failed to assign a replica for " f"query {query.metadata.request_id}"
)
            # Maybe a free replica exists; we just need to refresh our
            # query tracker.
num_finished = self._drain_completed_object_refs()
# All replicas are really busy, wait for a query to complete or the
# config to be updated.
if num_finished == 0:
logger.debug("All replicas are busy, waiting for a free replica.")
await asyncio.wait(
self._all_query_refs + [self.config_updated_event.wait()],
return_when=asyncio.FIRST_COMPLETED,
)
if self.config_updated_event.is_set():
self.config_updated_event.clear()
            # A free replica should be available now; loop again and try to
            # assign this query to a replica.
assigned_ref = self._try_assign_replica(query)
self.num_queued_queries -= 1
self.num_queued_queries_gauge.set(
self.num_queued_queries, tags={"endpoint": endpoint}
)
return assigned_ref
class Router:
def __init__(
self,
controller_handle: ActorHandle,
deployment_name: str,
event_loop: asyncio.BaseEventLoop = None,
):
"""Router process incoming queries: assign a replica.
Args:
controller_handle(ActorHandle): The controller handle.
"""
self._event_loop = event_loop
self._replica_set = ReplicaSet(deployment_name, event_loop)
# -- Metrics Registration -- #
self.num_router_requests = metrics.Counter(
"serve_num_router_requests",
description="The number of requests processed by the router.",
tag_keys=("deployment",),
)
self.num_router_requests.set_default_tags({"deployment": deployment_name})
self.long_poll_client = LongPollClient(
controller_handle,
{
(
LongPollNamespace.RUNNING_REPLICAS,
deployment_name,
): self._replica_set.update_running_replicas,
},
call_in_event_loop=event_loop,
)
async def assign_request(
self,
request_meta: RequestMetadata,
*request_args,
**request_kwargs,
):
"""Assign a query and returns an object ref represent the result"""
self.num_router_requests.inc()
return await self._replica_set.assign_replica(
Query(
args=list(request_args),
kwargs=request_kwargs,
metadata=request_meta,
)
)
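# A minimal, self-contained sketch of the load-balancing policy implemented by
# ReplicaSet._try_assign_replica above: round-robin via itertools.cycle,
# skipping replicas that are at max_concurrent_queries. The replica class here
# is illustrative and is not part of ray.serve; itertools is already imported
# at the top of this module.
class _SketchReplica:
    def __init__(self, tag, max_concurrent_queries):
        self.tag = tag
        self.max_concurrent_queries = max_concurrent_queries
        self.in_flight = 0


def _try_assign_sketch(replicas, replica_cycle):
    # At most one full pass over the replicas; None means all are overloaded.
    for _ in range(len(replicas)):
        replica = next(replica_cycle)
        if replica.in_flight >= replica.max_concurrent_queries:
            continue
        replica.in_flight += 1
        return replica
    return None


if __name__ == "__main__":
    replicas = [_SketchReplica("a", 1), _SketchReplica("b", 1)]
    replica_cycle = itertools.cycle(replicas)
    assert _try_assign_sketch(replicas, replica_cycle).tag == "a"
    assert _try_assign_sketch(replicas, replica_cycle).tag == "b"
    assert _try_assign_sketch(replicas, replica_cycle) is None  # all busy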
|
|
import json
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import pytest
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files.models import File
from files.utils import find_jetpacks, is_beta, PackageJSONExtractor
from versions.models import Version
pytestmark = pytest.mark.django_db
def test_is_beta():
assert not is_beta('1.2')
assert is_beta('1.2a')
assert is_beta('1.2alpha')
assert is_beta('1.2a1')
assert is_beta('1.2alpha1')
assert is_beta('1.2a123')
assert is_beta('1.2alpha123')
assert is_beta('1.2b')
assert is_beta('1.2beta')
assert is_beta('1.2b1')
assert is_beta('1.2beta1')
assert is_beta('1.2b123')
    assert is_beta('1.2beta123')
class TestFindJetpacks(amo.tests.TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestFindJetpacks, self).setUp()
File.objects.update(jetpack_version='1.0')
self.file = File.objects.filter(version__addon=3615).get()
def test_success(self):
files = find_jetpacks('1.0', '1.1')
eq_(files, [self.file])
def test_skip_autorepackage(self):
Addon.objects.update(auto_repackage=False)
eq_(find_jetpacks('1.0', '1.1'), [])
def test_minver(self):
files = find_jetpacks('1.1', '1.2')
eq_(files, [self.file])
eq_(files[0].needs_upgrade, False)
def test_maxver(self):
files = find_jetpacks('.1', '1.0')
eq_(files, [self.file])
eq_(files[0].needs_upgrade, False)
def test_unreviewed_files_plus_reviewed_file(self):
# We upgrade unreviewed files up to the latest reviewed file.
v = Version.objects.create(addon_id=3615)
new_file = File.objects.create(version=v, jetpack_version='1.0')
        v = Version.objects.create(addon_id=3615)
new_file2 = File.objects.create(version=v, jetpack_version='1.0')
eq_(new_file.status, amo.STATUS_UNREVIEWED)
eq_(new_file2.status, amo.STATUS_UNREVIEWED)
files = find_jetpacks('1.0', '1.1')
eq_(files, [self.file, new_file, new_file2])
assert all(f.needs_upgrade for f in files)
# Now self.file will not need an upgrade since we skip old versions.
new_file.update(status=amo.STATUS_PUBLIC)
files = find_jetpacks('1.0', '1.1')
eq_(files, [self.file, new_file, new_file2])
eq_(files[0].needs_upgrade, False)
assert all(f.needs_upgrade for f in files[1:])
def test_ignore_non_builder_jetpacks(self):
File.objects.update(builder_version=None)
files = find_jetpacks('.1', '1.0', from_builder_only=True)
eq_(files, [])
def test_find_builder_jetpacks_only(self):
File.objects.update(builder_version='2.0.1')
files = find_jetpacks('.1', '1.0', from_builder_only=True)
eq_(files, [self.file])
class TestPackageJSONExtractor(amo.tests.TestCase):
@contextmanager
def extractor(self, base_data):
with NamedTemporaryFile() as f:
f.write(json.dumps(base_data))
f.flush()
yield PackageJSONExtractor(f.name)
def create_appversion(self, name, version):
return AppVersion.objects.create(application=amo.APPS[name].id,
version=version)
def test_guid(self):
"""Use id for the guid."""
with self.extractor({'id': 'some-id'}) as extractor:
eq_(extractor.parse()['guid'], 'some-id')
def test_name_for_guid_if_no_id(self):
"""Use the name for the guid if there is no id."""
with self.extractor({'name': 'addon-name'}) as extractor:
eq_(extractor.parse()['guid'], 'addon-name')
def test_type(self):
"""Package.json addons are always ADDON_EXTENSION."""
with self.extractor({}) as extractor:
eq_(extractor.parse()['type'], amo.ADDON_EXTENSION)
def test_no_restart(self):
"""Package.json addons are always no-restart."""
with self.extractor({}) as extractor:
eq_(extractor.parse()['no_restart'], True)
def test_name_from_title_with_name(self):
"""Use the title for the name."""
data = {'title': 'The Addon Title', 'name': 'the-addon-name'}
with self.extractor(data) as extractor:
eq_(extractor.parse()['name'], 'The Addon Title')
def test_name_from_name_without_title(self):
"""Use the name for the name if there is no title."""
with self.extractor({'name': 'the-addon-name'}) as extractor:
eq_(extractor.parse()['name'], 'the-addon-name')
def test_version(self):
"""Use version for the version."""
with self.extractor({'version': '23.0.1'}) as extractor:
eq_(extractor.parse()['version'], '23.0.1')
def test_homepage(self):
"""Use homepage for the homepage."""
with self.extractor({'homepage': 'http://my-addon.org'}) as extractor:
eq_(extractor.parse()['homepage'], 'http://my-addon.org')
def test_summary(self):
"""Use description for the summary."""
with self.extractor({'description': 'An addon.'}) as extractor:
eq_(extractor.parse()['summary'], 'An addon.')
def test_apps(self):
"""Use engines for apps."""
firefox_version = self.create_appversion('firefox', '33.0a1')
thunderbird_version = self.create_appversion('thunderbird', '33.0a1')
data = {
'engines': {
'firefox': '>=33.0a1',
'thunderbird': '>=33.0a1',
},
}
with self.extractor(data) as extractor:
apps = extractor.parse()['apps']
apps_dict = dict((app.appdata.short, app) for app in apps)
assert sorted(apps_dict.keys()) == ['firefox', 'thunderbird']
assert apps_dict['firefox'].min == firefox_version
assert apps_dict['firefox'].max == firefox_version
assert apps_dict['thunderbird'].min == thunderbird_version
assert apps_dict['thunderbird'].max == thunderbird_version
def test_unknown_apps_are_ignored(self):
"""Unknown engines get ignored."""
self.create_appversion('firefox', '33.0a1')
self.create_appversion('thunderbird', '33.0a1')
data = {
'engines': {
'firefox': '>=33.0a1',
'thunderbird': '>=33.0a1',
'node': '>=0.10',
},
}
with self.extractor(data) as extractor:
apps = extractor.parse()['apps']
engines = [app.appdata.short for app in apps]
assert sorted(engines) == ['firefox', 'thunderbird'] # Not node.
def test_invalid_app_versions_are_ignored(self):
"""Valid engines with invalid versions are ignored."""
firefox_version = self.create_appversion('firefox', '33.0a1')
data = {
'engines': {
'firefox': '>=33.0a1',
'fennec': '>=33.0a1',
},
}
with self.extractor(data) as extractor:
apps = extractor.parse()['apps']
eq_(len(apps), 1)
eq_(apps[0].appdata.short, 'firefox')
eq_(apps[0].min, firefox_version)
eq_(apps[0].max, firefox_version)
def test_fennec_is_treated_as_android(self):
"""Treat the fennec engine as android."""
android_version = self.create_appversion('android', '33.0a1')
data = {
'engines': {
'fennec': '>=33.0a1',
'node': '>=0.10',
},
}
with self.extractor(data) as extractor:
apps = extractor.parse()['apps']
eq_(apps[0].appdata.short, 'android')
eq_(apps[0].min, android_version)
eq_(apps[0].max, android_version)
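# For illustration only: a regex consistent with the behaviour exercised by
# test_is_beta above. This is a sketch, not necessarily the actual
# implementation in files.utils.
import re

_BETA_SKETCH_RE = re.compile(r'(a|alpha|b|beta)\d*$')


def _is_beta_sketch(version):
    return bool(_BETA_SKETCH_RE.search(version))


assert not _is_beta_sketch('1.2')
assert _is_beta_sketch('1.2alpha123')
assert _is_beta_sketch('1.2b1')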
|
|
from .. import util
from . import util as import_util
import sys
import unittest
import importlib
from test import support
class ParentModuleTests:
"""Importing a submodule should import the parent modules."""
def test_import_parent(self):
with util.mock_spec('pkg.__init__', 'pkg.module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('pkg.module')
self.assertIn('pkg', sys.modules)
def test_bad_parent(self):
with util.mock_spec('pkg.module') as mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises(ImportError) as cm:
self.__import__('pkg.module')
self.assertEqual(cm.exception.name, 'pkg')
def test_raising_parent_after_importing_child(self):
def __init__():
import pkg.module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.module',
module_code={'pkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises(ZeroDivisionError):
self.__import__('pkg')
self.assertNotIn('pkg', sys.modules)
self.assertIn('pkg.module', sys.modules)
with self.assertRaises(ZeroDivisionError):
self.__import__('pkg.module')
self.assertNotIn('pkg', sys.modules)
self.assertIn('pkg.module', sys.modules)
def test_raising_parent_after_relative_importing_child(self):
def __init__():
from . import module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.module',
module_code={'pkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises((ZeroDivisionError, ImportError)):
# This raises ImportError on the "from . import module"
# line, not sure why.
self.__import__('pkg')
self.assertNotIn('pkg', sys.modules)
with self.assertRaises((ZeroDivisionError, ImportError)):
self.__import__('pkg.module')
self.assertNotIn('pkg', sys.modules)
# XXX False
#self.assertIn('pkg.module', sys.modules)
def test_raising_parent_after_double_relative_importing_child(self):
def __init__():
from ..subpkg import module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.subpkg.__init__',
'pkg.subpkg.module',
module_code={'pkg.subpkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises((ZeroDivisionError, ImportError)):
# This raises ImportError on the "from ..subpkg import module"
# line, not sure why.
self.__import__('pkg.subpkg')
self.assertNotIn('pkg.subpkg', sys.modules)
with self.assertRaises((ZeroDivisionError, ImportError)):
self.__import__('pkg.subpkg.module')
self.assertNotIn('pkg.subpkg', sys.modules)
# XXX False
#self.assertIn('pkg.subpkg.module', sys.modules)
def test_module_not_package(self):
        # Trying to import a submodule from a non-package should raise
        # ImportError.
assert not hasattr(sys, '__path__')
with self.assertRaises(ImportError) as cm:
self.__import__('sys.no_submodules_here')
self.assertEqual(cm.exception.name, 'sys.no_submodules_here')
def test_module_not_package_but_side_effects(self):
# If a module injects something into sys.modules as a side-effect, then
# pick up on that fact.
name = 'mod'
subname = name + '.b'
def module_injection():
sys.modules[subname] = 'total bunk'
mock_spec = util.mock_spec('mod',
module_code={'mod': module_injection})
with mock_spec as mock:
with util.import_state(meta_path=[mock]):
try:
submodule = self.__import__(subname)
finally:
support.unload(subname)
Frozen_ParentTests, Source_ParentTests = util.test_both(
ParentModuleTests, __import__=import_util.__import__)
if __name__ == '__main__':
unittest.main()
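# Stdlib illustration of the behaviour under test, independent of the mock
# machinery above: importing a submodule binds its parent package too.
#
#   >>> import importlib, sys
#   >>> importlib.import_module('logging.handlers')
#   >>> 'logging' in sys.modules
#   True
#   >>> 'logging.handlers' in sys.modules
#   True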
|
|
from functools import partial
from urllib.parse import urlencode
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("Pelias", )
class Pelias(Geocoder):
"""Pelias geocoder.
Documentation at:
https://github.com/pelias/documentation
See also :class:`geopy.geocoders.GeocodeEarth` which is a Pelias-based
service provided by the developers of Pelias itself.
"""
geocode_path = '/v1/search'
reverse_path = '/v1/reverse'
def __init__(
self,
domain,
api_key=None,
*,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
scheme=None,
ssl_context=DEFAULT_SENTINEL,
adapter_factory=None
# Make sure to synchronize the changes of this signature in the
# inheriting classes (e.g. GeocodeEarth).
):
"""
:param str domain: Specify a domain for Pelias API.
:param str api_key: Pelias API key, optional.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
:param callable adapter_factory:
See :attr:`geopy.geocoders.options.default_adapter_factory`.
.. versionadded:: 2.0
"""
super().__init__(
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
adapter_factory=adapter_factory,
)
self.api_key = api_key
self.domain = domain.strip('/')
self.geocode_api = (
'%s://%s%s' % (self.scheme, self.domain, self.geocode_path)
)
self.reverse_api = (
'%s://%s%s' % (self.scheme, self.domain, self.reverse_path)
)
def geocode(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
boundary_rect=None,
country_bias=None,
language=None
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:type boundary_rect: list or tuple of 2 items of :class:`geopy.point.Point`
or ``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param boundary_rect: Coordinates to restrict search within.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:param str country_bias: Bias results to this country (ISO alpha-3).
:param str language: Preferred language in which to return results.
Either uses standard
`RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
accept-language string or a simple comma-separated
list of language codes.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {'text': query}
if self.api_key:
params.update({
'api_key': self.api_key
})
if boundary_rect:
lon1, lat1, lon2, lat2 = self._format_bounding_box(
boundary_rect, "%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s").split(',')
params['boundary.rect.min_lon'] = lon1
params['boundary.rect.min_lat'] = lat1
params['boundary.rect.max_lon'] = lon2
params['boundary.rect.max_lat'] = lat2
if country_bias:
params['boundary.country'] = country_bias
if language:
params["lang"] = language
url = "?".join((self.geocode_api, urlencode(params)))
logger.debug("%s.geocode_api: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def reverse(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
language=None
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str language: Preferred language in which to return results.
Either uses standard
`RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
accept-language string or a simple comma-separated
list of language codes.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
try:
lat, lon = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'point.lat': lat,
'point.lon': lon,
}
if language:
params['lang'] = language
if self.api_key:
params.update({
'api_key': self.api_key
})
url = "?".join((self.reverse_api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def _parse_code(self, feature):
# Parse each resource.
latitude = feature.get('geometry', {}).get('coordinates', [])[1]
longitude = feature.get('geometry', {}).get('coordinates', [])[0]
placename = feature.get('properties', {}).get('name')
return Location(placename, (latitude, longitude), feature)
def _parse_json(self, response, exactly_one):
if response is None:
return None
features = response['features']
        if not features:
return None
if exactly_one:
return self._parse_code(features[0])
else:
return [self._parse_code(feature) for feature in features]
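# Hedged usage sketch (left as a comment since it needs a live Pelias server;
# the domain below is a placeholder):
#
#   geocoder = Pelias(domain='localhost:4000')
#   location = geocoder.geocode('Moscone Center, San Francisco')
#   if location is not None:
#       print(location.address, location.latitude, location.longitude)
#
#   # Reverse geocoding from a coordinate pair:
#   address = geocoder.reverse('37.7840, -122.4012')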
|
|
import re
import click
import six
from httpie.context import Environment
from httpie.core import main as httpie_main
from parsimonious.exceptions import ParseError, VisitationError
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from six import BytesIO
from six.moves.urllib.parse import urljoin
from .completion import ROOT_COMMANDS, ACTIONS, OPTION_NAMES, HEADER_NAMES
from .context import Context
from .utils import unescape
grammar = Grammar(r"""
command = mutation / immutation
mutation = concat_mut+ / nonconcat_mut
immutation = preview / action / help / exit / _
concat_mut = option_mut / full_quoted_mut / value_quoted_mut / unquoted_mut
nonconcat_mut = cd / rm
preview = _ tool _ (method _)? (urlpath _)? concat_mut*
action = _ method _ (urlpath _)? concat_mut*
urlpath = (~r"https?://" unquoted_string) / (!concat_mut string)
help = _ "help" _
exit = _ "exit" _
unquoted_mut = _ unquoted_mutkey mutop unquoted_mutval _
full_quoted_mut = full_squoted_mut / full_dquoted_mut
value_quoted_mut = value_squoted_mut / value_dquoted_mut
full_squoted_mut = _ "'" squoted_mutkey mutop squoted_mutval "'" _
full_dquoted_mut = _ '"' dquoted_mutkey mutop dquoted_mutval '"' _
value_squoted_mut = _ unquoted_mutkey mutop "'" squoted_mutval "'" _
value_dquoted_mut = _ unquoted_mutkey mutop '"' dquoted_mutval '"' _
mutop = ":" / "==" / "="
unquoted_mutkey = unquoted_mutkey_item+
unquoted_mutval = unquoted_stringitem*
unquoted_mutkey_item = unquoted_mutkey_char / escapeseq
unquoted_mutkey_char = ~r"[^\s'\"\\=:]"
squoted_mutkey = squoted_mutkey_item+
squoted_mutval = squoted_stringitem*
squoted_mutkey_item = squoted_mutkey_char / escapeseq
squoted_mutkey_char = ~r"[^\r\n'\\=:]"
dquoted_mutkey = dquoted_mutkey_item+
dquoted_mutval = dquoted_stringitem*
dquoted_mutkey_item = dquoted_mutkey_char / escapeseq
dquoted_mutkey_char = ~r'[^\r\n"\\=:]'
option_mut = flag_option_mut / value_option_mut
flag_option_mut = _ flag_optname _
flag_optname = "--json" / "-j" / "--form" / "-f" / "--verbose" / "-v" /
"--headers" / "-h" / "--body" / "-b" / "--stream" / "-S" /
"--download" / "-d" / "--continue" / "-c" / "--follow" /
"--check-status" / "--ignore-stdin" / "--help" /
"--version" / "--traceback" / "--debug"
value_option_mut = _ value_optname ~r"(\s+|=)" string _
value_optname = "--pretty" / "--style" / "-s" / "--print" / "-p" /
"--output" / "-o" / "--session" / "--session-read-only" /
"--auth" / "-a" / "--auth-type" / "--proxy" / "--verify" /
"--cert" / "--cert-key" / "--timeout"
cd = _ "cd" _ string _
rm = (_ "rm" _ "*" _) / (_ "rm" _ ~r"\-(h|q|b|o)" _ mutkey _)
tool = "httpie" / "curl"
method = ~r"get"i / ~r"head"i / ~r"post"i / ~r"put"i / ~r"delete"i /
~r"patch"i
mutkey = unquoted_mutkey / ("'" squoted_mutkey "'") /
('"' dquoted_mutkey '"') / flag_optname / value_optname
string = quoted_string / unquoted_string
quoted_string = ('"' dquoted_stringitem* '"') /
("'" squoted_stringitem* "'")
unquoted_string = unquoted_stringitem+
dquoted_stringitem = dquoted_stringchar / escapeseq
squoted_stringitem = squoted_stringchar / escapeseq
unquoted_stringitem = unquoted_stringchar / escapeseq
dquoted_stringchar = ~r'[^\r\n"\\]'
squoted_stringchar = ~r"[^\r\n'\\]"
unquoted_stringchar = ~r"[^\s'\"\\]"
escapeseq = ~r"\\."
_ = ~r"\s*"
""")
def urljoin2(base, path, **kwargs):
if not base.endswith('/'):
base += '/'
url = urljoin(base, path, **kwargs)
if url.endswith('/') and not path.endswith('/'):
url = url[:-1]
return url
def generate_help_text():
"""Return a formatted string listing commands, HTTPie options, and HTTP
actions.
"""
def generate_cmds_with_explanations(summary, cmds):
text = '{0}:\n'.format(summary)
for cmd, explanation in cmds:
text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation)
return text + '\n'
text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items())
text += generate_cmds_with_explanations('Options', OPTION_NAMES.items())
text += generate_cmds_with_explanations('Actions', ACTIONS.items())
text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items())
return text
class ExecutionVisitor(NodeVisitor):
def __init__(self, context):
super(ExecutionVisitor, self).__init__()
self.context = context
self.context_override = Context(context.url)
self.method = None
self.tool = None
def visit_method(self, node, children):
self.method = node.text
return node
def visit_urlpath(self, node, children):
path = node.text
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_cd(self, node, children):
_, _, _, path, _ = children
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_rm(self, node, children):
children = children[0]
kind = children[3].text
if kind == '*':
# Clear context
for target in [self.context.headers,
self.context.querystring_params,
self.context.body_params,
self.context.options]:
target.clear()
return node
name = children[5]
if kind == '-h':
target = self.context.headers
elif kind == '-q':
target = self.context.querystring_params
elif kind == '-b':
target = self.context.body_params
else:
assert kind == '-o'
target = self.context.options
del target[name]
return node
def visit_help(self, node, children):
click.echo_via_pager(generate_help_text())
return node
def visit_exit(self, node, children):
self.context.should_exit = True
return node
def visit_mutkey(self, node, children):
if isinstance(children[0], list):
return children[0][1]
return children[0]
def _mutate(self, node, key, op, val):
if op == ':':
target = self.context_override.headers
elif op == '==':
target = self.context_override.querystring_params
elif op == '=':
target = self.context_override.body_params
target[key] = val
return node
def visit_unquoted_mut(self, node, children):
_, key, op, val, _ = children
return self._mutate(node, key, op, val)
def visit_full_squoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_full_dquoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_squoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_dquoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_unquoted_mutkey(self, node, children):
return unescape(node.text)
def visit_squoted_mutkey(self, node, children):
return node.text
def visit_dquoted_mutkey(self, node, children):
return node.text
def visit_mutop(self, node, children):
return node.text
def visit_unquoted_mutval(self, node, children):
return unescape(node.text)
def visit_squoted_mutval(self, node, children):
return node.text
def visit_dquoted_mutval(self, node, children):
return node.text
def visit_flag_option_mut(self, node, children):
_, key, _ = children
self.context_override.options[key] = None
return node
def visit_flag_optname(self, node, children):
return node.text
def visit_value_option_mut(self, node, children):
_, key, _, val, _ = children
self.context_override.options[key] = val
return node
def visit_value_optname(self, node, children):
return node.text
def visit_string(self, node, children):
return children[0]
def visit_unquoted_string(self, node, children):
return unescape(node.text)
def visit_quoted_string(self, node, children):
return node.text[1:-1]
def visit_tool(self, node, children):
self.tool = node.text
return node
def visit_mutation(self, node, children):
self.context.update(self.context_override)
return node
def _final_context(self):
context = self.context.copy()
context.update(self.context_override)
return context
def visit_immutation(self, node, children):
context = self._final_context()
child_type = children[0].expr_name
if child_type == 'preview':
if self.tool == 'httpie':
command = ['http'] + context.httpie_args(self.method,
quote=True)
else:
assert self.tool == 'curl'
command = ['curl'] + context.curl_args(self.method, quote=True)
click.echo(' '.join(command))
elif child_type == 'action':
output = BytesIO()
try:
env = Environment(stdout=output, is_windows=False)
httpie_main(context.httpie_args(self.method), env=env)
content = output.getvalue()
finally:
output.close()
# XXX: Work around a bug of click.echo_via_pager(). When you pass
# a bytestring to echo_via_pager(), it converts the bytestring with
# str(b'abc'), which makes it "b'abc'".
if six.PY2:
content = unicode(content, 'utf-8') # noqa
else:
content = str(content, 'utf-8')
click.echo_via_pager(content)
return node
def generic_visit(self, node, children):
if not node.expr_name and node.children:
if len(children) == 1:
return children[0]
return children
return node
def execute(command, context):
try:
root = grammar.parse(command)
except ParseError as err:
# TODO: Better error message
part = command[err.pos:err.pos + 10]
click.secho('Syntax error near "%s"' % part, err=True, fg='red')
else:
visitor = ExecutionVisitor(context)
try:
visitor.visit(root)
except VisitationError as err:
exc_class = err.original_class
if exc_class is KeyError:
# XXX: Need to parse VisitationError error message to get the
# original error message as VisitationError doesn't hold the
# original exception object
key = re.search(r"KeyError: u?'(.*)'", str(err)).group(1)
click.secho("Key '%s' not found" % key, err=True,
fg='red')
else:
# TODO: Better error message
click.secho(str(err), err=True, fg='red')
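# Hedged usage sketch (Context comes from .context; the URL is illustrative):
#
#   context = Context('http://localhost:8000')
#   execute('cd api/v1', context)               # visit_cd updates context.url
#   execute('Authorization:token123', context)  # header mutation persists
#   execute('get /users limit==20', context)    # runs httpie, pages output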
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import forms
from horizon import tables
from keystoneclient.exceptions import Conflict # noqa
from openstack_dashboard import api
from openstack_dashboard import policy
class RescopeTokenToProject(tables.LinkAction):
name = "rescope"
verbose_name = _("Set as Active Project")
url = "switch_tenants"
def allowed(self, request, project):
        # Allow rescoping the token to any enabled project the user has a
        # role on (authorized_tenants) and is not currently scoped to.
return next((True for proj in request.user.authorized_tenants
if proj.id == project.id and
project.id != request.user.project_id and
project.enabled), False)
def get_link_url(self, project):
# redirects to the switch_tenants url which then will redirect
# back to this page
dash_url = reverse("horizon:identity:projects:index")
base_url = reverse(self.url, args=[project.id])
param = urlencode({"next": dash_url})
return "?".join([base_url, param])
class UpdateMembersLink(tables.LinkAction):
name = "users"
verbose_name = _("Manage Members")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:list_roles"))
def get_link_url(self, project):
step = 'update_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
def allowed(self, request, project):
if api.keystone.is_multi_domain_enabled():
# domain admin or cloud admin = True
# project admin or member = False
return api.keystone.is_domain_admin(request)
else:
return super(UpdateMembersLink, self).allowed(request, project)
class UpdateGroupsLink(tables.LinkAction):
name = "groups"
verbose_name = _("Modify Groups")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_groups"),)
def allowed(self, request, project):
if api.keystone.is_multi_domain_enabled():
# domain admin or cloud admin = True
# project admin or member = False
return api.keystone.is_domain_admin(request)
else:
return super(UpdateGroupsLink, self).allowed(request, project)
def get_link_url(self, project):
step = 'update_group_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class UsageLink(tables.LinkAction):
name = "usage"
verbose_name = _("View Usage")
url = "horizon:identity:projects:usage"
icon = "stats"
policy_rules = (("compute", "compute_extension:simple_tenant_usage:show"),)
def allowed(self, request, project):
return request.user.is_superuser
class CreateProject(tables.LinkAction):
name = "create"
verbose_name = _("Create Project")
url = "horizon:identity:projects:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (('identity', 'identity:create_project'),)
def allowed(self, request, project):
if api.keystone.is_multi_domain_enabled():
# domain admin or cloud admin = True
# project admin or member = False
return api.keystone.is_domain_admin(request)
else:
return api.keystone.keystone_can_edit_project()
class UpdateProject(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Project")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (('identity', 'identity:update_project'),)
policy_target_attrs = (("target.project.domain_id", "domain_id"),)
def allowed(self, request, project):
if api.keystone.is_multi_domain_enabled():
# domain admin or cloud admin = True
# project admin or member = False
return api.keystone.is_domain_admin(request)
else:
return api.keystone.keystone_can_edit_project()
class ModifyQuotas(tables.LinkAction):
name = "quotas"
verbose_name = _("Modify Quotas")
url = "horizon:identity:projects:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (('compute', "compute_extension:quotas:update"),)
def allowed(self, request, datum):
if api.keystone.VERSIONS.active < 3:
return True
else:
return api.keystone.is_cloud_admin(request)
def get_link_url(self, project):
step = 'update_quotas'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class DeleteTenantsAction(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Project",
u"Delete Projects",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Project",
u"Deleted Projects",
count
)
policy_rules = (("identity", "identity:delete_project"),)
policy_target_attrs = ("target.project.domain_id", "domain_id"),
def allowed(self, request, project):
if api.keystone.is_multi_domain_enabled() \
and not api.keystone.is_domain_admin(request):
return False
return api.keystone.keystone_can_edit_project()
def delete(self, request, obj_id):
api.keystone.tenant_delete(request, obj_id)
def handle(self, table, request, obj_ids):
response = \
super(DeleteTenantsAction, self).handle(table, request, obj_ids)
return response
class TenantFilterAction(tables.FilterAction):
def filter(self, table, tenants, filter_string):
"""Really naive case-insensitive search."""
# FIXME(gabriel): This should be smarter. Written for demo purposes.
q = filter_string.lower()
def comp(tenant):
if q in tenant.name.lower():
return True
return False
return filter(comp, tenants)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, project_id):
project_info = api.keystone.tenant_get(request, project_id,
admin=True)
return project_info
class UpdateCell(tables.UpdateAction):
def allowed(self, request, project, cell):
policy_rule = (("identity", "identity:update_project"),)
return (
(cell.column.name != 'enabled' or
request.user.project_id != cell.datum.id) and
api.keystone.keystone_can_edit_project() and
policy.check(policy_rule, request))
def update_cell(self, request, datum, project_id,
cell_name, new_cell_value):
# inline update project info
try:
project_obj = datum
            # update the changed attribute with the new value
setattr(project_obj, cell_name, new_cell_value)
api.keystone.tenant_update(
request,
project_id,
name=project_obj.name,
description=project_obj.description,
enabled=project_obj.enabled)
except Conflict:
            # Return a nicer error message about the name conflict; the
            # message from the exception is not that clear to users.
message = _("This name is already taken.")
raise ValidationError(message)
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class TenantsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'),
link=("horizon:identity:projects:detail"),
form_field=forms.CharField(max_length=64),
update_action=UpdateCell)
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'),
form_field=forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
required=False),
update_action=UpdateCell)
id = tables.Column('id', verbose_name=_('Project ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True,
filters=(filters.yesno, filters.capfirst),
form_field=forms.BooleanField(
label=_('Enabled'),
required=False),
update_action=UpdateCell)
if api.keystone.VERSIONS.active >= 3:
domain_name = tables.Column(
'domain_name', verbose_name=_('Domain Name'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
filters=(filters.yesno, filters.capfirst),
form_field=forms.BooleanField(
label=_('Enabled'),
required=False),
update_action=UpdateCell)
def get_project_detail_link(self, project):
# this method is an ugly monkey patch, needed because
# the column link method does not provide access to the request
if policy.check((("identity", "identity:get_project"),),
self.request, target={"project": project}):
return reverse("horizon:identity:projects:detail",
args=(project.id,))
return None
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(TenantsTable,
self).__init__(request, data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
# see the comment above about ugly monkey patches
self.columns['name'].get_link_url = self.get_project_detail_link
class Meta(object):
name = "tenants"
verbose_name = _("Projects")
row_class = UpdateRow
row_actions = (UpdateMembersLink, UpdateGroupsLink, UpdateProject,
UsageLink, ModifyQuotas, DeleteTenantsAction,
RescopeTokenToProject)
table_actions = (TenantFilterAction, CreateProject,
DeleteTenantsAction)
pagination_param = "tenant_marker"
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from hadoop.cluster import get_defaultfs
from libsentry.api2 import get_api
from libsentry.sentry_site import get_sentry_server_admin_groups
LOG = logging.getLogger(__name__)
def fetch_authorizables(request):
if request.GET['component'] == 'solr':
resp = JsonResponse(_fetch_collections(request))
elif request.GET['component'] == 'hive':
resp = _fetch_hive_path(request)
return resp
def _fetch_hive_path(request):
from beeswax.api import autocomplete
path = request.GET['path']
database = None
table = None
column = None
if path:
database = path
if '/' in path:
database, table = path.split('/', 1)
if '.' in table:
table, column = table.split('.', 1)
resp = autocomplete(request, database, table, column)
if database and request.GET['doas'] != request.user.username:
request.GET = request.GET.copy()
request.GET['doas'] = request.GET['doas']
resp = autocomplete(request, database, table, column)
return resp
def _fetch_collections(request):
from libsolr.api import SolrApi
from search.conf import SOLR_URL
path = request.GET['path']
item = None
name = None
if path:
item = path
if '/' in path:
item, name = path.split('/')
api = SolrApi(SOLR_URL.get(), request.user)
if not item:
return {"databases": ["collections", "configs"]}
elif item and name:
return {"authorizable_link": "/indexer/#edit/%s" % name, "extended_columns": [], "columns": [], "partition_keys": []}
elif item == 'collections':
return {"tables_meta": [{"comment": None, "type": "Table", "name": col} for col in api.collections2()]}
elif item == 'configs':
return {"tables_meta": [{"comment": None, "type": "Table", "name": conf} for conf in api.configs()]}
else:
raise PopupException(_('Authorizable %s could not be retrieved') % path)
def list_sentry_roles_by_group(request):
result = {'status': -1, 'message': 'Error'}
component = request.POST['component']
try:
if request.POST['groupName']:
groupName = request.POST['groupName']
else:
      # Admins can see everything; others see only the groups they belong to.
groupName = None if request.user.groups.filter(name__in=get_sentry_server_admin_groups()).exists() else '*'
roles = get_api(request.user, component).list_sentry_roles_by_group(groupName)
result['roles'] = sorted(roles, key=lambda role: role['name'])
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not retrieve roles")
if "couldn't be retrieved." in str(e):
result['roles'] = []
result['status'] = 0
else:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_by_role(request):
result = {'status': -1, 'message': 'Error'}
try:
serviceName = request.POST['server']
component = request.POST['component']
roleName = request.POST['roleName']
sentry_privileges = get_api(request.user, component).list_sentry_privileges_by_role(serviceName, roleName)
result['sentry_privileges'] = sorted(sentry_privileges, key=lambda privilege: '.'.join([auth['name'] for auth in privilege['authorizables']]))
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list sentry privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def _to_sentry_privilege(privilege):
return {
'component': privilege['component'],
'serviceName': privilege['serverName'],
'authorizables': [{'type': auth['type'], 'name': auth['name_']} for auth in privilege['authorizables']], # TODO URI {'type': 'URI', 'name': _massage_uri('/path')}
'action': '*' if privilege['action'].upper() == 'ALL' else privilege['action'],
'createTime': privilege['timestamp'],
'grantorPrincipal': privilege['grantorPrincipal'],
'grantOption': 1 if privilege['grantOption'] else 0,
}
def _hive_add_privileges(user, role, privileges, component):
api = get_api(user, component)
_privileges = []
for privilege in privileges:
if privilege['status'] not in ('deleted',):
api.alter_sentry_role_grant_privilege(role['name'], _to_sentry_privilege(privilege))
# Mocked until Sentry API returns the info. Not used currently as we refresh the whole role.
_privileges.append({
'timestamp': int(time.time()),
'database': privilege.get('dbName'),
'action': privilege.get('action'),
'scope': privilege.get('privilegeScope'),
'table': privilege.get('tableName'),
'column': privilege.get('columnName'),
'URI': privilege.get('URI'),
'server': privilege.get('serverName'),
'grantOption': privilege.get('grantOption') == 1
})
return _privileges
def _massage_uri(uri):
if uri:
if uri.startswith('hdfs:///'):
uri = uri.replace('hdfs://', get_defaultfs())
elif uri.startswith('/'):
uri = get_defaultfs() + uri
return uri
def _get_splitted_path(path):
parts = path.split('.')
db, table, column = '', '', ''
if len(parts) >= 1:
db = parts[0]
if len(parts) >= 2:
table = parts[1]
if len(parts) >= 3:
column = parts[2]
return db, table, column
def _drop_sentry_privilege(user, role, authorizable, component):
return get_api(user, component).alter_sentry_role_revoke_privilege(role['name'], _to_sentry_privilege(authorizable))
def create_role(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
component = request.POST['component']
api = get_api(request.user, component)
api.create_sentry_role(role['name'])
privileges = [privilege for privilege in role['privileges'] if privilege['status'] not in ('deleted', 'alreadydeleted')]
result['privileges'] = _hive_add_privileges(request.user, role, privileges, component)
api.alter_sentry_role_add_groups(role['name'], role['groups'])
result['role'] = {"name": role['name'], "groups": role['groups']}
result['message'] = _('Role created!')
result['status'] = 0
except Exception, e:
LOG.exception("could not create role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def update_role_groups(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
component = request.POST['component']
new_groups = set(role['groups']) - set(role['originalGroups'])
deleted_groups = set(role['originalGroups']) - set(role['groups'])
api = get_api(request.user, component)
if new_groups:
api.alter_sentry_role_add_groups(role['name'], new_groups)
if deleted_groups:
api.alter_sentry_role_delete_groups(role['name'], deleted_groups)
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not update role groups")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def save_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
component = request.POST['component']
new_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'new']
result['privileges'] = _hive_add_privileges(request.user, role, new_privileges, component)
deleted_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'deleted']
for privilege in deleted_privileges:
_drop_sentry_privilege(request.user, role, privilege, component)
modified_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'modified']
old_privileges_ids = [privilege['id'] for privilege in modified_privileges]
for privilege in role['originalPrivileges']:
if privilege['id'] in old_privileges_ids:
_drop_sentry_privilege(request.user, role, privilege, component)
    _hive_add_privileges(request.user, role, modified_privileges, component) # Run after the REVOKEs, as REVOKE ALL removes everything.
result['message'] = _('Privileges updated')
result['status'] = 0
except Exception, e:
LOG.exception("could not save privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def grant_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = json.loads(request.POST['roleName'])
privilege = json.loads(request.POST['privilege'])
component = request.POST['component']
result['privileges'] = _hive_add_privileges(request.user, {'name': roleName}, [privilege], component)
result['message'] = _('Privilege granted successfully to %s.') % roleName
result['status'] = 0
except Exception, e:
LOG.exception("could not grant privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def create_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
component = request.POST['component']
get_api(request.user, component).create_sentry_role(roleName)
result['message'] = _('Role and privileges created.')
result['status'] = 0
except Exception, e:
LOG.exception("could not create role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def drop_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
component = request.POST['component']
get_api(request.user, component).drop_sentry_role(roleName)
result['message'] = _('Role and privileges deleted.')
result['status'] = 0
except Exception, e:
LOG.exception("could not drop role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_by_authorizable(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = [request.POST['groupName']] if request.POST['groupName'] else None
serviceName = request.POST['server']
authorizableSet = [json.loads(request.POST['authorizableHierarchy'])]
component = request.POST['component']
_privileges = []
for authorizable, roles in get_api(request.user, component).list_sentry_privileges_by_authorizable(serviceName=serviceName, authorizableSet=authorizableSet, groups=groups):
for role, privileges in roles.iteritems():
for privilege in privileges:
privilege['roleName'] = role
_privileges.append(privilege)
result['privileges'] = sorted(_privileges, key=lambda privilege: privilege['roleName'])
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list privileges by authorizable")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def bulk_delete_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
component = request.POST['component']
for path in [path['path'] for path in checkedPaths]:
db, table, column = _get_splitted_path(path)
authorizableHierarchy.update({
'db': db,
'table': table,
'column': column,
})
get_api(request.user, component).drop_sentry_privileges(authorizableHierarchy)
result['message'] = _('Privileges deleted.')
result['status'] = 0
except Exception, e:
LOG.exception("could not bulk delete privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def bulk_add_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
privileges = json.loads(request.POST['privileges'])
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
component = request.POST['component']
privileges = [privilege for privilege in privileges if privilege['status'] == '']
for path in [path['path'] for path in checkedPaths]:
db, table, column = _get_splitted_path(path)
privilegeScope = 'COLUMN' if column else 'TABLE' if table else 'DATABASE' if db else 'SERVER'
authorizableHierarchy.update({
'db': db,
'table': table,
'column': column,
})
for privilege in privileges:
privilege['dbName'] = db
privilege['tableName'] = table
privilege['columnName'] = column
privilege['privilegeScope'] = privilegeScope
_hive_add_privileges(request.user, {'name': privilege['roleName']}, [privilege], component)
result['message'] = _('Privileges added.')
result['status'] = 0
except Exception, e:
LOG.exception("could not bulk add privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def rename_sentry_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
oldAuthorizable = json.loads(request.POST['oldAuthorizable'])
newAuthorizable = json.loads(request.POST['newAuthorizable'])
component = request.POST['component']
get_api(request.user, component).rename_sentry_privilege(oldAuthorizable, newAuthorizable)
result['message'] = _('Privilege renamed.')
result['status'] = 0
except Exception, e:
LOG.exception("could not rename privilege")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_for_provider(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = json.loads(request.POST['groups'])
roleSet = json.loads(request.POST['roleSet'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
component = request.POST['component']
sentry_privileges = get_api(request.user, component).list_sentry_privileges_for_provider(groups=groups, roleSet=roleSet, authorizableHierarchy=authorizableHierarchy)
result['sentry_privileges'] = sentry_privileges
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list privileges for provider")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
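# --- Minimal usage sketch (hypothetical; the URL route and privilege fields are assumptions, not part of this module) ---
# Each endpoint above returns JSON of the form {'status': 0, 'message': ...} on success
# and {'status': -1, 'message': <error text>} on failure. For example, with Django's test client:
# from django.test import Client
# import json
# c = Client()
# response = c.post('/security/api2/grant_privilege', {  # hypothetical URL mapping
# 'roleName': json.dumps('analyst'),
# 'privilege': json.dumps({'serverName': 'server1', 'dbName': 'default', 'action': 'SELECT'}),
# 'component': 'hive',
# })
# json.loads(response.content)  # -> {'status': 0, 'message': '...', 'privileges': [...]}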
|
|
import os.path
from abc import ABCMeta, abstractmethod
from functools import partial
from bistiming import SimpleTimer
import h5py
import h5sparse
from mkdir_p import mkdir_p
import numpy as np
import pandas as pd
from past.builtins import basestring
import scipy.sparse as ss
import six
from six.moves import cPickle
from .data_wrappers import PandasHDFDataset
SPARSE_FORMAT_SET = set(['csr', 'csc'])
def check_redundant_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key):
redundant_key_set = result_dict_key_set - will_generate_key_set
if len(redundant_key_set) > 0:
raise ValueError("The return keys of function {} {} have "
"redundant keys {} while generating {} {}.".format(
function_name, result_dict_key_set,
redundant_key_set, handler_key,
will_generate_key_set))
def check_exact_match_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key):
if will_generate_key_set != result_dict_key_set:
raise ValueError("The return keys of function {} {} doesn't "
"match {} while generating {}.".format(
function_name, result_dict_key_set,
will_generate_key_set, handler_key))
class DataHandler(six.with_metaclass(ABCMeta, object)):
@abstractmethod
def can_skip(self, data_key):
pass
@abstractmethod
def get(self, keys):
pass
def get_function_kwargs(self, will_generate_keys, data):
# pylint: disable=unused-argument
kwargs = {}
if len(data) > 0:
kwargs['data'] = data
return kwargs
def check_result_dict_keys(self, result_dict, will_generate_keys,
function_name, handler_key):
will_generate_key_set = set(will_generate_keys)
result_dict_key_set = set(result_dict.keys())
check_exact_match_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key)
@abstractmethod
def write_data(self, result_dict):
pass
def bundle(self, key, path, new_key):
"""Copy the data to another HDF5 file with new key."""
data = self.get(key)
with h5py.File(path) as h5f:
if ss.isspmatrix(data) or isinstance(data, h5sparse.Dataset):
h5f = h5sparse.Group(h5f)
h5f.create_dataset(new_key, data=data)
class H5pyDataHandler(DataHandler):
def __init__(self, hdf_path):
hdf_dir = os.path.dirname(hdf_path)
if hdf_dir != '':
mkdir_p(hdf_dir)
self.h5f = h5py.File(hdf_path, 'a')
def can_skip(self, data_key):
if data_key in self.h5f:
return True
return False
def get(self, key):
if isinstance(key, basestring):
return h5sparse.Group(self.h5f)[key]
return {k: h5sparse.Group(self.h5f)[k] for k in key}
def get_function_kwargs(self, will_generate_keys, data,
manually_create_dataset=False):
kwargs = {}
if len(data) > 0:
kwargs['data'] = data
if manually_create_dataset is True:
kwargs['create_dataset_functions'] = {
k: partial(self.h5f.create_dataset, k)
for k in will_generate_keys
}
elif manually_create_dataset in SPARSE_FORMAT_SET:
kwargs['create_dataset_functions'] = {
k: partial(h5sparse.Group(self.h5f).create_dataset, k)
for k in will_generate_keys
}
return kwargs
def check_result_dict_keys(self, result_dict, will_generate_keys,
function_name, handler_key,
manually_create_dataset=False):
will_generate_key_set = set(will_generate_keys)
result_dict_key_set = set(result_dict.keys())
if manually_create_dataset:
check_redundant_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key)
# TODO: check that all the datasets are either manually created or in
# result_dict_key_set
else:
check_exact_match_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key)
def write_data(self, result_dict):
for key, result in six.iteritems(result_dict):
if ss.isspmatrix(result):
if np.isnan(result.data).any():
raise ValueError("data {} have nan".format(key))
elif np.isnan(result).any():
raise ValueError("data {} have nan".format(key))
with SimpleTimer("Writing generated data {} to hdf5 file"
.format(key),
end_in_new_line=False):
if key in self.h5f:
# self.h5f[key][...] = result
raise NotImplementedError("Overwriting not supported.")
else:
if (isinstance(result, ss.csc_matrix)
or isinstance(result, ss.csr_matrix)):
# sparse matrix
h5sparse.Group(self.h5f).create_dataset(key,
data=result)
else:
self.h5f.create_dataset(key, data=result)
self.h5f.flush()
class PandasHDFDataHandler(DataHandler):
def __init__(self, hdf_path):
hdf_dir = os.path.dirname(hdf_path)
if hdf_dir != '':
mkdir_p(hdf_dir)
self.hdf_store = pd.HDFStore(hdf_path)
def can_skip(self, data_key):
if data_key in self.hdf_store:
return True
return False
def get(self, key):
if isinstance(key, basestring):
return PandasHDFDataset(self.hdf_store, key)
return {k: PandasHDFDataset(self.hdf_store, k) for k in key}
def get_function_kwargs(self, will_generate_keys, data,
manually_append=False):
kwargs = {}
if len(data) > 0:
kwargs['data'] = data
if manually_append is True:
kwargs['append_functions'] = {
k: partial(self.hdf_store.append, k)
for k in will_generate_keys
}
return kwargs
def check_result_dict_keys(self, result_dict, will_generate_keys,
function_name, handler_key,
manually_append=False):
will_generate_key_set = set(will_generate_keys)
result_dict_key_set = set(result_dict.keys())
if manually_append:
check_redundant_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key)
# TODO: check that all the datasets are either manually created or in
# result_dict_key_set
else:
check_exact_match_keys(result_dict_key_set, will_generate_key_set,
function_name, handler_key)
def write_data(self, result_dict):
for key, result in six.iteritems(result_dict):
is_null = False
if isinstance(result, pd.DataFrame):
if result.isnull().any().any():
is_null = True
elif isinstance(result, pd.Series):
if result.isnull().any():
is_null = True
else:
raise ValueError("PandasHDFDataHandler doesn't support type "
"{} (in key {})".format(type(result), key))
if is_null:
raise ValueError("data {} have nan".format(key))
with SimpleTimer("Writing generated data {} to hdf5 file"
.format(key),
end_in_new_line=False):
if (isinstance(result, pd.DataFrame)
and isinstance(result.index, pd.MultiIndex)
and isinstance(result.columns, pd.MultiIndex)):
self.hdf_store.put(key, result)
else:
self.hdf_store.put(key, result, format='table')
self.hdf_store.flush(fsync=True)
def bundle(self, key, path, new_key):
"""Copy the data to another HDF5 file with new key."""
data = self.get(key).value
data.to_hdf(path, new_key)
class MemoryDataHandler(DataHandler):
def __init__(self):
self.data = {}
def can_skip(self, data_key):
if data_key in self.data:
return True
return False
def get(self, key):
if isinstance(key, basestring):
return self.data[key]
return {k: self.data[k] for k in key}
def write_data(self, result_dict):
self.data.update(result_dict)
class PickleDataHandler(DataHandler):
def __init__(self, pickle_dir):
mkdir_p(pickle_dir)
self.pickle_dir = pickle_dir
def can_skip(self, data_key):
data_path = os.path.join(self.pickle_dir, data_key + ".pkl")
if os.path.exists(data_path):
return True
return False
def get(self, key):
if isinstance(key, basestring):
with open(os.path.join(self.pickle_dir, key + ".pkl"), "rb") as fp:
return cPickle.load(fp)
data = {}
for k in key:
with open(os.path.join(self.pickle_dir, k + ".pkl"), "rb") as fp:
data[k] = cPickle.load(fp)
return data
def write_data(self, result_dict):
for key, val in six.viewitems(result_dict):
pickle_path = os.path.join(self.pickle_dir, key + ".pkl")
with SimpleTimer("Writing generated data %s to pickle file" % key,
end_in_new_line=False), \
open(pickle_path, "wb") as fp:
cPickle.dump(val, fp, protocol=cPickle.HIGHEST_PROTOCOL)
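# --- Minimal usage sketch (not part of the original module) ---
# MemoryDataHandler illustrates the DataHandler contract shared by all handlers above:
# handler = MemoryDataHandler()
# handler.write_data({'features': [1, 2, 3]})
# handler.can_skip('features')  # -> True, the data was already generated
# handler.get('features')       # -> [1, 2, 3]
# handler.get(['features'])     # -> {'features': [1, 2, 3]}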
|
|
import argparse
import ast
import codecs
import re
import os
import sys
from configparser import SafeConfigParser, DEFAULTSECT
from syncplay import constants, utils, version, milestone
from syncplay.messages import getMessage, setLanguage, isValidLanguage
from syncplay.players.playerFactory import PlayerFactory
from syncplay.utils import isMacOS, isWindows
class InvalidConfigValue(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ConfigurationGetter(object):
def __init__(self):
self._config = {
"host": None,
"port": constants.DEFAULT_PORT,
"name": None,
"debug": False,
"forceGuiPrompt": True,
"noGui": False,
"noStore": False,
"room": "",
"roomList": [],
"password": None,
"playerPath": None,
"perPlayerArguments": None,
"mediaSearchDirectories": None,
"sharedPlaylistEnabled": True,
"loopAtEndOfPlaylist": False,
"loopSingleFiles": False,
"onlySwitchToTrustedDomains": True,
"autosaveJoinsToList": True,
"trustedDomains": constants.DEFAULT_TRUSTED_DOMAINS,
"file": None,
"playerArgs": [],
"playerClass": None,
"slowdownThreshold": constants.DEFAULT_SLOWDOWN_KICKIN_THRESHOLD,
"rewindThreshold": constants.DEFAULT_REWIND_THRESHOLD,
"fastforwardThreshold": constants.DEFAULT_FASTFORWARD_THRESHOLD,
"rewindOnDesync": True,
"slowOnDesync": True,
"fastforwardOnDesync": True,
"dontSlowDownWithMe": False,
"filenamePrivacyMode": constants.PRIVACY_SENDRAW_MODE,
"filesizePrivacyMode": constants.PRIVACY_SENDRAW_MODE,
"pauseOnLeave": False,
"readyAtStart": False,
"unpauseAction": constants.UNPAUSE_IFOTHERSREADY_MODE,
"autoplayInitialState": None,
"autoplayMinUsers": -1,
"autoplayRequireSameFilenames": True,
"clearGUIData": False,
"language": "",
"checkForUpdatesAutomatically": None,
"lastCheckedForUpdates": "",
"resetConfig": False,
"showOSD": True,
"showOSDWarnings": True,
"showSlowdownOSD": True,
"showDifferentRoomOSD": False,
"showSameRoomOSD": True,
"showNonControllerOSD": False,
"showContactInfo": True,
"showDurationNotification": True,
"chatInputEnabled": True,
"chatInputFontFamily": 'sans-serif',
"chatInputRelativeFontSize": constants.DEFAULT_CHAT_FONT_SIZE,
"chatInputFontWeight": constants.DEFAULT_CHAT_FONT_WEIGHT,
"chatInputFontUnderline": False,
"chatInputFontColor": constants.DEFAULT_CHAT_INPUT_FONT_COLOR,
"chatInputPosition": constants.INPUT_POSITION_TOP,
"chatDirectInput": False,
"chatOutputEnabled": True,
"chatOutputFontFamily": 'sans-serif',
"chatOutputRelativeFontSize": constants.DEFAULT_CHAT_FONT_SIZE,
"chatOutputFontWeight": constants.DEFAULT_CHAT_FONT_WEIGHT,
"chatOutputFontUnderline": False,
"chatOutputMode": constants.CHATROOM_MODE,
"chatMaxLines": 7,
"chatTopMargin": 25,
"chatLeftMargin": 20,
"chatBottomMargin": 30,
"chatMoveOSD": True,
"chatOSDMargin": 110,
"notificationTimeout": 3,
"alertTimeout": 5,
"chatTimeout": 7,
"publicServers": [],
"loadPlaylistFromFile": None
}
self._defaultConfig = self._config.copy()
#
# Custom validation in self._validateArguments
#
self._required = [
"host",
"port",
"room",
"playerPath",
"playerClass",
]
self._boolean = [
"debug",
"forceGuiPrompt",
"noGui",
"noStore",
"dontSlowDownWithMe",
"pauseOnLeave",
"readyAtStart",
"autoplayRequireSameFilenames",
"clearGUIData",
"rewindOnDesync",
"slowOnDesync",
"fastforwardOnDesync",
"pauseOnLeave",
"clearGUIData",
"resetConfig",
"showOSD",
"showOSDWarnings",
"showSlowdownOSD",
"showDifferentRoomOSD",
"showSameRoomOSD",
"showNonControllerOSD",
"showDurationNotification",
"sharedPlaylistEnabled",
"loopAtEndOfPlaylist",
"loopSingleFiles",
"onlySwitchToTrustedDomains",
"autosaveJoinsToList",
"chatInputEnabled",
"chatInputFontUnderline",
"chatDirectInput",
"chatMoveOSD",
"chatOutputEnabled",
"chatOutputFontUnderline"
]
self._tristate = [
"checkForUpdatesAutomatically",
"autoplayInitialState",
]
self._serialised = [
"roomList",
"perPlayerArguments",
"mediaSearchDirectories",
"trustedDomains",
"publicServers",
]
self._numeric = [
"slowdownThreshold",
"rewindThreshold",
"fastforwardThreshold",
"autoplayMinUsers",
"chatInputRelativeFontSize",
"chatInputFontWeight",
"chatOutputFontWeight",
"chatOutputRelativeFontSize",
"chatMaxLines",
"chatTopMargin",
"chatLeftMargin",
"chatBottomMargin",
"chatOSDMargin",
"notificationTimeout",
"alertTimeout",
"chatTimeout"
]
self._hexadecimal = [
"chatInputFontColor"
]
self._iniStructure = {
"server_data": ["host", "port", "password"],
"client_settings": [
"name", "room", "roomList", "playerPath",
"perPlayerArguments", "slowdownThreshold",
"rewindThreshold", "fastforwardThreshold",
"slowOnDesync", "rewindOnDesync",
"fastforwardOnDesync", "dontSlowDownWithMe",
"forceGuiPrompt", "filenamePrivacyMode",
"filesizePrivacyMode", "unpauseAction",
"pauseOnLeave", "readyAtStart", "autoplayMinUsers",
"autoplayInitialState", "mediaSearchDirectories",
"sharedPlaylistEnabled", "loopAtEndOfPlaylist",
"loopSingleFiles",
"autoplayRequireSameFilenames",
"onlySwitchToTrustedDomains", "trustedDomains", "publicServers"],
"gui": [
"autosaveJoinsToList",
"showOSD", "showOSDWarnings", "showSlowdownOSD",
"showDifferentRoomOSD", "showSameRoomOSD",
"showNonControllerOSD", "showDurationNotification",
"chatInputEnabled", "chatInputFontUnderline",
"chatInputFontFamily", "chatInputRelativeFontSize",
"chatInputFontWeight", "chatInputFontColor",
"chatInputPosition", "chatDirectInput",
"chatOutputFontFamily", "chatOutputRelativeFontSize",
"chatOutputFontWeight", "chatOutputFontUnderline",
"chatOutputMode", "chatMaxLines",
"chatTopMargin", "chatLeftMargin",
"chatBottomMargin", "chatDirectInput",
"chatMoveOSD", "chatOSDMargin",
"notificationTimeout", "alertTimeout",
"chatTimeout", "chatOutputEnabled"],
"general": [
"language", "checkForUpdatesAutomatically",
"lastCheckedForUpdates"]
}
self._playerFactory = PlayerFactory()
def _validateArguments(self):
if self._config['resetConfig']:
language = self._config['language']
checkForUpdatesAutomatically = self._config['checkForUpdatesAutomatically']
self._config = self._defaultConfig
self._config['language'] = language
self._config['checkForUpdatesAutomatically'] = checkForUpdatesAutomatically
raise InvalidConfigValue("*" + getMessage("config-cleared-notification"))
if not isValidLanguage(self._config['language']):
self._config['language'] = ""
def _isPortValid(varToTest):
try:
if varToTest == "" or varToTest is None:
return False
if not str(varToTest).isdigit():
return False
varToTest = int(varToTest)
if varToTest > 65535 or varToTest < 1:
return False
return True
except:
return False
for key in self._boolean:
if self._config[key] == "True":
self._config[key] = True
elif self._config[key] == "False":
self._config[key] = False
for key in self._serialised:
if self._config[key] is None or self._config[key] == "":
self._config[key] = {}
elif isinstance(self._config[key], str):
self._config[key] = ast.literal_eval(self._config[key])
for key in self._tristate:
if self._config[key] == "True":
self._config[key] = True
elif self._config[key] == "False":
self._config[key] = False
elif self._config[key] == "None":
self._config[key] = None
for key in self._numeric:
self._config[key] = float(self._config[key])
for key in self._hexadecimal:
match = re.search(r'^#(?:[0-9a-fA-F]){6}$', self._config[key])
if not match:
self._config[key] = "#FFFFFF"
for key in self._required:
if key == "playerPath":
player = None
if self._config["playerPath"]:
player = self._playerFactory.getPlayerByPath(self._config["playerPath"])
if player:
self._config["playerClass"] = player
else:
raise InvalidConfigValue(getMessage("player-path-config-error"))
playerPathErrors = player.getPlayerPathErrors(
self._config["playerPath"], self._config['file'] if self._config['file'] else None)
if playerPathErrors:
raise InvalidConfigValue(playerPathErrors)
elif key == "host":
self._config["host"], self._config["port"] = self._splitPortAndHost(self._config["host"])
hostNotValid = (self._config["host"] == "" or self._config["host"] is None)
portNotValid = not _isPortValid(self._config["port"])
if hostNotValid:
raise InvalidConfigValue(getMessage("no-hostname-config-error"))
elif portNotValid:
raise InvalidConfigValue(getMessage("invalid-port-config-error"))
elif self._config[key] == "" or self._config[key] is None:
raise InvalidConfigValue(getMessage("empty-value-config-error").format(key.capitalize()))
def _overrideConfigWithArgs(self, args):
for key, val in list(vars(args).items()):
if val:
if key == "force_gui_prompt":
key = "forceGuiPrompt"
if key == "no_store":
key = "noStore"
if key == "player_path":
key = "playerPath"
if key == "_args":
key = "playerArgs"
if key == "no_gui":
key = "noGui"
if key == "clear_gui_data":
key = "clearGUIData"
if key == "load_playlist_from_file":
key = "loadPlaylistFromFile"
self._config[key] = val
def _splitPortAndHost(self, host):
port = constants.DEFAULT_PORT if not self._config["port"] else self._config["port"]
if host:
if ':' in host:
if host.count(':') == 1:
#IPv4 address or hostname, with port
host, port = host.rsplit(':', 1)
try:
port = int(port)
except ValueError:
try:
port = port.encode('ascii', 'ignore')
except:
port = ""
else:
#IPv6 address
if ']' in host:
#IPv6 address in brackets
endBracket = host.index(']')
try:
#port explicitly indicated
port = int(host[endBracket+2:])
except ValueError:
#no port after the bracket
pass
host = host[:endBracket+1]
else:
#IPv6 address with no port and no brackets
#add brackets to correctly store IPv6 addresses in configs
host = '[' + host + ']'
return host, port
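# Illustrative examples (not in the original source) of what _splitPortAndHost returns:
# "example.com:8999" -> ("example.com", 8999)
# "example.com" -> ("example.com", <current/default port>)
# "[2001:db8::1]:8999" -> ("[2001:db8::1]", 8999)
# "2001:db8::1" -> ("[2001:db8::1]", <current/default port>)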
def _checkForPortableFile(self):
path = utils.findWorkingDir()
for name in constants.CONFIG_NAMES:
if os.path.isfile(os.path.join(path, name)):
return os.path.join(path, name)
def _getConfigurationFilePath(self):
configFile = self._checkForPortableFile()
if configFile:
return configFile
for name in constants.CONFIG_NAMES:
configFile = self._expandConfigPath(name, xdg=False)
if os.path.isfile(configFile):
return configFile
return self._expandConfigPath()
def _expandConfigPath(self, name=None, xdg=True):
if os.name != 'nt':
if xdg:
prefix = self._getXdgConfigHome()
else:
prefix = os.getenv('HOME', '.')
else:
prefix = os.getenv('APPDATA', '.')
return os.path.join(prefix, name or constants.DEFAULT_CONFIG_NAME)
def _getXdgConfigHome(self):
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
if not os.path.isdir(path):
os.mkdir(path, 0o700)
return path
def _parseConfigFile(self, iniPath, createConfig=True):
parser = SafeConfigParserUnicode(strict=False)
if not os.path.isfile(iniPath):
if createConfig:
open(iniPath, 'w').close()
else:
return
parser.readfp(codecs.open(iniPath, "r", "utf_8_sig"))
for section, options in list(self._iniStructure.items()):
if parser.has_section(section):
for option in options:
if parser.has_option(section, option):
self._config[option] = parser.get(section, option)
def _checkConfig(self):
try:
self._validateArguments()
except InvalidConfigValue as e:
try:
for key, value in list(self._promptForMissingArguments(e).items()):
self._config[key] = value
self._checkConfig()
except:
sys.exit()
def _promptForMissingArguments(self, error=None):
if self._config['noGui']:
if error:
print("{}!".format(error))
print(getMessage("missing-arguments-error"))
sys.exit()
else:
from syncplay.ui.GuiConfiguration import GuiConfiguration
gc = GuiConfiguration(self._config, error=error)
gc.setAvailablePaths(self._playerFactory.getAvailablePlayerPaths())
gc.run()
return gc.getProcessedConfiguration()
def __wasOptionChanged(self, parser, section, option):
if parser.has_option(section, option):
if parser.get(section, option) != str(self._config[option]):
return True
else:
return True
def _saveConfig(self, iniPath):
changed = False
if self._config['noStore']:
return
parser = SafeConfigParserUnicode(strict=False)
parser.readfp(codecs.open(iniPath, "r", "utf_8_sig"))
for section, options in list(self._iniStructure.items()):
if not parser.has_section(section):
parser.add_section(section)
changed = True
for option in options:
if self.__wasOptionChanged(parser, section, option):
changed = True
parser.set(section, option, str(self._config[option]).replace('%', '%%'))
if changed:
parser.write(codecs.open(iniPath, "wb", "utf_8_sig"))
def _forceGuiPrompt(self):
from syncplay.ui.GuiConfiguration import GuiConfiguration
try:
self._validateArguments()
except InvalidConfigValue:
pass
try:
for key, value in list(self._promptForMissingArguments().items()):
self._config[key] = value
except GuiConfiguration.WindowClosed:
sys.exit()
def __getRelativeConfigLocations(self):
locations = []
path = os.path.dirname(os.path.realpath(self._config['file']))
locations.append(path)
while path != os.path.dirname(path):
path = os.path.dirname(path)
locations.append(path)
locations.reverse()
return locations
def _loadRelativeConfiguration(self):
locations = self.__getRelativeConfigLocations()
loadedPaths = []
for location in locations:
for name in constants.CONFIG_NAMES:
path = location + os.path.sep + name
if os.path.isfile(path) and (os.name == 'nt' or path != os.path.join(os.getenv('HOME', '.'), name)):
loadedPaths.append("'{}'".format(os.path.normpath(path)))
self._parseConfigFile(path, createConfig=False)
self._checkConfig()
return loadedPaths
def getConfiguration(self):
iniPath = self._getConfigurationFilePath()
self._parseConfigFile(iniPath)
#
# Watch out for the method self._overrideConfigWithArgs when you're adding custom multi-word command line arguments
#
if self._config['language']:
setLanguage(self._config['language'])
self._argparser = argparse.ArgumentParser(
description=getMessage("argument-description"),
epilog=getMessage("argument-epilog"))
self._argparser.add_argument('--no-gui', action='store_true', help=getMessage("nogui-argument"))
self._argparser.add_argument('-a', '--host', metavar='hostname', type=str, help=getMessage("host-argument"))
self._argparser.add_argument('-n', '--name', metavar='username', type=str, help=getMessage("name-argument"))
self._argparser.add_argument('-d', '--debug', action='store_true', help=getMessage("debug-argument"))
self._argparser.add_argument('-g', '--force-gui-prompt', action='store_true', help=getMessage("force-gui-prompt-argument"))
self._argparser.add_argument('--no-store', action='store_true', help=getMessage("no-store-argument"))
self._argparser.add_argument('-r', '--room', metavar='room', type=str, nargs='?', help=getMessage("room-argument"))
self._argparser.add_argument('-p', '--password', metavar='password', type=str, nargs='?', help=getMessage("password-argument"))
self._argparser.add_argument('--player-path', metavar='path', type=str, help=getMessage("player-path-argument"))
self._argparser.add_argument('-psn', metavar='blackhole', type=str, help=argparse.SUPPRESS)
self._argparser.add_argument('--language', metavar='language', type=str, help=getMessage("language-argument"))
self._argparser.add_argument('file', metavar='file', type=str, nargs='?', help=getMessage("file-argument"))
self._argparser.add_argument('--clear-gui-data', action='store_true', help=getMessage("clear-gui-data-argument"))
self._argparser.add_argument('-v', '--version', action='store_true', help=getMessage("version-argument"))
self._argparser.add_argument('--load-playlist-from-file', metavar="loadPlaylistFromFile", type=str, help=getMessage("load-playlist-from-file-argument"))
self._argparser.add_argument('_args', metavar='options', type=str, nargs='*', help=getMessage("args-argument"))
args = self._argparser.parse_args()
if args.version:
print(getMessage("version-message").format(version, milestone))
sys.exit()
self._overrideConfigWithArgs(args)
if not self._config['noGui']:
try:
from syncplay.vendor.Qt import QtWidgets, IsPySide, IsPySide2, QtGui
from syncplay.vendor.Qt.QtCore import QCoreApplication
from syncplay.vendor import qt5reactor
if not (IsPySide2 or IsPySide):
raise ImportError
if QCoreApplication.instance() is None:
self.app = QtWidgets.QApplication(sys.argv)
if isWindows():
try:
from syncplay.vendor import darkdetect
isDarkMode = darkdetect.isDark()
except:
isDarkMode = False
if isDarkMode:
self.app.setStyle(QtWidgets.QStyleFactory.create("fusion"))
self.app.setPalette(self.getDarkPalette(QtGui))
qt5reactor.install()
if isMacOS():
import appnope
appnope.nope()
except ImportError:
try:
from twisted.trial import unittest
except Exception as e:
print(e)
print(getMessage("unable-import-twisted-error"))
sys.exit()
print(getMessage("unable-import-gui-error"))
self._config['noGui'] = True
if self._config['file'] and self._config['file'][:2] == "--":
self._config['playerArgs'].insert(0, self._config['file'])
self._config['file'] = None
# Arguments not validated yet - booleans are still text values
if self._config['language']:
setLanguage(self._config['language'])
if (self._config['forceGuiPrompt'] == "True" or not self._config['file']) and not self._config['noGui']:
self._forceGuiPrompt()
self._checkConfig()
self._saveConfig(iniPath)
if self._config['file']:
self._config['loadedRelativePaths'] = self._loadRelativeConfiguration()
if self._config['language']:
setLanguage(self._config['language'])
return self._config
def getDarkPalette(self, QtGui):
# Based on https://gist.github.com/lschmierer/443b8e21ad93e2a2d7eb#gistcomment-3503395
darkPalette = QtGui.QPalette()
darkPalette.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))
darkPalette.setColor(QtGui.QPalette.WindowText, QtGui.QColor(255, 255, 255))
darkPalette.setColor(QtGui.QPalette.Base, QtGui.QColor(35, 35, 35))
darkPalette.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
darkPalette.setColor(QtGui.QPalette.ToolTipBase, QtGui.QColor(25, 25, 25))
darkPalette.setColor(QtGui.QPalette.ToolTipText, QtGui.QColor(255, 255, 255))
darkPalette.setColor(QtGui.QPalette.Text, QtGui.QColor(255, 255, 255))
darkPalette.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
darkPalette.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255, 255, 255))
darkPalette.setColor(QtGui.QPalette.BrightText, QtGui.QColor(255, 0, 0))
darkPalette.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))
darkPalette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))
darkPalette.setColor(QtGui.QPalette.HighlightedText, QtGui.QColor(35, 35, 35))
darkPalette.setColor(QtGui.QPalette.Active, QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
darkPalette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, QtGui.QColor(128, 128, 128))
darkPalette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, QtGui.QColor(128, 128, 128))
darkPalette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.Text, QtGui.QColor(128, 128, 128))
darkPalette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor(53, 53, 53))
return darkPalette
def setConfigOption(self, option, value):
path = self._getConfigurationFilePath()
backup = self._config.copy()
self._parseConfigFile(path)
self._config[option] = value
backup[option] = value
self._saveConfig(path)
self._config = backup
class SafeConfigParserUnicode(SafeConfigParser):
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in list(self._defaults.items()):
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in list(self._sections[section].items()):
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % key)
fp.write("\n")
|
|
############################################################################################
# QN-S3VM BFGS optimizer for semi-supervised support vector machines.
#
# This implementation provides an L-BFGS optimization scheme
# for semi-supervised support vector machines. Details can be found in:
#
# F. Gieseke, A. Airola, T. Pahikkala, O. Kramer, Sparse quasi-
# Newton optimization for semi-supervised support vector ma-
# chines, in: Proc. of the 1st Int. Conf. on Pattern Recognition
# Applications and Methods, 2012, pp. 45-54.
#
# Version: 0.1 (September, 2012)
#
# Bugs: Please send any bugs to "f DOT gieseke AT uni-oldenburg.de"
#
#
# Copyright (C) 2012 Fabian Gieseke, Antti Airola, Tapio Pahikkala, Oliver Kramer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# INSTALLATION and DEPENDENCIES
#
# The module should work out of the box, given Python and Numpy (http://numpy.scipy.org/)
# and Scipy (http://scipy.org/) installed correctly.
#
# We have tested the code on Ubuntu 12.04 (32 Bit) with Python 2.7.3, Numpy 1.6.1,
# and Scipy 0.9.0. Installing these packages on Ubuntu- or Debian-based systems
# can be done via "sudo apt-get install python python-numpy python-scipy".
#
#
# RUNNING THE EXAMPLES
#
# For a description of the data sets, see the paper mentioned above and the references
# therein. Running the command "python qns3vm.py" should yield an output similar to:
#
# Sparse text data set instance
# Number of labeled patterns: 48
# Number of unlabeled patterns: 924
# Number of test patterns: 974
# Time needed to compute the model: 0.775886058807 seconds
# Classification error of QN-S3VM: 0.0667351129363
#
# Dense gaussian data set instance
# Number of labeled patterns: 25
# Number of unlabeled patterns: 225
# Number of test patterns: 250
# Time needed to compute the model: 0.464584112167 seconds
# Classification error of QN-S3VM: 0.012
#
# Dense moons data set instance
# Number of labeled patterns: 5
# Number of unlabeled patterns: 495
# Number of test patterns: 500
# Time needed to compute the model: 0.69714307785 seconds
# Classification error of QN-S3VM: 0.0
############################################################################################
import array as arr
import math
import copy as cp
import logging
import numpy as np
from numpy import *
import operator
from time import time
import sys
from scipy import optimize
import scipy.sparse.csc as csc
from scipy import sparse
import scipy
import warnings
warnings.simplefilter('error')
__author__ = 'Fabian Gieseke, Antti Airola, Tapio Pahikkala, Oliver Kramer'
__version__= '0.1'
class QN_S3VM:
"""
L-BFGS optimizer for semi-supervised support vector machines (S3VM).
"""
def __init__(self, X_l, L_l, X_u, random_generator = None, ** kw):
"""
Initializes the model. Detects automatically if dense or sparse data is provided.
Keyword arguments:
X_l -- patterns of labeled part of the data
L_l -- labels of labeled part of the data
X_u -- patterns of unlabeled part of the data
random_generator -- particular instance of a random_generator (default None)
kw -- additional parameters for the optimizer
lam -- regularization parameter lambda (default 1, must be a float > 0)
lamU -- cost parameter that determines influence of unlabeled patterns (default 1, must be float > 0)
sigma -- kernel width for RBF kernel (default 1.0, must be a float > 0)
kernel_type -- "Linear" or "RBF" (default "Linear")
numR -- size of the subset of regressors to be used. If None is provided,
all patterns are used (no approximation). Must fulfill
0 <= numR <= len(X_l) + len(X_u) (default None)
estimate_r -- desired ratio of positive and negative assignments for
unlabeled patterns (-1.0 <= estimate_r <= 1.0). If estimate_r=None,
then L_l is used to estimate this ratio (in case len(L_l) >=
minimum_labeled_patterns_for_estimate_r); otherwise estimate_r = 0.0 is used
(default None)
minimum_labeled_patterns_for_estimate_r -- see above (default 0)
BFGS_m -- BFGS parameter (default 50)
BFGS_maxfun -- BFGS parameter, maximum number of function calls (default 500)
BFGS_factr -- BFGS parameter (default 1E12)
BFGS_pgtol -- BFGS parameter (default 1.0000000000000001e-05)
"""
self.__model = None
# Initiate model for sparse data
if isinstance(X_l, csc.csc_matrix):
self.__data_type = "sparse"
self.__model = QN_S3VM_Sparse(X_l, L_l, X_u, random_generator, ** kw)
# Initiate model for dense data
elif (isinstance(X_l[0], list)) or (isinstance(X_l[0], np.ndarray)):
self.__data_type = "dense"
self.__model = QN_S3VM_Dense(X_l, L_l, X_u, random_generator, ** kw)
# Data format unknown
if self.__model is None:
logging.error("Data format for patterns is unknown.")
sys.exit(0)
def train(self):
"""
Training phase.
Returns:
The computed partition for the unlabeled patterns.
"""
return self.__model.train()
def getPredictions(self, X, real_valued=False):
"""
Computes the predicted labels for a given set of patterns
Keyword arguments:
X -- The set of patterns
real_valued -- If True, then the real prediction values are returned
Returns:
The predictions for the list X of patterns.
"""
return self.__model.getPredictions(X, real_valued=real_valued)
def predict(self, x):
"""
Predicts a label (-1 or +1) for the pattern
Keyword arguments:
x -- The pattern
Returns:
The prediction for x.
"""
return self.__model.predict(x)
def predictValue(self, x):
"""
Computes f(x) for a given pattern (see Representer Theorem)
Keyword arguments:
x -- The pattern
Returns:
The (real) prediction value for x.
"""
return self.__model.predictValue(x)
def getNeededFunctionCalls(self):
"""
Returns the number of function calls needed during
the optimization process.
"""
return self.__model.getNeededFunctionCalls()
def mygetPreds(self, X, real_valued=False):
return self.__model.mygetPreds(X, real_valued)
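# --- Minimal usage sketch (not part of the original module); the toy data is illustrative only ---
# import random
# X_l = [[0.0, 0.0], [1.0, 1.0]]   # labeled patterns
# L_l = [-1, 1]                    # their labels
# X_u = [[0.1, 0.2], [0.9, 0.8]]   # unlabeled patterns
# model = QN_S3VM(X_l, L_l, X_u, random.Random(1), lam=1.0, lamU=1.0, kernel_type="Linear")
# model.train()                    # returns the partition of the unlabeled patterns
# model.predict([0.5, 0.6])        # -> -1 or +1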
############################################################################################
############################################################################################
class QN_S3VM_Dense:
"""
BFGS optimizer for semi-supervised support vector machines (S3VM).
Dense Data
"""
parameters = {
'lam': 1,
'lamU':1,
'sigma': 1,
'kernel_type': "Linear",
'numR':None,
'estimate_r':None,
'minimum_labeled_patterns_for_estimate_r':0,
'BFGS_m':50,
'BFGS_maxfun':500,
'BFGS_factr':1E12,
'BFGS_pgtol':1.0000000000000001e-05,
'BFGS_verbose':-1,
'surrogate_s':3.0,
'surrogate_gamma':20.0,
'breakpoint_for_exp':500
}
def __init__(self, X_l, L_l, X_u, random_generator, ** kw):
"""
Initializes the S3VM optimizer.
"""
self.__random_generator = random_generator
self.__X_l, self.__X_u, self.__L_l = X_l, X_u, L_l
assert len(X_l) == len(L_l)
self.__X = cp.deepcopy(self.__X_l)
self.__X.extend(cp.deepcopy(self.__X_u))
self.__size_l, self.__size_u, self.__size_n = len(X_l), len(X_u), len(X_l) + len(X_u)
self.__matrices_initialized = False
self.__setParameters( ** kw)
self.__kw = kw
def train(self):
"""
Training phase.
Returns:
The computed partition for the unlabeled patterns.
"""
indi_opt = self.__optimize()
self.__recomputeModel(indi_opt)
predictions = self.__getTrainingPredictions(self.__X)
return predictions
def mygetPreds(self, X, real_valued=False):
KNR = self.__kernel.computeKernelMatrix(X, self.__Xreg)
KNU_bar = self.__kernel.computeKernelMatrix(X, self.__X_u_subset, symmetric=False)
KNU_bar_horizontal_sum = (1.0 / len(self.__X_u_subset)) * KNU_bar.sum(axis=1)
KNR = KNR - KNU_bar_horizontal_sum - self.__KU_barR_vertical_sum + self.__KU_barU_bar_sum
preds = KNR * self.__c[0:self.__dim-1,:] + self.__c[self.__dim-1,:]
return preds
def getPredictions(self, X, real_valued=False):
"""
Computes the predicted labels for a given set of patterns
Keyword arguments:
X -- The set of patterns
real_valued -- If True, then the real prediction values are returned
Returns:
The predictions for the list X of patterns.
"""
KNR = self.__kernel.computeKernelMatrix(X, self.__Xreg)
KNU_bar = self.__kernel.computeKernelMatrix(X, self.__X_u_subset, symmetric=False)
KNU_bar_horizontal_sum = (1.0 / len(self.__X_u_subset)) * KNU_bar.sum(axis=1)
KNR = KNR - KNU_bar_horizontal_sum - self.__KU_barR_vertical_sum + self.__KU_barU_bar_sum
preds = KNR * self.__c[0:self.__dim-1,:] + self.__c[self.__dim-1,:]
if real_valued == True:
return preds.flatten(1).tolist()[0]
else:
return np.sign(np.sign(preds)+0.1).flatten(1).tolist()[0]
def predict(self, x):
"""
Predicts a label for the pattern
Keyword arguments:
x -- The pattern
Returns:
The prediction for x.
"""
return self.getPredictions([x], real_valued=False)[0]
def predictValue(self, x):
"""
Computes f(x) for a given pattern (see Representer Theorem)
Keyword arguments:
x -- The pattern
Returns:
The (real) prediction value for x.
"""
return self.getPredictions([x], real_valued=True)[0]
def getNeededFunctionCalls(self):
"""
Returns the number of function calls needed during
the optimization process.
"""
return self.__needed_function_calls
def __setParameters(self, ** kw):
# copy the class-level defaults so that instances do not share parameter state
self.parameters = dict(self.parameters)
for attr, val in kw.items():
self.parameters[attr] = val
self.__lam = float(self.parameters['lam'])
assert self.__lam > 0
self.__lamU = float(self.parameters['lamU'])
assert self.__lamU > 0
self.__lam_Uvec = [float(self.__lamU)*i for i in [0,0.000001,0.0001,0.01,0.1,0.5,1]]
self.__sigma = float(self.parameters['sigma'])
assert self.__sigma > 0
self.__kernel_type = str(self.parameters['kernel_type'])
if self.parameters['numR'] is not None:
self.__numR = int(self.parameters['numR'])
assert (self.__numR <= len(self.__X)) and (self.__numR > 0)
else:
self.__numR = len(self.__X)
self.__regressors_indices = sorted(self.__random_generator.sample( range(0,len(self.__X)), self.__numR ))
self.__dim = self.__numR + 1 # add bias term b
self.__minimum_labeled_patterns_for_estimate_r = float(self.parameters['minimum_labeled_patterns_for_estimate_r'])
# If reliable estimate is available or can be estimated, use it, otherwise
# assume classes to be balanced (i.e., estimate_r=0.0)
if self.parameters['estimate_r'] is not None:
self.__estimate_r = float(self.parameters['estimate_r'])
elif len(self.__L_l) >= self.__minimum_labeled_patterns_for_estimate_r:
self.__estimate_r = (1.0 / len(self.__L_l)) * np.sum(self.__L_l)
else:
self.__estimate_r = 0.0
self.__BFGS_m = int(self.parameters['BFGS_m'])
self.__BFGS_maxfun = int(self.parameters['BFGS_maxfun'])
self.__BFGS_factr = float(self.parameters['BFGS_factr'])
# This is a hack for 64 bit systems (Linux). The machine precision
# is different for the BFGS optimizer (Fortran code) and we fix this by:
is_64bits = sys.maxsize > 2**32
if is_64bits:
logging.debug("64-bit system detected, modifying BFGS_factr!")
self.__BFGS_factr = 0.000488288*self.__BFGS_factr
self.__BFGS_pgtol = float(self.parameters['BFGS_pgtol'])
self.__BFGS_verbose = int(self.parameters['BFGS_verbose'])
self.__surrogate_gamma = float(self.parameters['surrogate_gamma'])
self.__s = float(self.parameters['surrogate_s'])
self.__breakpoint_for_exp = float(self.parameters['breakpoint_for_exp'])
self.__b = self.__estimate_r
# size of unlabeled patterns to estimate mean (used for balancing constraint)
self.__max_unlabeled_subset_size = 1000
def __optimize(self):
logging.debug("Starting optimization with BFGS ...")
self.__needed_function_calls = 0
self.__initializeMatrices()
# starting point
c_current = zeros(self.__dim, float64)
c_current[self.__dim-1] = self.__b
# Annealing sequence.
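# The influence of the unlabeled part is increased gradually: lamU is scaled by
# 0, 1e-6, 1e-4, 0.01, 0.1, 0.5 and finally 1 (see self.__lam_Uvec), which reduces
# the risk of getting trapped in poor local minima early in the optimization.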
for i in range(len(self.__lam_Uvec)):
self.__lamU = self.__lam_Uvec[i]
# crop one dimension (in case the offset b is fixed)
c_current = c_current[:self.__dim-1]
c_current = self.__localSearch(c_current)
# reappend it if needed
c_current = np.append(c_current, self.__b)
f_opt = self.__getFitness(c_current)
return c_current, f_opt
def __localSearch(self, start):
c_opt, f_opt, d = optimize.fmin_l_bfgs_b(self.__getFitness, start, m=self.__BFGS_m, \
fprime=self.__getFitness_Prime, maxfun=self.__BFGS_maxfun, factr=self.__BFGS_factr,\
pgtol=self.__BFGS_pgtol, iprint=self.__BFGS_verbose)
self.__needed_function_calls += int(d['funcalls'])
return c_opt
def __initializeMatrices(self):
if self.__matrices_initialized == False:
logging.debug("Initializing matrices...")
# Initialize labels
x = arr.array('i')
for l in self.__L_l:
x.append(l)
self.__YL = mat(x, dtype=np.float64)
self.__YL = self.__YL.transpose()
# Initialize kernel matrices
if (self.__kernel_type == "Linear"):
self.__kernel = LinearKernel()
elif (self.__kernel_type == "RBF"):
self.__kernel = RBFKernel(self.__sigma)
self.__Xreg = (mat(self.__X)[self.__regressors_indices,:].tolist())
self.__KLR = self.__kernel.computeKernelMatrix(self.__X_l,self.__Xreg, symmetric=False)
self.__KUR = self.__kernel.computeKernelMatrix(self.__X_u,self.__Xreg, symmetric=False)
self.__KNR = cp.deepcopy(bmat([[self.__KLR], [self.__KUR]]))
self.__KRR = self.__KNR[self.__regressors_indices,:]
# Center patterns in feature space (with respect to approximated mean of unlabeled patterns in the feature space)
subset_unlabeled_indices = sorted(self.__random_generator.sample( range(0,len(self.__X_u)), min(self.__max_unlabeled_subset_size, len(self.__X_u)) ))
self.__X_u_subset = (mat(self.__X_u)[subset_unlabeled_indices,:].tolist())
self.__KNU_bar = self.__kernel.computeKernelMatrix(self.__X, self.__X_u_subset, symmetric=False)
self.__KNU_bar_horizontal_sum = (1.0 / len(self.__X_u_subset)) * self.__KNU_bar.sum(axis=1)
self.__KU_barR = self.__kernel.computeKernelMatrix(self.__X_u_subset, self.__Xreg, symmetric=False)
self.__KU_barR_vertical_sum = (1.0 / len(self.__X_u_subset)) * self.__KU_barR.sum(axis=0)
self.__KU_barU_bar = self.__kernel.computeKernelMatrix(self.__X_u_subset, self.__X_u_subset, symmetric=False)
self.__KU_barU_bar_sum = (1.0 / (len(self.__X_u_subset)))**2 * self.__KU_barU_bar.sum()
self.__KNR = self.__KNR - self.__KNU_bar_horizontal_sum - self.__KU_barR_vertical_sum + self.__KU_barU_bar_sum
self.__KRR = self.__KNR[self.__regressors_indices,:]
self.__KLR = self.__KNR[range(0,len(self.__X_l)),:]
self.__KUR = self.__KNR[range(len(self.__X_l),len(self.__X)),:]
self.__matrices_initialized = True
def __getFitness(self,c):
# Check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = mat(c)
b = c[:,self.__dim-1].T
c_new = c[:,0:self.__dim-1].T
preds_labeled = self.__surrogate_gamma*(1.0 - multiply(self.__YL, self.__KLR * c_new + b))
preds_unlabeled = self.__KUR * c_new + b
# This vector has a "one" for each "numerically instable" entry; "zeros" for "good ones".
preds_labeled_conflict_indicator = np.sign(np.sign(preds_labeled/self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1)*(preds_labeled_conflict_indicator - 1.0)
preds_labeled_for_conflicts = multiply(preds_labeled_conflict_indicator,preds_labeled)
preds_labeled = multiply(preds_labeled,preds_labeled_good_indicator)
# Compute values for good entries
preds_labeled_log_exp = np.log(1.0 + np.exp(preds_labeled))
# Compute values for unstable entries
preds_labeled_log_exp = multiply(preds_labeled_good_indicator, preds_labeled_log_exp)
# Replace critical values with their linear approximation (log(1+exp(x)) ~= x for large x)
preds_labeled_final = preds_labeled_log_exp + preds_labeled_for_conflicts
term1 = (1.0/(self.__surrogate_gamma*self.__size_l)) * np.sum(preds_labeled_final)
preds_unlabeled_squared = multiply(preds_unlabeled,preds_unlabeled)
term2 = (float(self.__lamU)/float(self.__size_u))*np.sum(np.exp(-self.__s * preds_unlabeled_squared))
term3 = self.__lam * (c_new.T * self.__KRR * c_new)
return (term1 + term2 + term3)[0,0]
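# In formula form, __getFitness evaluates (cf. the paper cited in the header)
# F(c) = 1/(gamma*l) * sum_i log(1 + exp(gamma*(1 - y_i*f(x_i))))   (labeled loss)
#      + lamU/u * sum_j exp(-s * f(x_j)^2)                          (unlabeled loss)
#      + lam * c^T K_RR c                                           (regularizer)
# where f is the decision function induced by the coefficients c and the offset b.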
def __getFitness_Prime(self,c):
# Check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = mat(c)
b = c[:,self.__dim-1].T
c_new = c[:,0:self.__dim-1].T
preds_labeled = self.__surrogate_gamma * (1.0 - multiply(self.__YL, self.__KLR * c_new + b))
preds_unlabeled = (self.__KUR * c_new + b)
# This vector has a "one" for each "numerically instable" entry; "zeros" for "good ones".
preds_labeled_conflict_indicator = np.sign(np.sign(preds_labeled/self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1)*(preds_labeled_conflict_indicator - 1.0)
preds_labeled = multiply(preds_labeled,preds_labeled_good_indicator)
preds_labeled_exp = np.exp(preds_labeled)
term1 = multiply(preds_labeled_exp, 1.0/(1.0 + preds_labeled_exp))
term1 = multiply(preds_labeled_good_indicator, term1)
# Replace critical values with "1.0"
term1 = term1 + preds_labeled_conflict_indicator
term1 = multiply(self.__YL, term1)
preds_unlabeled_squared_exp_f = multiply(preds_unlabeled,preds_unlabeled)
preds_unlabeled_squared_exp_f = np.exp(-self.__s * preds_unlabeled_squared_exp_f)
preds_unlabeled_squared_exp_f = multiply(preds_unlabeled_squared_exp_f, preds_unlabeled)
term1 = (-1.0/self.__size_l) * (term1.T * self.__KLR).T
term2 = ((-2.0 * self.__s * self.__lamU)/float(self.__size_u)) * (preds_unlabeled_squared_exp_f.T * self.__KUR).T
term3 = 2*self.__lam*(self.__KRR * c_new)
return array((term1 + term2 + term3).T)[0]
def __recomputeModel(self, indi):
self.__c = mat(indi[0]).T
def __getTrainingPredictions(self, X, real_valued=False):
preds = self.__KNR * self.__c[0:self.__dim-1,:] + self.__c[self.__dim-1,:]
if real_valued == True:
return preds.flatten(1).tolist()[0]
else:
return np.sign(np.sign(preds)+0.1).flatten(1).tolist()[0]
def __check_matrix(self, M):
smallesteval = scipy.linalg.eigvalsh(M, eigvals=(0,0))[0]
if smallesteval < 0.0:
shift = abs(smallesteval) + 0.0000001
M = M + shift * np.eye(M.shape[0])  # shift the diagonal so that M becomes positive definite
return M
############################################################################################
############################################################################################
class QN_S3VM_Sparse:
"""
BFGS optimizer for semi-supervised support vector machines (S3VM).
Sparse Data
"""
parameters = {
'lam': 1,
'lamU':1,
'estimate_r':None,
'minimum_labeled_patterns_for_estimate_r':0,
'BFGS_m':50,
'BFGS_maxfun':500,
'BFGS_factr':1E12,
'BFGS_pgtol':1.0000000000000001e-05,
'BFGS_verbose':-1,
'surrogate_s':3.0,
'surrogate_gamma':20.0,
'breakpoint_for_exp':500
}
def __init__(self, X_l, L_l, X_u, random_generator, ** kw):
"""
Initializes the S3VM optimizer.
"""
self.__random_generator = random_generator
# This is a nuisance, but we may need to pad extra dimensions to either X_l or X_u
# in case the highest feature indices appear only in one of the two data matrices
if X_l.shape[1] > X_u.shape[1]:
X_u = sparse.hstack([X_u, sparse.coo_matrix((X_u.shape[0], X_l.shape[1] - X_u.shape[1]))])
elif X_l.shape[1] < X_u.shape[1]:
X_l = sparse.hstack([X_l, sparse.coo_matrix((X_l.shape[0], X_u.shape[1] - X_l.shape[1]))])
# We vertically stack the data matrices into one big matrix
X = sparse.vstack([X_l, X_u])
self.__size_l, self.__size_u, self.__size_n = X_l.shape[0], X_u.shape[0], X_l.shape[0]+ X_u.shape[0]
x = arr.array('i')
for l in L_l:
x.append(int(l))
self.__YL = mat(x, dtype=np.float64)
self.__YL = self.__YL.transpose()
self.__setParameters( ** kw)
self.__kw = kw
self.X_l = X_l.tocsr()
self.X_u = X_u.tocsr()
self.X = X.tocsr()
# compute mean of unlabeled patterns
self.__mean_u = self.X_u.mean(axis=0)
self.X_u_T = X_u.tocsc().T
self.X_l_T = X_l.tocsc().T
self.X_T = X.tocsc().T
def train(self):
"""
Training phase.
Returns:
The computed partition for the unlabeled patterns.
"""
indi_opt = self.__optimize()
self.__recomputeModel(indi_opt)
predictions = self.getPredictions(self.X)
return predictions
def getPredictions(self, X, real_valued=False):
"""
Computes the predicted labels for a given set of patterns
Keyword arguments:
X -- The set of patterns
real_valued -- If True, then the real prediction values are returned
Returns:
The predictions for the list X of patterns.
"""
c_new = self.__c[:self.__dim-1]
W = self.X.T*c_new - self.__mean_u.T*np.sum(c_new)
# Again, possibility of dimension mismatch due to use of sparse matrices
if X.shape[1] > W.shape[0]:
X = X[:,range(W.shape[0])]
if X.shape[1] < W.shape[0]:
W = W[range(X.shape[1])]
X = X.tocsc()
preds = X * W + self.__b
if real_valued == True:
return preds.flatten(1).tolist()[0]
else:
return np.sign(np.sign(preds)+0.1).flatten(1).tolist()[0]
def predict(self, x):
"""
Predicts a label for the pattern
Keyword arguments:
x -- The pattern
Returns:
The prediction for x.
"""
return self.getPredictions([x], real_valued=False)[0]
def predictValue(self, x):
"""
Computes f(x) for a given pattern (see Representer Theorem)
Keyword arguments:
x -- The pattern
Returns:
The (real) prediction value for x.
"""
return self.getPredictions([x], real_valued=True)[0]
def getNeededFunctionCalls(self):
"""
Returns the number of function calls needed during
the optimization process.
"""
return self.__needed_function_calls
def __setParameters(self, ** kw):
# copy the class-level defaults so that instances do not share parameter state
self.parameters = dict(self.parameters)
for attr, val in kw.items():
self.parameters[attr] = val
self.__lam = float(self.parameters['lam'])
assert self.__lam > 0
self.__lamU = float(self.parameters['lamU'])
assert self.__lamU > 0
self.__lam_Uvec = [float(self.__lamU)*i for i in [0,0.000001,0.0001,0.01,0.1,0.5,1]]
self.__minimum_labeled_patterns_for_estimate_r = float(self.parameters['minimum_labeled_patterns_for_estimate_r'])
# If reliable estimate is available or can be estimated, use it, otherwise
# assume classes to be balanced (i.e., estimate_r=0.0)
if self.parameters['estimate_r'] is not None:
self.__estimate_r = float(self.parameters['estimate_r'])
elif self.__YL.shape[0] > self.__minimum_labeled_patterns_for_estimate_r:
self.__estimate_r = (1.0 / self.__YL.shape[0]) * np.sum(self.__YL[0:])
else:
self.__estimate_r = 0.0
self.__dim = self.__size_n + 1 # for offset term b
self.__BFGS_m = int(self.parameters['BFGS_m'])
self.__BFGS_maxfun = int(self.parameters['BFGS_maxfun'])
self.__BFGS_factr = float(self.parameters['BFGS_factr'])
# This is a hack for 64 bit systems (Linux). The machine precision
# is different for the BFGS optimizer (Fortran code) and we fix this by:
is_64bits = sys.maxsize > 2**32
if is_64bits:
logging.debug("64-bit system detected, modifying BFGS_factr!")
self.__BFGS_factr = 0.000488288*self.__BFGS_factr
self.__BFGS_pgtol = float(self.parameters['BFGS_pgtol'])
self.__BFGS_verbose = int(self.parameters['BFGS_verbose'])
self.__surrogate_gamma = float(self.parameters['surrogate_gamma'])
self.__s = float(self.parameters['surrogate_s'])
self.__breakpoint_for_exp = float(self.parameters['breakpoint_for_exp'])
self.__b = self.__estimate_r
def __optimize(self):
logging.debug("Starting optimization with BFGS ...")
self.__needed_function_calls = 0
# starting_point
c_current = zeros(self.__dim, float64)
c_current[self.__dim-1] = self.__b
# Annealing sequence.
for i in range(len(self.__lam_Uvec)):
self.__lamU = self.__lam_Uvec[i]
# crop one dimension (in case the offset b is fixed)
c_current = c_current[:self.__dim-1]
c_current = self.__localSearch(c_current)
# reappend it if needed
c_current = np.append(c_current, self.__b)
f_opt = self.__getFitness(c_current)
return c_current, f_opt
def __localSearch(self, start):
c_opt, f_opt, d = optimize.fmin_l_bfgs_b(self.__getFitness, start, m=self.__BFGS_m, \
fprime=self.__getFitness_Prime, maxfun=self.__BFGS_maxfun,\
factr=self.__BFGS_factr, pgtol=self.__BFGS_pgtol, iprint=self.__BFGS_verbose)
self.__needed_function_calls += int(d['funcalls'])
return c_opt
def __getFitness(self,c):
# check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = mat(c)
b = c[:,self.__dim-1].T
c_new = c[:,0:self.__dim-1].T
c_new_sum = np.sum(c_new)
XTc = self.X_T*c_new - self.__mean_u.T*c_new_sum
preds_labeled = self.__surrogate_gamma*(1.0 - multiply(self.__YL, (self.X_l*XTc - self.__mean_u*XTc) + b[0,0]))
preds_unlabeled = (self.X_u*XTc - self.__mean_u*XTc) + b[0,0]
# This vector has a "one" for each "numerically instable" entry; "zeros" for "good ones".
preds_labeled_conflict_indicator = np.sign(np.sign(preds_labeled/self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1)*(preds_labeled_conflict_indicator - 1.0)
preds_labeled_for_conflicts = multiply(preds_labeled_conflict_indicator,preds_labeled)
preds_labeled = multiply(preds_labeled,preds_labeled_good_indicator)
# Compute values for good entries
preds_labeled_log_exp = np.log(1.0 + np.exp(preds_labeled))
# Compute values for unstable entries
preds_labeled_log_exp = multiply(preds_labeled_good_indicator, preds_labeled_log_exp)
# Replace critical values with their linear approximation (log(1+exp(x)) ~= x for large x)
preds_labeled_final = preds_labeled_log_exp + preds_labeled_for_conflicts
term1 = (1.0/(self.__surrogate_gamma*self.__size_l)) * np.sum(preds_labeled_final)
preds_unlabeled_squared = multiply(preds_unlabeled,preds_unlabeled)
term2 = (float(self.__lamU)/float(self.__size_u))*np.sum(np.exp(-self.__s * preds_unlabeled_squared))
term3 = self.__lam * c_new.T * (self.X * XTc - self.__mean_u*XTc)
return (term1 + term2 + term3)[0,0]
def __getFitness_Prime(self,c):
# check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = mat(c)
b = c[:,self.__dim-1].T
c_new = c[:,0:self.__dim-1].T
c_new_sum = np.sum(c_new)
XTc = self.X_T*c_new - self.__mean_u.T*c_new_sum
preds_labeled = self.__surrogate_gamma*(1.0 - multiply(self.__YL, (self.X_l*XTc -self.__mean_u*XTc) + b[0,0]))
preds_unlabeled = (self.X_u*XTc - self.__mean_u*XTc )+ b[0,0]
preds_labeled_conflict_indicator = np.sign(np.sign(preds_labeled/self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1)*(preds_labeled_conflict_indicator - 1.0)
preds_labeled = multiply(preds_labeled,preds_labeled_good_indicator)
preds_labeled_exp = np.exp(preds_labeled)
term1 = multiply(preds_labeled_exp, 1.0/(1.0 + preds_labeled_exp))
term1 = multiply(preds_labeled_good_indicator, term1)
# Replace critical values with "1.0"
term1 = term1 + preds_labeled_conflict_indicator
term1 = multiply(self.__YL, term1)
preds_unlabeled_squared_exp_f = multiply(preds_unlabeled,preds_unlabeled)
preds_unlabeled_squared_exp_f = np.exp(-self.__s * preds_unlabeled_squared_exp_f)
preds_unlabeled_squared_exp_f = multiply(preds_unlabeled_squared_exp_f, preds_unlabeled)
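        # Assemble the gradient: term1 (labeled surrogate loss), term2 (unlabeled
        # term), and term3 (regularizer) reuse the implicit mean-centering
        # from __getFitness.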
term1_sum = np.sum(term1)
tmp = self.X_l_T * term1 - self.__mean_u.T*term1_sum
term1 = (-1.0/self.__size_l) * (self.X * tmp - self.__mean_u*tmp)
preds_unlabeled_squared_exp_f_sum = np.sum(preds_unlabeled_squared_exp_f)
tmp_unlabeled = self.X_u_T * preds_unlabeled_squared_exp_f - self.__mean_u.T * preds_unlabeled_squared_exp_f_sum
term2 = ((-2.0 * self.__s * self.__lamU)/float(self.__size_u)) * (self.X * tmp_unlabeled - self.__mean_u*tmp_unlabeled)
        XTc_sum = np.sum(XTc)  # note: computed but not used below
term3 = 2*self.__lam*(self.X * XTc - self.__mean_u*XTc)
return array((term1 + term2 + term3).T)[0]
def __recomputeModel(self, indi):
self.__c = mat(indi[0]).T
############################################################################################
############################################################################################
class LinearKernel():
"""
Linear Kernel
"""
def __init__(self):
pass
def computeKernelMatrix(self, data1, data2, symmetric=False):
"""
Computes the kernel matrix
"""
logging.debug("Starting Linear Kernel Matrix Computation...")
self._data1 = mat(data1)
self._data2 = mat(data2)
assert self._data1.shape[1] == (self._data2.T).shape[0]
        try:
            km = self._data1 * self._data2.T
            logging.debug("Kernel Matrix computed...")
            return km
        except Exception as e:
            logging.error("Error while computing kernel matrix: " + str(e))
            import traceback
            traceback.print_exc()
            sys.exit()
def getKernelValue(self, xi, xj):
"""
Returns a single kernel value.
"""
xi = array(xi)
xj = array(xj)
val = dot(xi, xj)
return val
class DictLinearKernel():
"""
Linear Kernel (for dictionaries)
"""
def __init__(self):
pass
def computeKernelMatrix(self, data1, data2, symmetric=False):
"""
Computes the kernel matrix
"""
logging.debug("Starting Linear Kernel Matrix Computation...")
self._data1 = data1
self._data2 = data2
self._dim1 = len(data1)
self._dim2 = len(data2)
self._symmetric = symmetric
self.__km = None
try:
km = mat(zeros((self._dim1, self._dim2), dtype=float64))
if self._symmetric:
for i in range(self._dim1):
message = 'Kernel Matrix Progress: %dx%d/%dx%d' % (i, self._dim2,self._dim1,self._dim2)
logging.debug(message)
for j in range(i, self._dim2):
val = self.getKernelValue(self._data1[i], self._data2[j])
km[i, j] = val
km[j, i] = val
                logging.debug("Kernel Matrix computed...")
                return km
else:
for i in range(self._dim1):
message = 'Kernel Matrix Progress: %dx%d/%dx%d' % (i, self._dim2,self._dim1,self._dim2)
logging.debug(message)
for j in range(0, self._dim2):
val = self.getKernelValue(self._data1[i], self._data2[j])
km[i, j] = val
                logging.debug("Kernel Matrix computed...")
                return km
except Exception as e:
logging.error("Error while computing kernel matrix: " + str(e))
sys.exit()
def getKernelValue(self, xi, xj):
"""
Returns a single kernel value.
"""
val = 0.
for key in xi:
if key in xj:
val += xi[key]*xj[key]
return val
class RBFKernel():
"""
RBF Kernel
"""
def __init__(self, sigma):
self.__sigma = sigma
self.__sigma_squared_inv = 1.0 / (2* (self.__sigma ** 2) )
def computeKernelMatrix(self, data1, data2, symmetric=False):
"""
Computes the kernel matrix
"""
logging.debug("Starting RBF Kernel Matrix Computation...")
self._data1 = mat(data1)
self._data2 = mat(data2)
assert self._data1.shape[1] == (self._data2.T).shape[0]
self._dim1 = len(data1)
self._dim2 = len(data2)
self._symmetric = symmetric
self.__km = None
try:
if self._symmetric:
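                # Squared distances via ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi.xj,
                # built from the Gram matrix and its diagonal of squared norms.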
linearkm = self._data1 * self._data2.T
trnorms = mat(np.diag(linearkm)).T
trace_matrix = trnorms * mat(np.ones((1, self._dim1), dtype = float64))
self.__km = trace_matrix + trace_matrix.T
self.__km = self.__km - 2*linearkm
self.__km = - self.__sigma_squared_inv * self.__km
self.__km = np.exp(self.__km)
return self.__km
else:
m = self._data1.shape[0]
n = self._data2.shape[0]
assert self._data1.shape[1] == self._data2.shape[1]
linkm = mat(self._data1 * self._data2.T)
trnorms1 = []
for i in range(m):
trnorms1.append((self._data1[i] * self._data1[i].T)[0,0])
trnorms1 = mat(trnorms1).T
trnorms2 = []
for i in range(n):
trnorms2.append((self._data2[i] * self._data2[i].T)[0,0])
trnorms2 = mat(trnorms2).T
self.__km = trnorms1 * mat(np.ones((n, 1), dtype = float64)).T
self.__km = self.__km + mat(np.ones((m, 1), dtype = float64)) * trnorms2.T
self.__km = self.__km - 2 * linkm
self.__km = - self.__sigma_squared_inv * self.__km
self.__km = np.exp(self.__km)
return self.__km
except Exception as e:
logging.error("Error while computing kernel matrix: " + str(e))
sys.exit()
def getKernelValue(self, xi, xj):
"""
Returns a single kernel value.
"""
xi = array(xi)
xj = array(xj)
diff = xi-xj
val = exp(-self.__sigma_squared_inv * (dot(diff, diff)))
return val
class DictRBFKernel():
"""
RBF Kernel (for dictionaries)
"""
def __init__(self, sigma):
self.__sigma = sigma
        self.__sigma_squared_inv = 1.0 / (2 * (self.__sigma ** 2))  # match RBFKernel's normalization
def computeKernelMatrix(self, data1, data2, symmetric=False):
"""
Computes the kernel matrix
"""
logging.debug("Starting RBF Kernel Matrix Computation...")
self._data1 = data1
self._data2 = data2
self._dim1 = len(data1)
self._dim2 = len(data2)
self._symmetric = symmetric
self.__km = None
try:
km = mat(zeros((self._dim1, self._dim2), dtype=float64))
if self._symmetric:
for i in range(self._dim1):
message = 'Kernel Matrix Progress: %dx%d/%dx%d' % (i, self._dim2,self._dim1,self._dim2)
logging.debug(message)
for j in range(i, self._dim2):
val = self.getKernelValue(self._data1[i], self._data2[j])
km[i, j] = val
km[j, i] = val
                logging.debug("Kernel Matrix computed...")
                return km
else:
for i in range(0, self._dim1):
message = 'Kernel Matrix Progress: %dx%d/%dx%d' % (i, self._dim2,self._dim1,self._dim2)
logging.debug(message)
for j in range(0, self._dim2):
val = self.getKernelValue(self._data1[i], self._data2[j])
km[i, j] = val
                logging.debug("Kernel Matrix computed...")
                return km
except Exception as e:
logging.error("Error while computing kernel matrix: " + str(e))
sys.exit()
def getKernelValue(self, xi, xj):
"""
Returns a single kernel value.
"""
diff = xi.copy()
for key in xj:
if key in diff:
diff[key]-=xj[key]
else:
diff[key]=-xj[key]
        diff = list(diff.values())  # list() keeps this working on Python 3 as well
val = exp(-self.__sigma_squared_inv * (dot(diff, diff)))
return val
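
# --- Editor's usage sketch (not part of the original module) ---
# Minimal demonstration of the kernel classes above, assuming the numpy names
# used throughout this file (mat, array, dot, exp, float64, np) are imported
# at module level.
if __name__ == '__main__':
    pts = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    lin = LinearKernel()
    print(lin.computeKernelMatrix(pts, pts, symmetric=True))  # 3x3 Gram matrix
    rbf = RBFKernel(sigma=1.0)
    print(rbf.getKernelValue(pts[0], pts[1]))  # exp(-||x-y||^2 / (2*sigma^2))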
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
find_output,
)
import json
import os
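# Highest nSequence value that still signals opt-in replace-by-fee (BIP 125):
# inputs with nSequence <= 0xfffffffd mark a transaction as replaceable.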
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# Create one-input, one-output, no-fee transaction:
class PSBTTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
disconnect_nodes(offline_node, 1)
disconnect_nodes(online_node, 0)
disconnect_nodes(offline_node, 2)
disconnect_nodes(mining_node, 0)
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = online_node.getnewaddress(address_type="p2sh-segwit")
online_node.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
self.sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = online_node.listunspent(addresses=[offline_addr])
raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
self.sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
# Reconnect
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
def run_test(self):
        # Mine past the segwit activation height (432 on regtest).
        self.nodes[0].generate(500)
self.sync_all()
# Create and fund a raw tx for sending 10 SYS
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
        # Node 1 should not be able to add anything to it, and should return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# feeRate of 0.1 BTC / KB produces a total fee slightly below -maxtxfee (~0.05280000):
res = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 0.1})
assert_greater_than(res["fee"], 0.05)
assert_greater_than(0.06, res["fee"])
# feeRate of 10 BTC / KB produces a total fee well above -maxtxfee
# previously this was silently capped at -maxtxfee
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[1].walletcreatefundedpsbt, [{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 10})
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height)
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
        # Make sure BnB coin selection succeeds even when the change address comes
        # from a wallet that cannot provide the P2SH inner script
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
def test_psbt_input_keys(psbt_input, keys):
"""Check that the psbt input has only the expected keys."""
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
for i in range(0, 10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Missing inputs', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
if __name__ == '__main__':
PSBTTest().main()
|