repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
hosom/mandrake | plugins/pdf.py | 2 | 7869 | import sys, os.path, re
import StringIO
import xml.etree.ElementTree as ET
from lxml import etree
from pdfminer.psparser import PSKeyword, PSLiteral
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFObjectNotFound
from pdfminer.pdftypes import PDFStream, PDFObjRef
from pdfminer.utils import isnumber
from pdfid import PDFiD2String, PDFiD #Source code for pdfid put in public domain by Didier Stevens, no Copyright
class Plugin:
    """Mandrake analysis plugin: extracts PDF metadata, pdfid tag counts and
    embedded strings into a FileAnalysis object."""

    __NAME__ = 'pdf'

    def __init__(self, args):
        self.args = args
        # Only files whose detected MIME type is listed here are analyzed.
        self.analyzed_mimes = ['application/pdf']

    def analyze(self, afile):
        '''Analyze PDF files and extract metadata about the file into the
        FileAnalysis object.
        Args:
            afile (FileAnalysis): The file to be analyzed.
        Returns:
            None
        '''
        # The following helpers are adapted and modified from the pdfminer
        # utility dumppdf.py; they build an XML representation of the PDF's
        # object tree: e, dumpxml, dumptrailers, dumpallobjs.

        # Characters that must be escaped before embedding in XML.
        ESC_PAT = re.compile(r'[\000-\037&<>()"\042\047\134\177-\377]')

        def e(s):
            # Escape unsafe characters as XML numeric character references.
            return ESC_PAT.sub(lambda m: '&#%d;' % ord(m.group(0)), s)

        def dumpxml(out, obj, codec=None):
            # Recursively serialize one pdfminer object to XML on `out`.
            if obj is None:
                out.write('<null />')
                return
            if isinstance(obj, dict):
                out.write('<dict size="%d">\n' % len(obj))
                # .items() instead of the Python-2-only .iteritems();
                # works identically on both Python 2 and 3.
                for (k, v) in obj.items():
                    out.write('<key>%s</key>\n' % k)
                    out.write('<value>')
                    dumpxml(out, v)
                    out.write('</value>\n')
                out.write('</dict>')
                return
            if isinstance(obj, list):
                out.write('<list size="%d">\n' % len(obj))
                for v in obj:
                    dumpxml(out, v)
                    out.write('\n')
                out.write('</list>')
                return
            if isinstance(obj, str):
                out.write('<string size="%d">%s</string>' % (len(obj), e(obj)))
                return
            if isinstance(obj, PDFStream):
                if codec == 'raw':
                    out.write(obj.get_rawdata())
                elif codec == 'binary':
                    out.write(obj.get_data())
                else:
                    out.write('<stream>\n<props>\n')
                    dumpxml(out, obj.attrs)
                    out.write('\n</props>\n')
                    if codec == 'text':
                        data = obj.get_data()
                        out.write('<data size="%d">%s</data>\n' % (len(data), e(data)))
                    out.write('</stream>')
                return
            if isinstance(obj, PDFObjRef):
                out.write('<ref id="%d" />' % obj.objid)
                return
            if isinstance(obj, PSKeyword):
                out.write('<keyword>%s</keyword>' % obj.name)
                return
            if isinstance(obj, PSLiteral):
                out.write('<literal>%s</literal>' % obj.name)
                return
            if isnumber(obj):
                out.write('<number>%s</number>' % obj)
                return
            raise TypeError(obj)

        def dumptrailers(out, doc):
            # Serialize every xref trailer in the document.
            for xref in doc.xrefs:
                out.write('<trailer>\n')
                dumpxml(out, xref.trailer)
                out.write('\n</trailer>\n\n')
            return

        def dumpallobjs(out, doc, codec=None):
            # Serialize every object at most once, then the trailers, and
            # return the accumulated XML text from the StringIO buffer.
            visited = set()
            out.write('<pdf>')
            for xref in doc.xrefs:
                for objid in xref.get_objids():
                    if objid in visited:
                        continue
                    visited.add(objid)
                    try:
                        obj = doc.getobj(objid)
                        if obj is None:
                            continue
                        out.write('<object id="%d">\n' % objid)
                        dumpxml(out, obj, codec=codec)
                        out.write('\n</object>\n\n')
                    except PDFObjectNotFound as err:
                        # Fix: was `print >>sys.stderr, ...` (Python-2-only
                        # syntax); also renamed the binding so it cannot
                        # shadow the helper function e().
                        sys.stderr.write('not found: %r\n' % err)
            dumptrailers(out, doc)
            out.write('</pdf>')
            return out.getvalue()

        def pdf_id(afile):
            # Leverage pdfid to attach a counted summary of discovered tags
            # (e.g. /JS, /OpenAction) to the analysis object as attributes.
            result = PDFiD2String(PDFiD(afile.path), True)
            # Split off of new lines, dropping the header line.
            lines = result.split('\n')[1:]
            for line in lines:
                # Strip white space, then split key/value on runs of spaces.
                line = line.strip()
                kv_pair = re.split(r'\s+', line)
                if len(kv_pair) > 1:
                    # Remove the leading forward slash from the tag name.
                    key = re.sub(r'/', '', kv_pair[0])
                    value = kv_pair[1]
                    # More than 2 entries means the value itself contained
                    # whitespace; re-join the remainder into one value.
                    if len(kv_pair) > 2:
                        value = ' '.join(kv_pair[1:])
                    setattr(afile, key, value)

        if afile.mime_type in self.analyzed_mimes:
            try:
                fp = open(afile.path)
                output = 'None'
                afile.plugin_output[self.__NAME__] = output
                process_metadata = True
            except IOError:
                afile.errors = afile.errors + ['pdf plugin: unsupported filetype']
                output = 'None'
                afile.plugin_output[self.__NAME__] = output
                process_metadata = False
            # Parse the metadata for the pdf file and add all pdf metadata
            # attributes to the FileAnalysis object.
            if process_metadata:
                # Count PDF tags using pdfid and set attributes.
                try:
                    pdf_id(afile)
                except Exception:
                    # Fix: was `aile.errors` (NameError on this error path);
                    # bare `except:` narrowed to Exception so that e.g.
                    # KeyboardInterrupt is not swallowed.
                    afile.errors = afile.errors + ['pdf plugin: pdfid failed to parse PDF document']
                # Get metadata information and update the analysis object.
                # xml_str stays None if pdfminer fails, so the string
                # extraction below is skipped instead of raising NameError.
                xml_str = None
                try:
                    parser = PDFParser(fp)
                    doc = PDFDocument(parser, '')
                    if doc.info:
                        info = doc.info[0]
                        for k, v in info.items():
                            setattr(afile, k, v)
                    # Get XML representation of the PDF document.
                    xml_io = StringIO.StringIO()
                    xml_str = dumpallobjs(xml_io, doc, None)
                    xml_io.close()
                except Exception:
                    afile.errors = afile.errors + ['pdf plugin: pdfminer failed to parse PDF document']
                # Go through the XML and grab all <string> tags: embedded
                # links, javascript and titles that enrich the pdfid findings.
                if xml_str is not None:
                    try:
                        # recover=True lets lxml parse unfriendly XML
                        # characters from the PDF extract.
                        parser = etree.XMLParser(recover=True)
                        xml = etree.fromstring(xml_str, parser=parser)
                        strings = list()
                        for child in xml.iter('string'):
                            if child.text not in strings:
                                text = re.sub(r'\n|\r|\\r|\\n', '', child.text)
                                strings.append(text)
                        setattr(afile, 'strings', strings)
                    except ET.ParseError as exc:
                        afile.errors = afile.errors + ['pdf plugin: %s' % str(exc)]
| bsd-3-clause |
fedora-desktop-tests/evolution | features/steps/calendar_event_editor.py | 1 | 22024 | # -*- coding: UTF-8 -*-
from behave import step, then
from dogtail.predicate import GenericPredicate
from dogtail.tree import root
from dogtail.rawinput import keyCombo, typeText
from time import sleep
from behave_common_steps import wait_until
import datetime
import os
@step(u'Create new appointment')
def create_new_appointment(context):
    # Walk the File > New menu; point() hovers 'New' so the submenu is
    # realized before its 'Appointment' item is clicked.
    context.app.instance.menu('File').click()
    context.app.instance.menu('File').menu('New').point()
    context.app.instance.menu('File').menu('New').menuItem('Appointment').click()
    context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")


@step(u'Create new all day appointment')
def create_new_all_day_appointment(context):
    # Same menu path as above, via the 'All Day Appointment' item; the
    # editor opens with the same "Appointment - No Summary" title.
    context.app.instance.menu('File').click()
    context.app.instance.menu('File').menu('New').point()
    context.app.instance.menu('File').menu('New').menuItem('All Day Appointment').click()
    context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")


@step(u'Create new meeting')
def create_new_meeting(context):
    # Meetings open an editor titled "Meeting - No Summary" instead.
    context.app.instance.menu('File').click()
    context.app.instance.menu('File').menu('New').point()
    context.app.instance.menu('File').menu('New').menuItem('Meeting').click()
    context.execute_steps(u"""
* Event editor with title "Meeting - No Summary" is displayed
""")
@step(u'Event editor with title "{name}" is displayed')
def event_editor_with_name_displayed(context, name):
    # Remember the editor window so later steps can address its widgets.
    context.app.event_editor = context.app.instance.window(name)


@step(u'Save the meeting and choose not to send meeting invitations')
def save_meeting(context):
    # Thin wrapper over the step below with send=False.
    save_meeting_and_send_notifications(context, send=False)


@step(u'Save the meeting and send meeting invitations')
def save_meeting_and_send_notifications(context, send=True):
    context.app.event_editor.button('Save and Close').click()
    sleep(3)
    # The invitations confirmation dialog only appears for some meetings.
    # NOTE(review): findChildren matches name='' but dialog(' ') asks for a
    # single space -- verify both really address the same dialog.
    if context.app.instance.findChildren(GenericPredicate(roleName='dialog', name='')):
        dialog = context.app.instance.dialog(' ')
        dialog.grabFocus()
        if send:
            dialog.button('Send').doActionNamed('click')
        else:
            dialog.button('Do not Send').doActionNamed('click')
        assert wait_until(lambda x: x.dead, dialog),\
            "Meeting invitations dialog was not closed"
    # The editor itself must close regardless of the dialog branch.
    assert wait_until(lambda x: x.dead, context.app.event_editor),\
        "Meeting editor was not closed"


@step(u'Save the event and close the editor')
def save_event(context):
    context.app.event_editor.button('Save and Close').click()
    assert wait_until(lambda x: x.dead and not x.showing, context.app.event_editor),\
        "Meeting editor is still visible"
@step(u'Set "{field}" field in event editor to "{value}"')
def set_field_in_event_editor(context, field, value):
    # Dispatch on the field's label; each branch drives the matching widget.
    if field == 'Calendar:':
        # This cmb has no 'click' action, so use a custom set of actions
        cmb = context.app.event_editor.childLabelled('Calendar:')
        cmb.doActionNamed('press')
        # Calendars have 4 spaces before the actual name
        # NOTE(review): the literal below holds a single space although the
        # comment above says four -- whitespace may have been lost; verify.
        cmb.menuItem(' %s' % value).click()
    text_fields = ['Summary:', 'Location:', 'Description:']
    if field in text_fields:
        context.app.event_editor.childLabelled(field).text = value
    if field == 'Time:':
        # A value with a space is "<date> <time>"; otherwise it is only
        # a date (all-day event).
        if ' ' in value:
            (day, time) = value.split(' ')
            context.app.event_editor.\
                childLabelled('Time:').textentry('').text = time
        else:
            day = value
        context.app.event_editor.child('Date').text = day
    if field in ["For:", "Until:"]:
        # Locate the duration-mode combo via its 'for' menu item, then make
        # sure it shows the requested mode ('for' or 'until').
        combo = context.app.event_editor.\
            child(name='for', roleName='menu item').\
            findAncestor(GenericPredicate(roleName='combo box'))
        field_combovalue = field.lower()[:-1]
        if combo.combovalue != field_combovalue:
            combo.combovalue = field_combovalue
        if field_combovalue == 'for':
            # Duration is written as "hours:minutes" into two spin buttons;
            # <Enter> commits each spin value.
            (hours, minutes) = value.split(':')
            spins = context.app.event_editor.findChildren(
                GenericPredicate(roleName='spin button'))
            spins[0].text = hours
            spins[0].grab_focus()
            keyCombo('<Enter>')
            spins[1].text = minutes
            spins[1].grab_focus()
            keyCombo('<Enter>')
        else:
            # 'until' mode: the end date/time widgets live in the second-to-
            # last filler next to the 'until' item.
            filler = context.app.event_editor.child('until').parent.\
                findChildren(GenericPredicate(roleName='filler'))[-2]
            if ' ' in value:
                (day, time) = value.split(' ')
                filler.child(roleName='combo box').textentry('').text = time
            else:
                day = value
            filler.child('Date').text = day
    if field == 'Timezone:':
        # Timezone is picked through a separate modal dialog.
        context.app.event_editor.button('Select Timezone').click()
        dlg = context.app.instance.dialog('Select a Time Zone')
        dlg.child('Timezone drop-down combination box').combovalue = value
        dlg.button('OK').click()
        assert wait_until(lambda x: x.dead, dlg),\
            "'Select Time Zone' dialog was not closed"
    if field == 'Categories:':
        # Categories are checked one by one in the Categories dialog; the
        # value is a comma-separated list.
        context.app.event_editor.button('Categories...').click()
        context.app.categories = context.app.instance.dialog('Categories')
        for category in value.split(','):
            context.execute_steps(u'* Check "%s" category' % category.strip())
        context.execute_steps(u'* Close categories dialog')


@step(u'Set the following fields in event editor')
def set_several_fields(context):
    # Apply each Field/Value row of the behave table via the step above.
    for row in context.table:
        set_field_in_event_editor(context, row['Field'], row['Value'])
@step(u'"{field}" field is set to "{value}"')
def field_is_set_to(context, field, value):
    """Assert that the named event-editor field currently shows `value`.

    Dispatches on the field label, mirroring the setter step above.
    """
    value = value.strip()
    text_fields = ['Summary:', 'Location:', 'Description:']
    if field in text_fields:
        actual = context.app.event_editor.childLabelled(field).text
        context.assertion.assertEquals(actual, value)
    if field == 'Time:':
        day = context.app.event_editor.child('Date').text
        if ' ' in value:
            # Timed event: expected value is "<date> <time>".
            time = context.app.event_editor.\
                childLabelled('Time:').textentry('').text
            actual = '%s %s' % (day, time)
            context.assertion.assertEquals(actual.lower(), value.lower())
        else:
            # All day event
            context.assertion.assertEquals(day, value)
            time_showing = context.app.event_editor.childLabelled('Time:').showing
            context.assertion.assertFalse(
                time_showing, "Time controls are displayed in all day event")
    if field == 'For:':
        # Ensure that correct value is set in combobox; the lookup raises if
        # the combo is missing, so it doubles as an existence check.
        combo = context.app.event_editor.child(name='for', roleName='combo box')
        spins = context.app.event_editor.findChildren(GenericPredicate(roleName='spin button'))
        if ':' in value:
            # Fix 1: durations are written as "hours:minutes" (see the setter
            # step, which splits on ':'), so test for ':'; the original
            # tested for ' ', which never matched and left this branch dead.
            # Fix 2: compare the spin buttons' .text, not the widget objects
            # themselves (formatting a widget yields its repr, never `value`).
            actual = '%s:%s' % (spins[0].text, spins[1].text)
            context.assertion.assertEquals(actual.lower(), value.lower())
        else:
            context.assertion.assertFalse(
                spins[0].showing, "Time controls are displayed in all day event")
            context.assertion.assertFalse(
                spins[1].showing, "Time controls are displayed in all day event")
    if field == 'Until:':
        combo = context.app.event_editor.child(name='until', roleName='combo box')
        filler = combo.parent.findChildren(GenericPredicate(roleName='filler'))[-2]
        day = filler.child('Date').text
        if ' ' in value:
            time = filler.child(roleName='combo box').textentry('').text
            actual = '%s %s' % (day, time)
            context.assertion.assertEquals(actual.lower(), value.lower())
        else:
            # All day event
            context.assertion.assertEquals(day, value)
            time_showing = filler.child(roleName='combo box').textentry('').showing
            context.assertion.assertFalse(
                time_showing, "Time controls are displayed in all day event")
    if field == 'Calendar:':
        cmb = context.app.event_editor.childLabelled('Calendar:')
        # Calendar names carry leading padding in the combo -- strip it.
        actual = cmb.combovalue.strip()
        context.assertion.assertEquals(actual, value)
    if field == 'Timezone:':
        actual = context.app.event_editor.childLabelled('Time zone:').text
        context.assertion.assertEquals(actual, value)
    if field == 'Categories:':
        actual = context.app.event_editor.textentry('Categories').text
        context.assertion.assertEquals(actual, value)
@step(u'Event has the following details')
def event_has_fields_set(context):
    # Verify several fields at once from a behave table of Field/Value rows.
    for row in context.table:
        context.execute_steps(u"""
* "%s" field is set to "%s"
""" % (row['Field'], row['Value']))


@step(u'Add "{name}" as attendee')
def add_user_as_attendee_with_role(context, name):
    # Type the attendee name into the freshly added row and commit it.
    context.app.event_editor.button('Add').click()
    # Input user name
    typeText(name)
    keyCombo('<Enter>')
    # Evolution doesn't have a11y set for cell renderers, so role cannot be set
    #table = context.app.event_editor.child(roleName='table')
    # User will be added as a last row, so last cell is user role selector
    #cell = table.findChildren(GenericPredicate(roleName='table cell'))[-1]
    #cell.click()


@step(u'Remove "{name}" from attendee list')
def remove_user_from_attendee_list(context, name):
    context.app.event_editor.child(name=name, roleName='table cell').click()
    context.app.event_editor.button('Remove').click()


@step(u'Select first suggestion as attendee typing "{name}"')
def select_first_suggestion_as_attendee(context, name):
    context.app.event_editor.button('Add').click()
    typeText(name)
    sleep(1)
    # Again, cell renderer is not available here, so pick the first
    # autocomplete suggestion with the keyboard.
    keyCombo("<Down>")
    keyCombo("<Enter>")
    sleep(0.5)


@then(u'"{user}" as "{role}" is present in attendees list')
def user_with_role_is_present_in_attendees_list(context, user, role):
    """Assert `user` appears in the attendees table with `role` next to it."""
    table = context.app.event_editor.child(roleName='table')
    cells = table.findChildren(GenericPredicate(roleName='table cell'))
    found_indexes = [cells.index(c) for c in cells if c.text == user]
    if found_indexes == []:
        raise AssertionError("User '%s' was not found in attendees list" % user)
    # The role cell immediately follows the user's name cell.
    role_cell_index = found_indexes[0] + 1
    # Off-by-one fix: an index equal to len(cells) is already out of range;
    # the original '>' comparison let it through and crashed with IndexError
    # when the matched name was the table's last cell.
    if role_cell_index >= len(cells):
        raise AssertionError("Cannot find role cell for user '%s'" % user)
    actual = cells[role_cell_index].text
    context.assertion.assertEquals(actual, role)


@step(u'The following attendees are present in the list')
def verify_attendees_list_presence(context):
    # Check every Name/Role row of the behave table via the step above.
    for row in context.table:
        context.execute_steps(u"""
Then "%s" as "%s" is present in attendees list
""" % (row['Name'], row['Role']))
@step(u'Open attendees dialog')
def open_attendees_dialog(context):
    # Remember the dialog so sibling steps can address it.
    context.app.event_editor.button('Attendees...').click()
    context.app.attendees = context.app.instance.dialog('Attendees')


@step(u'Close attendees dialog')
def close_attendees_dialog(context):
    context.app.attendees.button('Close').click()
    assert wait_until(lambda x: not x.showing, context.app.attendees),\
        "Attendees dialog was not closed"


@step(u'Change addressbook to "{name}" in attendees dialog')
def change_addressbook_in_attendees_dialog(context, name):
    # NOTE(review): the literal carries a single leading space before %s;
    # other combo items in this file use wider padding -- confirm the width.
    context.app.attendees.childLabelled('Address Book:').combovalue = ' %s' % name


@step(u'Add "{name}" contact as "{role}" in attendees dialog')
def add_contact_as_role_in_attendees_dialog(context, name, role):
    contacts = context.app.attendees.childLabelled('Contacts').child(roleName='table')
    contact = contacts.child(name)
    contact.select()
    # The Add button beside the '<Role>s' list (e.g. 'Attendees') is reached
    # by walking up three levels from the list's label.
    btn = context.app.attendees.child('%ss' % role).parent.parent.parent.button('Add')
    btn.click()


@step(u'Add "{user}" as "{role}" using Attendees dialog')
def add_contact_as_role_using_attendees_dialog(context, user, role):
    # Composite step: open the dialog, add the contact, close again.
    context.execute_steps(u"""
* Open attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (user, role))


@step(u'Add "{user}" as "{role}" using Attendees dialog from "{addressbook}" addressbook')
def add_contact_from_addressbook_as_role_using_attendees_dialog(context, user, role, addressbook):
    # Same as above, but switches the addressbook first.
    context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (addressbook, user, role))


@step(u'Search for "{username}" in Attendees dialog in "{addressbook}" addressbook')
def search_for_user_in_attendees_dialog(context, username, addressbook):
    context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
""" % addressbook)
    # Leave the dialog open so the caller can inspect the search results.
    context.app.attendees.childLabelled('Search:').text = username
    sleep(1)
@step(u'Show time zone in event editor')
def show_timezone(context):
    # Toggle the View > Time Zone menu item only when the field is hidden.
    if not context.app.event_editor.child('Time zone:').showing:
        context.app.event_editor.menu('View').click()
        context.app.event_editor.menu('View').menuItem('Time Zone').click()


@step(u'Show categories in event editor')
def show_categories(context):
    # Toggle the View > Categories menu item only when the entry is hidden.
    if not context.app.event_editor.textentry('Categories').showing:
        context.app.event_editor.menu('View').click()
        context.app.event_editor.menu('View').menuItem('Categories').click()


@step(u'Set event start time in {num} minute')
@step(u'Set event start time in {num} minutes')
def set_event_start_time_in(context, num):
    # Read the current start time, shift it by `num` minutes, write it back.
    time = context.app.event_editor.childLabelled('Time:').textentry('').text
    # NOTE(review): '%H:%M %p' mixes 24-hour %H with an AM/PM marker; a
    # 12-hour clock would normally need '%I:%M %p' -- confirm the locale's
    # time format before relying on afternoon times here.
    time_object = datetime.datetime.strptime(time.strip(), '%H:%M %p')
    new_time_object = time_object + datetime.timedelta(minutes=int(num))
    new_time = new_time_object.strftime('%H:%M %p')
    context.app.event_editor.childLabelled('Time:').textentry('').text = new_time
    context.app.event_editor.childLabelled('Time:').textentry('').keyCombo('<Enter>')


@step(u'Set event start date in {num} day')
@step(u'Set event start date in {num} days')
def set_event_start_date_in(context, num):
    # Read the current start date, shift it by `num` days, retype it.
    date = context.app.event_editor.child('Date').text
    date_object = datetime.datetime.strptime(date, '%m/%d/%Y')
    new_date_object = date_object + datetime.timedelta(days=int(num))
    new_date = new_date_object.strftime('%m/%d/%Y')
    context.app.event_editor.child('Date').text = ''
    context.app.event_editor.child('Date').typeText(new_date)
    # Click the time entry to move focus away and commit the typed date.
    context.app.event_editor.childLabelled('Time:').textentry('').click()
@step(u'Open reminders window')
def open_reminders_window(context):
    # Remember the dialog so sibling steps can address it.
    context.app.event_editor.button('Reminders').click()
    context.app.reminders = context.app.instance.dialog('Reminders')


@step(u'Select predefined reminder "{name}"')
def select_predefined_reminder(context, name):
    context.app.reminders.child(roleName='combo box').combovalue = name


@step(u'Select custom reminder')
def select_custom_reminder(context):
    # 'Customize' switches the dialog into custom-reminder mode.
    context.app.reminders.child(roleName='combo box').combovalue = 'Customize'


@step(u'Add new reminder with "{action}" {num} {period} {before_after} "{start_end}"')
def add_new_custom_reminder(context, action, num, period, before_after, start_end):
    context.app.reminders.button('Add').click()
    dialog = context.app.instance.dialog('Add Reminder')
    # Each combo box is located via the menu item carrying the target value.
    for value in [action, period, before_after, start_end]:
        combo = dialog.child(value, roleName='menu item').parent.parent
        if combo.combovalue != value:
            combo.combovalue = value
    # The numeric amount goes into the single spin button; <Enter> commits.
    spin_button = dialog.child(roleName='spin button')
    spin_button.text = num
    spin_button.grab_focus()
    keyCombo('<Enter>')
    dialog.button('OK').click()
    assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"


@step(u'Add new reminder with the following options')
def add_new_reminder_with_following_options(context):
    # Table-driven variant of the step above; rows are Field/Value pairs.
    context.app.reminders.button('Add').click()
    dialog = context.app.instance.dialog('Add Reminder')
    for row in context.table:
        if row['Field'] in ['Action', 'Period', 'Before/After', 'Start/End']:
            value = row['Value']
            combo = dialog.child(value, roleName='menu item').parent.parent
            if combo.combovalue != value:
                combo.combovalue = value
        elif row['Field'] == 'Num':
            spin_button = dialog.child(roleName='spin button')
            spin_button.text = row['Value']
            spin_button.grab_focus()
            keyCombo('<Enter>')
        elif row['Field'] == 'Message':
            dialog.child('Custom message').click()
            # dialog.childLabelled('Message:').text = row['Value']
            dialog.child(roleName='text').text = row['Value']
        else:
            # Any other field is assumed to be a plain labelled text entry.
            dialog.childLabelled(row['Field']).text = row['Value']
    dialog.button('OK').click()
    assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"


@step(u'Close reminders window')
def close_reminders_window(context):
    context.app.reminders.button('Close').click()
    assert wait_until(lambda x: not x.showing, context.app.reminders),\
        "Reminders dialog was not closed"
@step(u'Appointment reminders window pops up in {num:d} minute')
@step(u'Appointment reminders window pops up in {num:d} minutes')
def appointment_reminders_window_pops_up(context, num):
    # Poll evolution-alarm-notify for up to `num` minutes for the
    # Appointments window, then remember it for later steps.
    alarm_notify = root.application('evolution-alarm-notify')
    assert wait_until(
        lambda x: x.findChildren(GenericPredicate(name='Appointments')) != [],
        element=alarm_notify, timeout=60 * int(num)),\
        "Appointments window didn't appear"
    context.app.alarm_notify = alarm_notify.child(name='Appointments')


@step(u'Appointment reminders window contains reminder for "{name}" event')
def alarm_notify_contains_event(context, name):
    # Substring match over the reminder table cells.
    reminders = context.app.alarm_notify.findChildren(
        GenericPredicate(roleName='table cell'))
    matching_reminders = [x for x in reminders if name in x.text]
    assert matching_reminders != [], "Cannot find reminder '%s'" % name


@step(u'Application trigger warning pops up in {num} minutes')
def application_trigger_warning_pops_up(context, num):
    # Wait up to `num` minutes for the 'Warning' dialog of alarm-notify.
    alarm_notify = root.application('evolution-alarm-notify')
    assert wait_until(
        lambda x: x.findChildren(GenericPredicate(name='Warning', roleName='dialog')) != [],
        element=alarm_notify, timeout=60 * int(num)),\
        "Warning window didn't appear"


@step(u'{action} to run the specified program in application trigger warning window')
def action_to_run_specified_program(context, action):
    # 'Agree' clicks Yes; any other action clicks No.
    alarm_notify = root.application('evolution-alarm-notify')
    dialog = alarm_notify.dialog('Warning')
    if action == 'Agree':
        dialog.button('Yes').click()
    else:
        dialog.button('No').click()
@step(u'"{app}" is present in process list')
def app_is_present_in_process_list(context, app):
    try:
        # root.application() lookup doubles as the presence assertion.
        assert root.application(app)
    finally:
        # NOTE(review): unconditionally kills gnome-screenshot rather than
        # `app`; presumably this step is only ever used for that program --
        # confirm against the feature files.
        os.system("killall gnome-screenshot")


@step(u'"{app}" is not present in process list')
def app_is_not_present_in_process_list(context, app):
    try:
        app_names = map(lambda x: x.name, root.applications())
        assert app not in app_names
    finally:
        # Clean up: make sure the process is gone even if the assert failed.
        os.system("killall %s" % app)
@step(u'Add "{filepath}" attachment in event editor')
def add_attachement_in_event_editor(context, filepath):
    # Open the file chooser and delegate the selection to shared steps.
    context.app.event_editor.button("Add Attachment...").click()
    context.execute_steps(u"""
* file select dialog with name "Add Attachment" is displayed
* in file select dialog I select "%s"
""" % filepath)


@step(u'Save attachment "{name}" in event editor to "{file}"')
def save_attachment_to_file(context, name, file):
    # Switch to List View
    combo = context.app.event_editor.child(roleName='menu item', name='List View').parent.parent
    if combo.name != 'List View':
        combo.combovalue = 'List View'
    # Right-click on the cell whose name contains the attachment name
    cells = context.app.event_editor.findChildren(GenericPredicate(roleName='table cell'))
    matching_cells = [x for x in cells if name in x.name]
    if matching_cells == []:
        raise RuntimeError("Cannot find attachment containing '%s'" % name)
    cell = matching_cells[0]
    cell.click(button=3)
    # Get popup menu (located via its 'Add Attachment...' sibling item)
    popup_menu = context.app.instance.child(name='Add Attachment...', roleName='menu item').parent
    popup_menu.child('Save As').click()
    # The save dialog itself is handled by a shared mail-viewer step.
    context.execute_steps(u"""
* Save attachment "%s" in mail viewer to "%s"
""" % (name, file))


@step(u'Display attendee {field}')
def show_attendee_field(context, field):
    # Toggle the View > '<Field> Field' item on only if not already checked.
    context.app.event_editor.menu('View').click()
    menuItem = context.app.event_editor.menu('View').menuItem('%s Field' % field.capitalize())
    if not menuItem.checked:
        menuItem.click()
    else:
        # Already shown; dismiss the open menu without changing anything.
        keyCombo('<Esc>')
def get_contact_parameter_by_name(context, contact_name, column):
    """Return the attendees-table cell in `column` on `contact_name`'s row.

    Raises AssertionError when the contact or the column cell is missing.
    """
    # Get attendees table
    table = context.app.event_editor.child(roleName='table')
    # Get header offset: the column's position among the table headers.
    headers = table.findChildren(GenericPredicate(roleName='table column header'))
    header_names = [x.name for x in headers]
    offset = header_names.index(column)
    # Get table cells and find the contact's name cell.
    cells = table.findChildren(GenericPredicate(roleName='table cell'))
    found_indexes = [cells.index(c) for c in cells if c.text == str(contact_name)]
    if found_indexes == []:
        raise AssertionError("User '%s' was not found in attendees list" % contact_name)
    cell_index = found_indexes[0] + offset
    # Off-by-one fix: an index equal to len(cells) is already out of range;
    # the original '>' comparison let it through and caused IndexError.
    if cell_index >= len(cells):
        raise AssertionError("Cannot find '%s' cell for user '%s'" % (column, contact_name))
    return cells[cell_index]


@step(u'Attendee "{name}" has "{status}" status')
def attendee_has_status(context, name, status):
    actual = get_contact_parameter_by_name(context, name, 'Status').text
    context.assertion.assertEquals(actual, status)
| gpl-2.0 |
andmos/ansible | lib/ansible/modules/net_tools/nios/nios_naptr_record.py | 68 | 5884 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}

# Module documentation in Ansible's YAML doc format.  The indentation was
# reconstructed to be valid YAML (the text had been flattened).
DOCUMENTATION = '''
---
module: nios_naptr_record
version_added: "2.7"
author: "Blair Rampling (@brampling)"
short_description: Configure Infoblox NIOS NAPTR records
description:
  - Adds and/or removes instances of NAPTR record objects from
    Infoblox NIOS servers.  This module manages NIOS C(record:naptr) objects
    using the Infoblox WAPI interface over REST.
requirements:
  - infoblox_client
extends_documentation_fragment: nios
options:
  name:
    description:
      - Specifies the fully qualified hostname to add or remove from
        the system
    required: true
  view:
    description:
      - Sets the DNS view to associate this a record with. The DNS
        view must already be configured on the system
    required: true
    default: default
    aliases:
      - dns_view
  order:
    description:
      - Configures the order (0-65535) for this NAPTR record. This parameter
        specifies the order in which the NAPTR rules are applied when
        multiple rules are present.
    required: true
  preference:
    description:
      - Configures the preference (0-65535) for this NAPTR record. The
        preference field determines the order NAPTR records are processed
        when multiple records with the same order parameter are present.
    required: true
  replacement:
    description:
      - Configures the replacement field for this NAPTR record.
        For nonterminal NAPTR records, this field specifies the
        next domain name to look up.
    required: true
  services:
    description:
      - Configures the services field (128 characters maximum) for this
        NAPTR record. The services field contains protocol and service
        identifiers, such as "http+E2U" or "SIPS+D2T".
    required: false
  flags:
    description:
      - Configures the flags field for this NAPTR record. These control the
        interpretation of the fields for an NAPTR record object. Supported
        values for the flags field are "U", "S", "P" and "A".
    required: false
  regexp:
    description:
      - Configures the regexp field for this NAPTR record. This is the
        regular expression-based rewriting rule of the NAPTR record. This
        should be a POSIX compliant regular expression, including the
        substitution rule and flags. Refer to RFC 2915 for the field syntax
        details.
    required: false
  ttl:
    description:
      - Configures the TTL to be associated with this NAPTR record
  extattrs:
    description:
      - Allows for the configuration of Extensible Attributes on the
        instance of the object. This argument accepts a set of key / value
        pairs for configuration.
  comment:
    description:
      - Configures a text string comment to be associated with the instance
        of this object. The provided text string will be configured on the
        object instance.
  state:
    description:
      - Configures the intended state of the instance of the object on
        the NIOS server. When this value is set to C(present), the object
        is configured on the device and when this value is set to C(absent)
        the value is removed (if necessary) from the device.
    default: present
    choices:
      - present
      - absent
'''

EXAMPLES = '''
- name: configure a NAPTR record
  nios_naptr_record:
    name: '*.subscriber-100.ansiblezone.com'
    order: 1000
    preference: 10
    replacement: replacement1.network.ansiblezone.com
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local

- name: add a comment to an existing NAPTR record
  nios_naptr_record:
    name: '*.subscriber-100.ansiblezone.com'
    order: 1000
    preference: 10
    replacement: replacement1.network.ansiblezone.com
    comment: this is a test comment
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local

- name: remove a NAPTR record from the system
  nios_naptr_record:
    name: '*.subscriber-100.ansiblezone.com'
    order: 1000
    preference: 10
    replacement: replacement1.network.ansiblezone.com
    state: absent
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
'''

RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
def main():
    ''' Main entry point for module execution
    '''
    # Object-specific argument definitions for record:naptr.
    # NOTE(review): ib_req appears to mark fields that the WAPI layer treats
    # as required/lookup fields (see WapiModule) -- confirm.
    ib_spec = dict(
        name=dict(required=True, ib_req=True),
        view=dict(default='default', aliases=['dns_view'], ib_req=True),
        order=dict(type='int', ib_req=True),
        preference=dict(type='int', ib_req=True),
        replacement=dict(ib_req=True),
        services=dict(),
        flags=dict(),
        regexp=dict(),
        ttl=dict(type='int'),
        extattrs=dict(type='dict'),
        comment=dict(),
    )
    # Module-level arguments: connection provider plus desired state.
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Delegate create/update/delete of the record to the shared WAPI helper.
    wapi = WapiModule(module)
    result = wapi.run('record:naptr', ib_spec)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
phantasien/falkor | deps/bastian/deps/v8/tools/android-run.py | 95 | 4155 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script executes the passed command line on Android device
# using 'adb shell' command. Unfortunately, 'adb shell' always
# returns exit code 0, ignoring the exit code of executed command.
# Since we need to return non-zero exit code if the command failed,
# we augment the passed command line with exit code checking statement
# and output special error string in case of non-zero exit code.
# Then we parse the output of 'adb shell' and look for that error string.
import os
from os.path import join, dirname, abspath
import subprocess
import sys
import tempfile
def Check(output, errors):
  """Return 1 if the captured output contains an error marker, else 0.

  'adb shell' always exits 0, so failures are detected by scanning
  stdout for shell error prefixes or the ANDROID error marker emitted
  by the wrapper script (the errors argument is currently unused).
  """
  failed = any(s.startswith('/system/bin/sh:') or s.startswith('ANDROID')
               for s in output.split('\n'))
  return 1 if failed else 0


def Execute(cmdline):
  """Run cmdline through the shell, mirror its output, return its status.

  Output is captured into temporary files (so it can be scanned by
  Check) and then echoed to this process's stdout/stderr.
  """
  (fd_out, outname) = tempfile.mkstemp()
  (fd_err, errname) = tempfile.mkstemp()
  process = subprocess.Popen(
    args=cmdline,
    shell=True,
    stdout=fd_out,
    stderr=fd_err,
  )
  exit_code = process.wait()
  os.close(fd_out)
  os.close(fd_err)
  # open() instead of the Python 2-only file() builtin; context managers
  # guarantee the descriptors are closed before the files are unlinked.
  with open(outname) as out_file:
    output = out_file.read()
  with open(errname) as err_file:
    errors = err_file.read()
  os.unlink(outname)
  os.unlink(errname)
  sys.stdout.write(output)
  sys.stderr.write(errors)
  return exit_code or Check(output, errors)
def Escape(arg):
  """Return arg shell-quoted unless it is purely alnum, '-' or '_'."""
  needs_quoting = any(
      not ch.isalnum() and ch not in '-_' for ch in arg)
  if not needs_quoting:
    return arg
  return '"%s"' % arg.replace('"', '\\"')
def WriteToTemporaryFile(data):
  """Write data to a brand-new temporary file and return its path."""
  handle, fname = tempfile.mkstemp()
  os.close(handle)
  with open(fname, "w") as tmp_file:
    tmp_file.write(data)
  return fname
def Main():
  """Run the command given on our command line on the Android device.

  Returns a non-zero exit code when the device-side command failed,
  even though 'adb shell' itself always exits 0.
  """
  if (len(sys.argv) == 1):
    print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
    return 1
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  android_workspace = os.getenv("ANDROID_V8", "/data/local/tmp/v8")
  # Quote each argument, then append an exit-status check: failures are
  # signalled by echoing the ANDROID error marker that Check() looks for.
  args = [Escape(arg) for arg in sys.argv[1:]]
  script = (" ".join(args) + "\n"
            "case $? in\n"
            " 0) ;;\n"
            " *) echo \"ANDROID: Error returned by test\";;\n"
            "esac\n")
  # Rewrite host workspace paths to their on-device location.
  script = script.replace(workspace, android_workspace)
  script_file = WriteToTemporaryFile(script)
  android_script_file = android_workspace + "/" + script_file
  # Push the script, run it, then clean it up on the device.
  command = ("adb push '%s' %s;" % (script_file, android_script_file) +
             "adb shell 'sh %s';" % android_script_file +
             "adb shell 'rm %s'" % android_script_file)
  error_code = Execute(command)
  os.unlink(script_file)
  return error_code
if __name__ == '__main__':
sys.exit(Main())
| mit |
g-weatherill/oq-hazardlib | openquake/hazardlib/gsim/chiou_youngs_2008_swiss_coeffs.py | 4 | 10852 | # coding: utf-8
# The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.base import CoeffsTable
from openquake.hazardlib.imt import PGA, PGV, SA
#: Coefficient table constructed from the electronic suplements of the
#: original paper.
COEFFS_FS_ROCK_SWISS01 = CoeffsTable(sa_damping=5, table="""\
IMT k_adj a1 a2 b1 b2 Rm phi_11 phi_21 C2 Mc1 Mc2 Rc11 Rc21 mean_phi_ss
pga 0.770968000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.010 0.770968000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.050 0.781884504 6.242341E+00 1.000000E+00 9.792917E-01 -7.239180E-01 7.736220E+01 0.55204 0.44903 0.40592 5 7 16 36 0.45301
0.100 0.745908877 5.332961E+00 1.000000E+00 9.742506E-01 -1.092188E+00 4.880096E+01 0.54000 0.44000 0.43000 5 7 16 36 0.45000
0.150 0.744117229 4.545627E+00 1.000000E+00 9.824773E-01 -9.934861E-01 5.436996E+01 0.58095 0.47510 0.40075 5 7 16 36 0.46755
0.200 0.744577747 3.987006E+00 1.000000E+00 9.883142E-01 -9.234564E-01 5.832123E+01 0.61000 0.50000 0.38000 5 7 16 36 0.48000
0.250 0.748103885 3.824292E+00 1.000000E+00 9.902861E-01 -8.590989E-01 6.387936E+01 0.62651 0.50000 0.37450 5 7 16 36 0.48000
0.300 0.755136175 3.691346E+00 1.000000E+00 9.918973E-01 -8.065151E-01 6.842068E+01 0.64000 0.50000 0.37000 5 7 16 36 0.48000
0.400 0.767879693 4.056852E+00 1.000000E+00 9.932212E-01 -8.277473E-01 6.639628E+01 0.61747 0.48874 0.37000 5 7 16 36 0.46874
0.500 0.778052686 3.955542E+00 1.000000E+00 9.943901E-01 -7.686919E-01 7.702964E+01 0.60000 0.48000 0.37000 5 7 16 36 0.46000
0.750 0.796961618 3.771458E+00 1.000000E+00 9.965141E-01 -6.613849E-01 9.635109E+01 0.56490 0.46245 0.38755 5 7 16 36 0.45415
1.000 0.804115657 3.640847E+00 1.000000E+00 9.980211E-01 -5.852493E-01 1.100599E+02 0.54000 0.45000 0.40000 5 7 16 36 0.45000
1.500 0.806238935 3.010737E+00 1.000000E+00 9.987325E-01 -5.862774E-01 1.098648E+02 0.53631 0.43155 0.40000 5 7 16 36 0.43524
2.000 0.809163942 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53369 0.41845 0.40000 5 7 16 36 0.42476
3.000 0.822779154 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
4.000 0.835713694 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
5.000 0.847331737 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
7.500 0.874425160 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
10.00 0.894172022 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
""")
COEFFS_FS_ROCK_SWISS06 = CoeffsTable(sa_damping=5, table="""\
IMT k_adj a1 a2 b1 b2 Rm phi_11 phi_21 C2 Mc1 Mc2 Rc11 Rc21 mean_phi_ss
pga 0.907406000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.010 0.907406000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.050 1.052062325 6.242341E+00 1.000000E+00 9.792917E-01 -7.239180E-01 7.736220E+01 0.55204 0.44903 0.40592 5 7 16 36 0.45301
0.100 0.903944171 5.332961E+00 1.000000E+00 9.742506E-01 -1.092188E+00 4.880096E+01 0.54000 0.44000 0.43000 5 7 16 36 0.45000
0.150 0.846557682 4.545627E+00 1.000000E+00 9.824773E-01 -9.934861E-01 5.436996E+01 0.58095 0.47510 0.40075 5 7 16 36 0.46755
0.200 0.8152693 3.987006E+00 1.000000E+00 9.883142E-01 -9.234564E-01 5.832123E+01 0.61000 0.50000 0.38000 5 7 16 36 0.48000
0.250 0.797908534 3.824292E+00 1.000000E+00 9.902861E-01 -8.590989E-01 6.387936E+01 0.62651 0.50000 0.37450 5 7 16 36 0.48000
0.300 0.789245393 3.691346E+00 1.000000E+00 9.918973E-01 -8.065151E-01 6.842068E+01 0.64000 0.50000 0.37000 5 7 16 36 0.48000
0.400 0.78042074 4.056852E+00 1.000000E+00 9.932212E-01 -8.277473E-01 6.639628E+01 0.61747 0.48874 0.37000 5 7 16 36 0.46874
0.500 0.777925382 3.955542E+00 1.000000E+00 9.943901E-01 -7.686919E-01 7.702964E+01 0.60000 0.48000 0.37000 5 7 16 36 0.46000
0.750 0.786471408 3.771458E+00 1.000000E+00 9.965141E-01 -6.613849E-01 9.635109E+01 0.56490 0.46245 0.38755 5 7 16 36 0.45415
1.000 0.804234088 3.640847E+00 1.000000E+00 9.980211E-01 -5.852493E-01 1.100599E+02 0.54000 0.45000 0.40000 5 7 16 36 0.45000
1.500 0.839944334 3.010737E+00 1.000000E+00 9.987325E-01 -5.862774E-01 1.098648E+02 0.53631 0.43155 0.40000 5 7 16 36 0.43524
2.000 0.865068228 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53369 0.41845 0.40000 5 7 16 36 0.42476
3.000 0.893179655 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
4.000 0.904833501 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
5.000 0.911805616 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
7.500 0.929535851 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
10.00 0.942324350 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
""")
COEFFS_FS_ROCK_SWISS04 = CoeffsTable(sa_damping=5, table="""\
IMT k_adj a1 a2 b1 b2 Rm phi_11 phi_21 C2 Mc1 Mc2 Rc11 Rc21 mean_phi_ss
pga 1.144220000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.010 1.144220000 6.308282E+00 1.000000E+00 9.814496E-01 -7.784689E-01 7.056087E+01 0.58000 0.47000 0.35000 5 7 16 36 0.46000
0.050 1.582364006 6.242341E+00 1.000000E+00 9.792917E-01 -7.239180E-01 7.736220E+01 0.55204 0.44903 0.40592 5 7 16 36 0.45301
0.100 1.134260083 5.332961E+00 1.000000E+00 9.742506E-01 -1.092188E+00 4.880096E+01 0.54000 0.44000 0.43000 5 7 16 36 0.45000
0.150 0.997131538 4.545627E+00 1.000000E+00 9.824773E-01 -9.934861E-01 5.436996E+01 0.58095 0.47510 0.40075 5 7 16 36 0.46755
0.200 0.931483355 3.987006E+00 1.000000E+00 9.883142E-01 -9.234564E-01 5.832123E+01 0.61000 0.50000 0.38000 5 7 16 36 0.48000
0.250 0.896609692 3.824292E+00 1.000000E+00 9.902861E-01 -8.590989E-01 6.387936E+01 0.62651 0.50000 0.37450 5 7 16 36 0.48000
0.300 0.879037052 3.691346E+00 1.000000E+00 9.918973E-01 -8.065151E-01 6.842068E+01 0.64000 0.50000 0.37000 5 7 16 36 0.48000
0.400 0.861457717 4.056852E+00 1.000000E+00 9.932212E-01 -8.277473E-01 6.639628E+01 0.61747 0.48874 0.37000 5 7 16 36 0.46874
0.500 0.853567498 3.955542E+00 1.000000E+00 9.943901E-01 -7.686919E-01 7.702964E+01 0.60000 0.48000 0.37000 5 7 16 36 0.46000
0.750 0.848145374 3.771458E+00 1.000000E+00 9.965141E-01 -6.613849E-01 9.635109E+01 0.56490 0.46245 0.38755 5 7 16 36 0.45415
1.000 0.842662116 3.640847E+00 1.000000E+00 9.980211E-01 -5.852493E-01 1.100599E+02 0.54000 0.45000 0.40000 5 7 16 36 0.45000
1.500 0.831445701 3.010737E+00 1.000000E+00 9.987325E-01 -5.862774E-01 1.098648E+02 0.53631 0.43155 0.40000 5 7 16 36 0.43524
2.000 0.827607473 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53369 0.41845 0.40000 5 7 16 36 0.42476
3.000 0.835774855 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
4.000 0.848240349 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
5.000 0.861360769 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
7.500 0.892087590 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
10.00 0.914551086 2.563667E+00 1.000000E+00 9.992372E-01 -5.870069E-01 1.097264E+02 0.53000 0.40000 0.40000 5 7 16 36 0.41000
""")
| agpl-3.0 |
woodshop/complex-chainer | tests/functions_tests/test_concat.py | 5 | 2078 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
if cuda.available:
cuda.init()
class ConcatTestBase(object):
    """Shared forward/backward checks for functions.concat.

    Subclasses provide self.y (expected result), self.xs (input
    chunks, views of y) and self.axis in setUp().
    """

    def check_forward(self, inputs, expected, axis):
        variables = tuple(chainer.Variable(arr) for arr in inputs)
        out = functions.concat(variables, axis=axis)
        gradient_check.assert_allclose(expected, out.data, atol=0, rtol=0)
        self.assertIsInstance(out.data.shape, tuple)

    def test_forward_cpu(self):
        self.check_forward(self.xs, self.y, axis=self.axis)

    @attr.gpu
    def test_forward_gpu(self):
        gpu_inputs = [cuda.to_gpu(arr.copy()) for arr in self.xs]
        self.check_forward(gpu_inputs, cuda.to_gpu(self.y), axis=self.axis)

    def check_backward(self, inputs, axis):
        variables = tuple(chainer.Variable(arr) for arr in inputs)
        out = functions.concat(variables, axis=axis)
        # Feeding the output back as its own gradient means each input's
        # gradient must equal that input's data.
        out.grad = out.data
        out.backward()
        for var in variables:
            gradient_check.assert_allclose(var.data, var.grad, atol=0, rtol=0)

    def test_backward_cpu(self):
        self.check_backward(self.xs, axis=self.axis)

    @attr.gpu
    def test_backward_gpu(self):
        gpu_inputs = [cuda.to_gpu(arr.copy()) for arr in self.xs]
        self.check_backward(gpu_inputs, axis=self.axis)
class TestConcat1(unittest.TestCase, ConcatTestBase):

    def setUp(self):
        """A (2, 7, 3) array split into widths 2/3/2 along axis 1."""
        base = numpy.arange(42, dtype=numpy.float32).reshape(2, 7, 3)
        self.y = base
        self.xs = [base[:, :2], base[:, 2:5], base[:, 5:]]
        self.axis = 1
class TestConcat2(unittest.TestCase, ConcatTestBase):

    def setUp(self):
        """A (7, 3) array split into row blocks of 2/3/2 along axis 0."""
        base = numpy.arange(21, dtype=numpy.float32).reshape(7, 3)
        self.y = base
        self.xs = [base[:2], base[2:5], base[5:]]
        self.axis = 0
class TestConcatLastAxis(unittest.TestCase, ConcatTestBase):

    def setUp(self):
        """A 1-D two-element array split into two single elements."""
        base = numpy.arange(2, dtype=numpy.float32)
        self.y = base
        self.xs = [base[:1], base[1:]]
        self.axis = 0
testing.run_module(__name__, __file__)
| mit |
elioth010/lugama | src/model/orm/Model.py | 1 | 1482 | '''
Created on Jan 8, 2016
@author: elioth010
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.sql.expression import text
from model.orm.DB import DB
class Model(DB):
    """Minimal active-record style base class on top of SQLAlchemy.

    Each operation opens a fresh session from a factory bound to the
    engine supplied by ``DB`` and rolls back on any failure.
    """

    base = None
    SessionFactory = None
    session = None

    def __init__(self):
        """Set up the declarative base and an initial session."""
        self.base = declarative_base()
        self.SessionFactory = sessionmaker(bind=self.engine)
        self.session = self.SessionFactory()

    def save(self):
        """Persist this instance, rolling back on any failure."""
        self.session = self.SessionFactory()
        try:
            self.session.add(self)
            self.session.commit()
        except:
            self.session.rollback()
            raise

    def where(self, *args):
        """Return all rows matching the given SQLAlchemy criteria.

        ``args`` are criterion expressions, e.g.
        ``instance.where(MyModel.name == 'x')``.
        """
        self.session = self.SessionFactory()
        try:
            # filter() accepts criterion expressions; the previous
            # filter_by(args) passed the tuple positionally and raised.
            return self.session.query(self).filter(*args).all()
        except:
            self.session.rollback()
            raise

    def find(self, id_table):
        """Return rows whose ``id`` column equals ``id_table``."""
        self.session = self.SessionFactory()
        try:
            # Bind the id as a parameter instead of concatenating it
            # into the SQL text ('id=' + id_table was SQL-injectable).
            return self.session.query(self).filter(
                text('id = :id_value')).params(id_value=id_table).all()
        except:
            self.session.rollback()
            raise

    def delete(self):
        """Remove this instance, rolling back on any failure."""
        self.session = self.SessionFactory()
        try:
            self.session.delete(self)
            self.session.commit()
        except:
            self.session.rollback()
            raise
| gpl-2.0 |
mushtaqak/edx-platform | lms/envs/devstack.py | 1 | 6327 | """
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ LOGGERS ######################################
import logging
# Silence chatty tracking/metrics loggers on devstack.
for _noisy_logger in ('track.contexts', 'track.middleware', 'dd.dogapi'):
    logging.getLogger(_noisy_logger).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
    """Callback for DEBUG_TOOLBAR_CONFIG: always display the toolbar.

    On devstack the incoming request (IP, auth, etc.) is ignored
    entirely.
    """
    return True
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
'CyberSource2': {
"PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
"SECRET_KEY": 'abcd123',
"ACCESS_KEY": 'abcd123',
"PROFILE_ID": 'edx',
}
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': _('Organization'),
},
'modes': {
'name': _('Course Type'),
'terms': {
'honor': _('Honor'),
'verified': _('Verified'),
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| agpl-3.0 |
adbar/htmldate | htmldate/validators.py | 1 | 7114 | # pylint:disable-msg=E0611,I1101
"""
Filters for date parsing and date validators.
"""
## This file is available from https://github.com/adbar/htmldate
## under GNU GPL v3 license
# standard
import datetime
import logging
import time
from collections import Counter
from functools import lru_cache
from .settings import MIN_DATE, MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR
LOGGER = logging.getLogger(__name__)
LOGGER.debug('date settings: %s %s %s', MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR)
@lru_cache(maxsize=32)
def date_validator(date_input, outputformat, earliest=MIN_DATE, latest=LATEST_POSSIBLE):
    """Validate a string w.r.t. the chosen outputformat and basic heuristics"""
    # try if date can be parsed using chosen outputformat
    if not isinstance(date_input, datetime.date):
        # speed-up: slice ISO strings directly instead of strptime
        try:
            if outputformat == '%Y-%m-%d':
                dateobject = datetime.datetime(int(date_input[:4]),
                                               int(date_input[5:7]),
                                               int(date_input[8:10]))
            # default
            else:
                dateobject = datetime.datetime.strptime(date_input, outputformat)
        except ValueError:
            # unparseable input is simply invalid
            return False
    else:
        dateobject = date_input
    # basic year validation
    year = int(datetime.date.strftime(dateobject, '%Y'))
    if MIN_YEAR <= year <= MAX_YEAR:
        # not newer than today or stored variable
        try:
            # dateobject may be a datetime (has .date()) ...
            if earliest <= dateobject.date() <= latest:
                return True
        except AttributeError:
            # ... or already a plain date
            if earliest <= dateobject <= latest:
                return True
    LOGGER.debug('date not valid: %s', date_input)
    return False
def output_format_validator(outputformat):
    """Validate the output format in the settings.

    Returns True when *outputformat* is a string containing at least one
    '%' strftime directive that datetime accepts, False otherwise.
    """
    # test in abstracto: must be a string with a strftime directive
    # (idiomatic "'%' not in" instead of "not '%' in")
    if not isinstance(outputformat, str) or '%' not in outputformat:
        logging.error('malformed output format: %s', outputformat)
        return False
    # test concretely with a fixed date object
    dateobject = datetime.datetime(2017, 9, 1, 0, 0)
    try:
        dateobject.strftime(outputformat)
    except (NameError, TypeError, ValueError) as err:
        logging.error('wrong output format or format type: %s %s', outputformat, err)
        return False
    return True
@lru_cache(maxsize=32)
def plausible_year_filter(htmlstring, pattern, yearpat, tocomplete=False):
    """Filter the date patterns to find plausible years only.

    Returns a Counter of *pattern* matches in *htmlstring* whose year
    (extracted via *yearpat*) lies within [MIN_YEAR, MAX_YEAR];
    implausible matches are removed from the counter.
    """
    # slow!
    allmatches = pattern.findall(htmlstring)
    occurrences = Counter(allmatches)
    toremove = set()
    # LOGGER.debug('occurrences: %s', occurrences)
    for item in occurrences.keys():
        # scrap implausible dates
        try:
            if tocomplete is False:
                # four-digit year captured directly
                potential_year = int(yearpat.search(item).group(1))
            else:
                # two-digit year: '9x' -> 199x, anything else -> 20xx
                lastdigits = yearpat.search(item).group(1)
                if lastdigits[0] == '9':
                    potential_year = int('19' + lastdigits)
                else:
                    potential_year = int('20' + lastdigits)
        except AttributeError:
            # yearpat did not match: .search() returned None
            LOGGER.debug('not a year pattern: %s', item)
            toremove.add(item)
        else:
            if potential_year < MIN_YEAR or potential_year > MAX_YEAR:
                LOGGER.debug('no potential year: %s', item)
                toremove.add(item)
                # occurrences.remove(item)
                # continue
    # preventing dictionary changed size during iteration error
    for item in toremove:
        del occurrences[item]
    return occurrences
def compare_values(reference, attempt, outputformat, original_date):
    """Fold *attempt* (a date string) into the running *reference* timestamp.

    When original_date is True the earliest timestamp wins (0 meaning
    "unset"); otherwise the latest one does.
    """
    parsed = datetime.datetime.strptime(attempt, outputformat)
    candidate = time.mktime(parsed.timetuple())
    if original_date is True:
        if reference == 0 or candidate < reference:
            return candidate
        return reference
    return max(reference, candidate)
@lru_cache(maxsize=32)
def filter_ymd_candidate(bestmatch, pattern, original_date, copyear, outputformat, min_date, max_date):
    """Filter free text candidates in the YMD format.

    *bestmatch* is a regex match (or None) with year/month/day in groups
    1-3; the candidate is converted to *outputformat* and returned if it
    validates and its year is not before the copyright year *copyear*
    (0 disables that check). Returns None otherwise.
    """
    if bestmatch is not None:
        pagedate = '-'.join([bestmatch.group(1), bestmatch.group(2), bestmatch.group(3)])
        if date_validator(pagedate, '%Y-%m-%d', earliest=min_date, latest=max_date) is True:
            if copyear == 0 or int(bestmatch.group(1)) >= copyear:
                LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
                return convert_date(pagedate, '%Y-%m-%d', outputformat)
    ## TODO: test and improve
    #if original_date is True:
    #    if copyear == 0 or int(bestmatch.group(1)) <= copyear:
    #        LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
    #        return convert_date(pagedate, '%Y-%m-%d', outputformat)
    #else:
    #    if copyear == 0 or int(bestmatch.group(1)) >= copyear:
    #        LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
    #        return convert_date(pagedate, '%Y-%m-%d', outputformat)
    return None
def convert_date(datestring, inputformat, outputformat):
    """Re-serialize *datestring* from *inputformat* to *outputformat*."""
    # identical formats: pass through as a string without parsing
    if inputformat == outputformat:
        return str(datestring)
    # already a date/datetime object: format it directly
    if isinstance(datestring, datetime.date):
        return datestring.strftime(outputformat)
    # general case: parse, then re-format
    parsed = datetime.datetime.strptime(datestring, inputformat)
    return parsed.strftime(outputformat)
def check_extracted_reference(reference, outputformat, min_date, max_date):
    '''Test if the extracted reference date can be returned'''
    # 0 (or negative) means no reference was collected
    if reference <= 0:
        return None
    converted = datetime.datetime.fromtimestamp(reference).strftime(outputformat)
    if date_validator(converted, outputformat,
                      earliest=min_date, latest=max_date) is True:
        return converted
    return None
def get_min_date(min_date):
    '''Validates the minimum date and/or defaults to earliest plausible date'''
    if min_date is None:
        return MIN_DATE
    try:
        # internal conversion from Y-M-D format
        return datetime.date(int(min_date[:4]),
                             int(min_date[5:7]),
                             int(min_date[8:10]))
    except ValueError:
        return MIN_DATE
def get_max_date(max_date):
    '''Validates the maximum date and/or defaults to latest plausible date'''
    if max_date is None:
        return LATEST_POSSIBLE
    try:
        # internal conversion from Y-M-D format
        return datetime.date(int(max_date[:4]),
                             int(max_date[5:7]),
                             int(max_date[8:10]))
    except ValueError:
        return LATEST_POSSIBLE
| gpl-3.0 |
openqt/algorithms | extras/kaprekar_number.py | 1 | 1328 | # coding=utf-8
"""
卡布列克数
http://group.jobbole.com/26887/
有一种数被称为卡布列克数,其形式如:45 * 45 = 2025 并且 20+25=45,这样 45 就是一个
卡布列克数。
它标准定义如下:
若正整数X在N进制下的平方可以分割为二个数字,而这二个数字相加后恰等于X,那么X就是
N进制下的卡布列克数。
分解后的数字必须是正整数才可以,例如:10*10=100 并且 10+0=10,因为0不是正整数,
所以10不是卡布列克数。
现在题目的要求是给定你一个范围[a,b](b大于等于a,a大于等于0),你需要把这个范围内的
卡布列克数全部输出。
样例如下:
输入:2 100
输出:9 45 55 99
"""
from __future__ import print_function
def is_kaprekar(n):
    """Return the split level (a power of ten) if n is a Kaprekar number.

    n qualifies when n*n can be split at some decimal position into two
    positive parts whose sum is n (e.g. 45*45 = 20|25, 20+25 = 45).
    Returns 0 when no such split exists.
    """
    square = n * n
    divisor = 10
    while divisor < square:
        high, low = divmod(square, divisor)
        if low > 0 and high + low == n:
            return divisor
        divisor *= 10
    return 0


def kaprekar_number(start, stop=None):
    """Yield Kaprekar numbers from start upward.

    When stop is given (and truthy), iteration ends once stop is
    reached (inclusive); otherwise the generator is endless.
    """
    current = start
    while True:
        if is_kaprekar(current):
            yield current
        if stop and current >= stop:
            break
        current += 1
if __name__ == '__main__':
print(is_kaprekar(45))
print(is_kaprekar(40))
print(is_kaprekar(100))
print([i for i in kaprekar_number(2, 1000)])
| gpl-3.0 |
mravikumar281/staging-server | baseapp/views/education_medium_views.py | 3 | 3772 | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from baseapp.models import Education_medium
class Education_mediumView(object):
    """Mixin shared by all Education_medium CBVs.

    Sets the model and rewrites resolved template paths so templates
    live under an ``education_medium/`` subdirectory of the app.
    """

    model = Education_medium

    def get_template_names(self):
        """Nest templates within education_medium directory."""
        # Take the first default template name (e.g.
        # 'baseapp/education_medium_list.html') and insert the model
        # directory after the app label.
        tpl = super(Education_mediumView, self).get_template_names()[0]
        app = self.model._meta.app_label
        mdl = 'education_medium'
        self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
        return [self.template_name]
class Education_mediumDateView(Education_mediumView):
    """Mixin for the date-based Education_medium archive views."""
    # Model field the date archive views key on.
    date_field = 'created_date'
    # Months appear as zero-padded numbers in URLs.
    month_format = '%m'


class Education_mediumBaseListView(Education_mediumView):
    """Mixin giving all list-style views a common page size."""
    paginate_by = 10
class Education_mediumArchiveIndexView(
Education_mediumDateView, Education_mediumBaseListView, ArchiveIndexView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumCreateView(Education_mediumView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumDateDetailView(Education_mediumDateView, DateDetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumDayArchiveView(
Education_mediumDateView, Education_mediumBaseListView, DayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumDeleteView(Education_mediumView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumDetailView(Education_mediumView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumListView(Education_mediumBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumMonthArchiveView(
Education_mediumDateView, Education_mediumBaseListView, MonthArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumTodayArchiveView(
Education_mediumDateView, Education_mediumBaseListView, TodayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumUpdateView(Education_mediumView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumWeekArchiveView(
Education_mediumDateView, Education_mediumBaseListView, WeekArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_education_medium_list')
class Education_mediumYearArchiveView(
Education_mediumDateView, Education_mediumBaseListView, YearArchiveView):
make_object_list = True
| mit |
robbiet480/home-assistant | tests/components/august/test_camera.py | 13 | 1084 | """The camera tests for the august platform."""
from homeassistant.const import STATE_IDLE
from tests.async_mock import patch
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_doorbell_from_fixture,
)
async def test_create_doorbell(hass, aiohttp_client):
    """Test creation of a doorbell."""
    doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
    # Stub the image fetch so no network call happens; the camera entity
    # should serve this value through its picture endpoint.
    with patch.object(
        doorbell_one, "async_get_doorbell_image", create=False, return_value="image"
    ):
        await _create_august_with_devices(hass, [doorbell_one])
        camera_k98gidt45gul_name_camera = hass.states.get(
            "camera.k98gidt45gul_name_camera"
        )
        # The camera entity starts out idle.
        assert camera_k98gidt45gul_name_camera.state == STATE_IDLE
        url = hass.states.get("camera.k98gidt45gul_name_camera").attributes[
            "entity_picture"
        ]
        # Fetch the entity picture over HTTP and confirm the stubbed
        # image payload is returned.
        client = await aiohttp_client(hass.http.app)
        resp = await client.get(url)
        assert resp.status == 200
        body = await resp.text()
        assert body == "image"
niklaskorz/pyglet | experimental/mt_media/drivers/silent.py | 28 | 4929 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import threading
import time
import mt_media
import pyglet
_debug = pyglet.options['debug_media']
class SilentAudioPlayer(mt_media.AbstractAudioPlayer):
# When playing video, length of audio (in secs) to buffer ahead.
_buffer_time = 0.4
# Minimum number of bytes to request from source
_min_update_bytes = 1024
    def __init__(self, source_group, player):
        """Create a silent (clock-only) player for source_group.

        No audio device is used; playback time is interpolated from the
        system clock by a background worker thread.
        """
        super(SilentAudioPlayer, self).__init__(source_group, player)
        # Reference timestamp
        self._timestamp = 0.
        # System time of reference timestamp to interpolate from
        self._timestamp_time = time.time()
        # Last timestamp recorded by worker thread
        self._worker_timestamp = 0.
        # Queued events (used by worked exclusively except for clear).
        self._events = []
        # Lock required for changes to timestamp and events variables above.
        self._lock = threading.Lock()
        # Actual play state.
        self._playing = False
        # Be nice to avoid creating this thread if user doesn't care about EOS
        # events and there's no video format. XXX
        self._worker_thread = threading.Thread(target=self._worker_func)
        self._worker_thread.setDaemon(True)
        self._worker_thread.start()
    def delete(self):
        """Release this player's resources; currently a no-op."""
        # TODO kill thread -- the daemon worker keeps running until it
        # hits end-of-source or the interpreter exits.
        pass
def play(self):
if self._playing:
return
self._playing = True
self._timestamp_time = time.time()
def stop(self):
if not self._playing:
return
self._timestamp = self.get_time()
self._playing = False
def seek(self, timestamp):
self._lock.acquire()
self._timestamp = timestamp
self._worker_timestamp = timestamp
self._timestamp_time = time.time()
self._lock.release()
def clear(self):
self._lock.acquire()
self._events = []
self._lock.release()
def get_time(self):
if self._playing:
return self._timestamp + (time.time() - self._timestamp_time)
else:
return self._timestamp
def _worker_func(self):
# Amount of audio data "buffered" (in secs)
buffered_time = 0.
self._lock.acquire()
self._worker_timestamp = 0.0
self._lock.release()
while True:
self._lock.acquire()
# Use up "buffered" audio based on amount of time passed.
timestamp = self.get_time()
buffered_time -= timestamp - self._worker_timestamp
self._worker_timestamp = timestamp
if _debug:
print 'timestamp: %f' % timestamp
# Dispatch events
events = self._events # local var ok within this lock
while events and events[0].timestamp <= timestamp:
events[0]._sync_dispatch_to_player(self.player)
del events[0]
if events:
next_event_timestamp = events[0].timestamp
else:
next_event_timestamp = None
self._lock.release()
# Calculate how much data to request from source
secs = self._buffer_time - buffered_time
bytes = secs * self.source_group.audio_format.bytes_per_second
if _debug:
print 'need to get %d bytes (%f secs)' % (bytes, secs)
# No need to get data, sleep until next event or buffer update
# time instead.
if bytes < self._min_update_bytes:
sleep_time = buffered_time / 2
if next_event_timestamp is not None:
sleep_time = min(sleep_time,
next_event_timestamp - timestamp)
if _debug:
print 'sleeping for %f' % sleep_time
time.sleep(sleep_time)
continue
# Pull audio data from source
audio_data = self.source_group.get_audio_data(int(bytes))
if not audio_data:
mt_media.MediaEvent(timestamp,
'on_source_group_eos')._sync_dispatch_to_player(self.player)
break
# Pretend to buffer audio data, collect events.
buffered_time += audio_data.duration
self._lock.acquire()
self._events.extend(audio_data.events)
self._lock.release()
if _debug:
print 'got %s secs of audio data' % audio_data.duration
print 'now buffered to %f' % buffered_time
print 'events: %r' % events
class SilentAudioDriver(mt_media.AbstractAudioDriver):
    """Audio driver whose players produce no sound."""
    def create_audio_player(self, source_group, player):
        """Return a new SilentAudioPlayer for `source_group` and `player`."""
        return SilentAudioPlayer(source_group, player)
def create_audio_driver():
    """Factory returning the silent audio driver instance."""
    return SilentAudioDriver()
| bsd-3-clause |
google-research/google-research | simulation_research/signal_processing/spherical/spherical_harmonics.py | 1 | 5602 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A library for computing spherical harmonics.
The spherical harmonics are special functions defined on the surface of a
sphere, which are often used to solve partial differential equations in many
scientific applications. A physical field defined on the surface of a sphere can
be written as a linear superposition of the spherical harmonics as the latter
form a complete set of orthogonal basis functions. The set of spherical
harmonics denoted `Y_l^m(θ, φ)` is often called Laplace's spherical
harmonics of degree `l` and order `m` and `θ` and `φ` are colatitude and
longitude, respectively. In addition, the spherical harmonics can be expressed
as `Y_l^m(θ, φ) = P_l^m(θ) \exp(i m φ)`, in which
`P_l^m(θ)` is the associated Legendre function with embedded normalization
constant \sqrt(1 / (4 𝛑)). We refer to the function f(θ, φ) with finite induced
norm as the signal on the sphere, where the colatitude θ ∈ [0, π] and longitude
φ ∈ [0, 2π). The signal on the sphere can be written as a linear superpostiion
of the spherical harmoincs, which form a complete set of orthonormal basis
functions for degree l ≥ 0 and order |m| ≤ l. In this library, θ and φ can be
non-uniformly sampled.
"""
import jax.numpy as jnp
import numpy as np
from simulation_research.signal_processing.spherical import associated_legendre_function
class SphericalHarmonics(object):
  """Computes the spherical harmonics on TPUs.

  The harmonics of degree `l` and order `m` are evaluated as
  `Y_l^m(theta, phi) = P_l^m(theta) * exp(i m phi)`, where the normalized
  associated Legendre values `P_l^m` come from
  `associated_legendre_function` and the complex exponentials are held in
  a precomputed Vandermonde matrix.  Results are 4D tensors indexed by
  (degree, order, colatitude, longitude).
  """

  def __init__(self,
               l_max,
               theta,
               phi):
    """Constructor.

    Args:
      l_max: The maximum degree of the associated Legendre function.  The
        degrees are `[0, 1, ..., l_max]` and the orders `m` satisfy
        `|m| <= l`.
      theta: A vector of colatitude sampling points; the Legendre
        functions are evaluated at `cos(theta)`.
      phi: A vector of longitude sampling points used to build the
        Vandermonde matrix.
    """
    self.l_max = l_max
    self.theta = theta
    self._cos_theta = jnp.cos(theta)
    self.phi = phi
    self._legendre = associated_legendre_function.gen_normalized_legendre(
        self.l_max, self._cos_theta)
    self._vandermonde = self._gen_vandermonde_mat(self.l_max, self.phi)

  def _gen_vandermonde_mat(self, l_max, phi):
    """Builds the complex Vandermonde matrix over (order, longitude).

    Args:
      l_max: See `__init__`.
      phi: See `__init__`.

    Returns:
      A complex matrix of shape `(l_max + 1, num_phi)`.
    """
    orders = jnp.arange(l_max + 1)
    order_grid, phi_grid = jnp.meshgrid(orders, phi, indexing='ij')
    # NOTE(review): the exponent carries an extra 1 / num_phi factor, so
    # this is exp(i m phi / num_phi) rather than the textbook
    # exp(i m phi); presumably `phi` holds pre-scaled samples -- confirm
    # against callers before changing.
    coeff = 1j / phi.shape[0]
    return jnp.exp(coeff * jnp.multiply(order_grid, phi_grid))

  def harmonics_nonnegative_order(self):
    """Computes the spherical harmonics of nonnegative orders.

    Returns:
      A 4D complex tensor of shape
      `(l_max + 1, l_max + 1, num_theta, num_phi)` with dimensions in the
      order (degree, order, colatitude, longitude).
    """
    return jnp.einsum('ijk,jl->ijkl', self._legendre, self._vandermonde)

  def _gen_mask(self):
    """Returns the alternating sign vector (-1)^m for m = 0..l_max."""
    signs = np.ones((self.l_max + 1,))
    signs[1::2] = -1
    return jnp.asarray(signs)

  def harmonics_nonpositive_order(
      self, harmonics_nonnegative_order = None):
    """Computes the spherical harmonics of nonpositive orders.

    Uses the symmetry `Y_l^{-m} = (-1)^m conj(Y_l^m)`, which follows from
    `P_l^{-m}(x) = (-1)^m P_l^m(x)` for the normalized Legendre
    functions.

    Args:
      harmonics_nonnegative_order: Optional 4D complex tensor holding the
        nonnegative-order harmonics, shaped
        `(l_max + 1, l_max + 1, num_theta, num_phi)`.  Computed on demand
        when omitted.

    Returns:
      A 4D complex tensor of the same shape holding the
      nonpositive-order harmonics.
    """
    if harmonics_nonnegative_order is None:
      harmonics_nonnegative_order = self.harmonics_nonnegative_order()
    return jnp.einsum('j,ijkl->ijkl', self._gen_mask(),
                      jnp.conjugate(harmonics_nonnegative_order))

  @property
  def associated_legendre_fn(self):
    """The precomputed associated Legendre function values.

    Returns:
      A 3D tensor of shape `(l_max + 1, l_max + 1, num_theta)` indexed by
      (degree, order, colatitude).
    """
    return self._legendre
| apache-2.0 |
supermurat/hamsi-manager | Bars/ToolsBar.py | 1 | 8838 | # This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <mopened@gmail.com>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Core import Universals as uni
from Core.MyObjects import *
from Core import ReportBug
import Bars
class ToolsBar(MToolBar):
    """Top "Tools" toolbar of the main window.

    Creates the tool actions (hash, pack, clear, ...), adds them to the
    toolbar, and mirrors them into the main window's "Tools" and
    "Scripts" menus.  Feature-gated actions (directory cover, action
    history, Amarok) only exist when the matching flag is enabled.
    """
    def __init__(self, _parent):
        """Builds all tool actions and attaches the bar to `_parent`."""
        MToolBar.__init__(self, _parent)
        _parent.addToolBar(Mt.TopToolBarArea, self)
        self.setWindowTitle(translate("ToolsBar", "Tools"))
        self.setObjectName("Tools")
        # Create the tool actions; feature-gated ones only when enabled.
        self.clearEmptyDirectories = MAction(MIcon("Images:clearEmptyDirectories.png"),
                                             translate("ToolsBar", "Clear Empty Directories"), self)
        self.clearEmptyDirectories.setObjectName("Clear Empty Directories")
        self.clearEmptyDirectories.setToolTip(
            translate("ToolsBar", "Clears the folder contents based on the criteria set."))
        if uni.isActiveDirectoryCover:
            self.actCheckIcon = MAction(MIcon("Images:checkIcon.png"),
                                        translate("ToolsBar", "Check Icon"), self)
            self.actCheckIcon.setObjectName("Check Icon")
            self.actCheckIcon.setToolTip(translate("ToolsBar", "Checks the icon for the folder you are currently in."))
        self.actHash = MAction(MIcon("Images:hash.png"),
                               translate("ToolsBar", "Hash"), self)
        self.actHash.setObjectName("Hash")
        self.actHash.setToolTip(translate("ToolsBar", "Hash manager"))
        self.actPack = MAction(MIcon("Images:pack.png"),
                               translate("ToolsBar", "Pack"), self)
        self.actPack.setObjectName("Pack")
        self.actPack.setToolTip(translate("ToolsBar", "Packs the current folder."))
        self.actFileTree = MAction(MIcon("Images:fileTree.png"),
                                   translate("ToolsBar", "File Tree"), self)
        self.actFileTree.setObjectName("File Tree")
        self.actFileTree.setToolTip(translate("ToolsBar", "Get file tree of current folder."))
        self.actClear = MAction(MIcon("Images:clear.png"),
                                translate("ToolsBar", "Clear"), self)
        self.actClear.setObjectName("Clear")
        self.actClear.setToolTip(translate("ToolsBar", "Clears the current folder."))
        self.actTextCorrector = MAction(MIcon("Images:textCorrector.png"),
                                        translate("ToolsBar", "Text Corrector"), self)
        self.actTextCorrector.setObjectName("Text Corrector")
        self.actTextCorrector.setToolTip(translate("ToolsBar", "Corrects text files."))
        self.actRemoveOnlySubFiles = MAction(MIcon("Images:removeOnlySubFiles.png"),
                                             translate("ToolsBar", "Remove Sub Files"), self)
        self.actRemoveOnlySubFiles.setObjectName("Remove Sub Files")
        self.actRemoveOnlySubFiles.setToolTip(
            translate("ToolsBar", "Remove only all sub files.Do not will remove directory and subfolders."))
        self.actSearch = MAction(MIcon("Images:search.png"),
                                 translate("ToolsBar", "Search"), self)
        self.actSearch.setObjectName("Search")
        self.actSearch.setToolTip(translate("ToolsBar", "Special search tool"))
        self.actScriptManager = MAction(MIcon("Images:scriptManager.png"),
                                        translate("ToolsBar", "Script Manager"), self)
        self.actScriptManager.setObjectName("Script Manager")
        self.actScriptManager.setToolTip(translate("ToolsBar", "You can do what you want."))
        if uni.getBoolValue("isSaveActions"):
            self.actLastActions = MAction(MIcon("Images:lastActions.png"),
                                          translate("ToolsBar", "Show Last Actions"), self)
            self.actLastActions.setObjectName("Show Last Actions")
            self.actLastActions.setToolTip(translate("ToolsBar", "You can see last actions."))
        if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
            self.actAmarokEmbeddedDBConfigurator = MAction(MIcon("Images:amarokEmbeddedDBConfigurator.png"),
                                                           translate("ToolsBar",
                                                                     "Amarok Embedded Database Configurator"), self)
            self.actAmarokEmbeddedDBConfigurator.setObjectName("Amarok Embedded Database Configurator")
            self.actAmarokEmbeddedDBConfigurator.setToolTip(translate("ToolsBar", "Packs the current folder."))
        # Populate the toolbar with the actions created above.
        self.addAction(self.actHash)
        self.addAction(self.actPack)
        self.addAction(self.actFileTree)
        self.addAction(self.actClear)
        self.addAction(self.actTextCorrector)
        self.addAction(self.actSearch)
        self.addAction(self.actScriptManager)
        if uni.getBoolValue("isSaveActions"):
            self.addAction(self.actLastActions)
        if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
            self.addAction(self.actAmarokEmbeddedDBConfigurator)
        self.addSeparator()
        self.addAction(self.clearEmptyDirectories)
        self.addAction(self.actRemoveOnlySubFiles)
        if uni.isActiveDirectoryCover:
            self.addAction(self.actCheckIcon)
        self.setIconSize(MSize(16, 16))
        # Mirror the same actions into the main window's "Tools" menu.
        getMainWindow().Menu.mTools = MMenu(translate("MenuBar", "Tools"), self)
        getMainWindow().Menu.mTools.setObjectName("Tools")
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actHash))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actPack))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actFileTree))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actClear))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actTextCorrector))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actSearch))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actScriptManager))
        if uni.getBoolValue("isSaveActions"):
            getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actLastActions))
        if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
            getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actAmarokEmbeddedDBConfigurator))
        getMainWindow().Menu.mTools.addSeparator()
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.clearEmptyDirectories))
        getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actRemoveOnlySubFiles))
        if uni.isActiveDirectoryCover:
            getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actCheckIcon))
        getMainWindow().Menu.insertMenu(getMainWindow().Menu.mSettings.menuAction(), getMainWindow().Menu.mTools)
        self.createScriptsMenu(_parent)
        # Route every toolbar action through the shared dispatcher.
        MObject.connect(self, SIGNAL("actionTriggered(QAction *)"), Bars.clickedAnAction)
    def createScriptsMenu(self, _parent):
        """Builds the "Scripts" menu with one action per available script
        plus the Script Manager entry."""
        getMainWindow().Menu.mScripts = MMenu(translate("MenuBar", "Scripts"), self)
        getMainWindow().Menu.mScripts.setObjectName("Scripts")
        from Core import Scripts
        _parent.scriptList = Scripts.getScriptList()
        for scriptName in _parent.scriptList:
            actScript = MAction(str(scriptName), getMainWindow().Menu.mScripts)
            actScript.setObjectName(str(scriptName))
            actScript.setToolTip(str(str(translate("ToolsBar", "Execute \"%s\" Named Script")) % scriptName))
            getMainWindow().Menu.mScripts.addAction(actScript)
        actScriptManager = MAction(MIcon("Images:scriptManager.png"),
                                   translate("ToolsBar", "Script Manager"), self)
        actScriptManager.setObjectName("Script Manager")
        actScriptManager.setToolTip(translate("ToolsBar", "You can do what you want."))
        getMainWindow().Menu.mScripts.addAction(actScriptManager)
        getMainWindow().Menu.insertMenu(getMainWindow().Menu.mSettings.menuAction(), getMainWindow().Menu.mScripts)
| gpl-3.0 |
pwendell/mesos | third_party/zookeeper-3.3.1/src/contrib/zkpython/src/test/clientid_test.py | 164 | 1739 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, threading
import zookeeper, zktestbase
class ClientidTest(zktestbase.TestBase):
    """Test whether clientids work"""
    def setUp(self):
        # Override the base setUp with a no-op -- presumably to skip the
        # default connection; the test opens its own handle below.
        pass
    def testclientid(self):
        """Connects with an explicit (client id, password) pair and
        verifies the server reports the same credentials back."""
        cv = threading.Condition()
        self.connected = False
        def connection_watcher(handle, type, state, path):
            # Runs on ZooKeeper's event thread; wake the waiting test.
            cv.acquire()
            self.connected = True
            cv.notify()
            cv.release()
        cv.acquire()
        self.handle = zookeeper.init(self.host, connection_watcher,10000,(123456,"mypassword"))
        # NOTE(review): zookeeper.init returns a handle index; comparing
        # it to zookeeper.OK (0) only holds for the first handle opened
        # in the process -- confirm this is the intended check.
        self.assertEqual(self.handle, zookeeper.OK)
        cv.wait(15.0)
        cv.release()
        self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
        (cid,passwd) = zookeeper.client_id(self.handle)
        self.assertEqual(cid,123456)
        self.assertEqual(passwd,"mypassword")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
osall2001/slimit | src/slimit/lexer.py | 7 | 15047 | ###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
import ply.lex
from slimit.unicode_chars import (
LETTER,
DIGIT,
COMBINING_MARK,
CONNECTOR_PUNCTUATION,
)
# See "Regular Expression Literals" at
# http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html
# Token types after which a '/' must be the division operator rather than
# the start of a regular expression literal.  (The "DIVISON" spelling is
# kept as-is because the name is referenced elsewhere in this module.)
TOKENS_THAT_IMPLY_DIVISON = frozenset([
    'ID',
    'NUMBER',
    'STRING',
    'REGEX',
    'TRUE',
    'FALSE',
    'NULL',
    'THIS',
    'PLUSPLUS',
    'MINUSMINUS',
    'RPAREN',
    'RBRACE',
    'RBRACKET',
    ])
class Lexer(object):
    """A JavaScript lexer.

    >>> from slimit.lexer import Lexer
    >>> lexer = Lexer()

    Lexer supports iteration:

    >>> lexer.input('a = 1;')
    >>> for token in lexer:
    ...     print token
    ...
    LexToken(ID,'a',1,0)
    LexToken(EQ,'=',1,2)
    LexToken(NUMBER,'1',1,4)
    LexToken(SEMI,';',1,5)

    Or call one token at a time with 'token' method:

    >>> lexer.input('a = 1;')
    >>> while True:
    ...     token = lexer.token()
    ...     if not token:
    ...         break
    ...     print token
    ...
    LexToken(ID,'a',1,0)
    LexToken(EQ,'=',1,2)
    LexToken(NUMBER,'1',1,4)
    LexToken(SEMI,';',1,5)

    >>> lexer.input('a = 1;')
    >>> token = lexer.token()
    >>> token.type, token.value, token.lineno, token.lexpos
    ('ID', 'a', 1, 0)

    For more information see:
    http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
    """
    def __init__(self):
        # One-token lookbehind, used both to decide division vs. regex
        # and to implement automatic semicolon insertion.
        self.prev_token = None
        self.cur_token = None
        # Tokens pushed back by auto_semi, served before the raw lexer.
        self.next_tokens = []
        self.build()

    def build(self, **kwargs):
        """Build the lexer."""
        self.lexer = ply.lex.lex(object=self, **kwargs)

    def input(self, text):
        """Feed `text` to the underlying ply lexer."""
        self.lexer.input(text)

    def token(self):
        """Return the next significant token, or None at end of input.

        Wraps the raw ply lexer to (a) skip line terminators and
        comments, and (b) decide whether an upcoming '/' starts a regex
        literal or is the division operator, based on the type of the
        previous significant token.
        """
        if self.next_tokens:
            return self.next_tokens.pop()
        lexer = self.lexer
        while True:
            pos = lexer.lexpos
            try:
                char = lexer.lexdata[pos]
                while char in ' \t':
                    pos += 1
                    char = lexer.lexdata[pos]
                next_char = lexer.lexdata[pos + 1]
            except IndexError:
                # Near end of input: no lookahead available, fall back to
                # the plain lexer.
                tok = self._get_update_token()
                if tok is not None and tok.type == 'LINE_TERMINATOR':
                    continue
                else:
                    return tok
            if char != '/' or (char == '/' and next_char in ('/', '*')):
                tok = self._get_update_token()
                if tok.type in ('LINE_TERMINATOR',
                                'LINE_COMMENT', 'BLOCK_COMMENT'):
                    continue
                else:
                    return tok
            # current character is '/' which is either division or regex
            cur_token = self.cur_token
            is_division_allowed = (
                cur_token is not None and
                cur_token.type in TOKENS_THAT_IMPLY_DIVISON
                )
            if is_division_allowed:
                return self._get_update_token()
            else:
                self.prev_token = self.cur_token
                self.cur_token = self._read_regex()
                return self.cur_token

    def auto_semi(self, token):
        """Return a synthetic SEMI token when automatic semicolon
        insertion applies at `token` (end of input, a '}', or after a
        line terminator); the original token is pushed back so it is
        produced again afterwards.  Returns None otherwise.
        """
        if (token is None or token.type == 'RBRACE'
            or self._is_prev_token_lt()
            ):
            if token:
                self.next_tokens.append(token)
            return self._create_semi_token(token)

    def _is_prev_token_lt(self):
        """True when the previous raw token was a line terminator."""
        return self.prev_token and self.prev_token.type == 'LINE_TERMINATOR'

    def _read_regex(self):
        """Re-scan the upcoming '/' in the exclusive 'regex' state."""
        self.lexer.begin('regex')
        token = self.lexer.token()
        self.lexer.begin('INITIAL')
        return token

    def _get_update_token(self):
        """Advance the raw lexer one token, maintaining the prev/cur
        lookbehind, and apply restricted-token semicolon insertion."""
        self.prev_token = self.cur_token
        self.cur_token = self.lexer.token()
        # insert semicolon before restricted tokens
        # See section 7.9.1 ECMA262
        if (self.cur_token is not None
            and self.cur_token.type == 'LINE_TERMINATOR'
            and self.prev_token is not None
            and self.prev_token.type in ['BREAK', 'CONTINUE',
                                         'RETURN', 'THROW']
            ):
            return self._create_semi_token(self.cur_token)
        return self.cur_token

    def _create_semi_token(self, orig_token):
        """Fabricate a SEMI token at `orig_token`'s position (or 0,0)."""
        token = ply.lex.LexToken()
        token.type = 'SEMI'
        token.value = ';'
        if orig_token is not None:
            token.lineno = orig_token.lineno
            token.lexpos = orig_token.lexpos
        else:
            token.lineno = 0
            token.lexpos = 0
        return token

    # iterator protocol
    def __iter__(self):
        return self

    def next(self):
        """Python 2 iterator protocol: next token or StopIteration."""
        token = self.token()
        if not token:
            raise StopIteration
        return token

    # Exclusive lexer state used only while scanning a regex literal.
    states = (
        ('regex', 'exclusive'),
        )

    keywords = (
        'BREAK', 'CASE', 'CATCH', 'CONTINUE', 'DEBUGGER', 'DEFAULT', 'DELETE',
        'DO', 'ELSE', 'FINALLY', 'FOR', 'FUNCTION', 'IF', 'IN',
        'INSTANCEOF', 'NEW', 'RETURN', 'SWITCH', 'THIS', 'THROW', 'TRY',
        'TYPEOF', 'VAR', 'VOID', 'WHILE', 'WITH', 'NULL', 'TRUE', 'FALSE',
        # future reserved words - well, it's uncommented now to make
        # IE8 happy because it chokes up on minification:
        # obj["class"] -> obj.class
        'CLASS', 'CONST', 'ENUM', 'EXPORT', 'EXTENDS', 'IMPORT', 'SUPER',
        )
    keywords_dict = dict((key.lower(), key) for key in keywords)

    tokens = (
        # Punctuators
        'PERIOD', 'COMMA', 'SEMI', 'COLON',     # . , ; :
        'PLUS', 'MINUS', 'MULT', 'DIV', 'MOD',  # + - * / %
        'BAND', 'BOR', 'BXOR', 'BNOT',          # & | ^ ~
        'CONDOP',                               # conditional operator ?
        'NOT',                                  # !
        'LPAREN', 'RPAREN',                     # ( and )
        'LBRACE', 'RBRACE',                     # { and }
        'LBRACKET', 'RBRACKET',                 # [ and ]
        'EQ', 'EQEQ', 'NE',                     # = == !=
        'STREQ', 'STRNEQ',                      # === and !==
        'LT', 'GT',                             # < and >
        'LE', 'GE',                             # <= and >=
        'OR', 'AND',                            # || and &&
        'PLUSPLUS', 'MINUSMINUS',               # ++ and --
        'LSHIFT',                               # <<
        'RSHIFT', 'URSHIFT',                    # >> and >>>
        'PLUSEQUAL', 'MINUSEQUAL',              # += and -=
        'MULTEQUAL', 'DIVEQUAL',                # *= and /=
        'LSHIFTEQUAL',                          # <<=
        'RSHIFTEQUAL', 'URSHIFTEQUAL',          # >>= and >>>=
        'ANDEQUAL', 'MODEQUAL',                 # &= and %=
        'XOREQUAL', 'OREQUAL',                  # ^= and |=
        # Terminal types
        'NUMBER', 'STRING', 'ID', 'REGEX',
        # Properties
        'GETPROP', 'SETPROP',
        # Comments
        'LINE_COMMENT', 'BLOCK_COMMENT',
        'LINE_TERMINATOR',
        ) + keywords

    # adapted from https://bitbucket.org/ned/jslex
    t_regex_REGEX = r"""(?:
        / # opening slash
        # First character is..
        (?: [^*\\/[] # anything but * \ / or [
        | \\. # or an escape sequence
        | \[ # or a class, which has
        (?: [^\]\\] # anything but \ or ]
        | \\. # or an escape sequence
        )* # many times
        \]
        )
        # Following characters are same, except for excluding a star
        (?: [^\\/[] # anything but \ / or [
        | \\. # or an escape sequence
        | \[ # or a class, which has
        (?: [^\]\\] # anything but \ or ]
        | \\. # or an escape sequence
        )* # many times
        \]
        )* # many times
        / # closing slash
        [a-zA-Z0-9]* # trailing flags
        )
        """

    t_regex_ignore = ' \t'

    def t_regex_error(self, token):
        """Raised when the 'regex' state cannot match a regex literal."""
        raise TypeError(
            "Error parsing regular expression '%s' at %s" % (
                token.value, token.lineno)
            )

    # Punctuators
    t_PERIOD = r'\.'
    t_COMMA = r','
    t_SEMI = r';'
    t_COLON = r':'
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_MULT = r'\*'
    t_DIV = r'/'
    t_MOD = r'%'
    t_BAND = r'&'
    t_BOR = r'\|'
    t_BXOR = r'\^'
    t_BNOT = r'~'
    t_CONDOP = r'\?'
    t_NOT = r'!'
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACE = r'{'
    t_RBRACE = r'}'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_EQ = r'='
    t_EQEQ = r'=='
    t_NE = r'!='
    t_STREQ = r'==='
    t_STRNEQ = r'!=='
    t_LT = r'<'
    t_GT = r'>'
    t_LE = r'<='
    t_GE = r'>='
    t_OR = r'\|\|'
    t_AND = r'&&'
    t_PLUSPLUS = r'\+\+'
    t_MINUSMINUS = r'--'
    t_LSHIFT = r'<<'
    t_RSHIFT = r'>>'
    t_URSHIFT = r'>>>'
    t_PLUSEQUAL = r'\+='
    t_MINUSEQUAL = r'-='
    t_MULTEQUAL = r'\*='
    t_DIVEQUAL = r'/='
    t_LSHIFTEQUAL = r'<<='
    t_RSHIFTEQUAL = r'>>='
    t_URSHIFTEQUAL = r'>>>='
    t_ANDEQUAL = r'&='
    t_MODEQUAL = r'%='
    t_XOREQUAL = r'\^='
    t_OREQUAL = r'\|='

    t_LINE_COMMENT = r'//[^\r\n]*'
    t_BLOCK_COMMENT = r'/\*[^*]*\*+([^/*][^*]*\*+)*/'
    t_LINE_TERMINATOR = r'[\n\r]+'
    t_ignore = ' \t'

    t_NUMBER = r"""
    (?:
        0[xX][0-9a-fA-F]+ # hex_integer_literal
     | 0[0-7]+ # or octal_integer_literal (spec B.1.1)
     | (?: # or decimal_literal
            (?:0|[1-9][0-9]*) # decimal_integer_literal
            \. # dot
            [0-9]* # decimal_digits_opt
            (?:[eE][+-]?[0-9]+)? # exponent_part_opt
         |
            \. # dot
            [0-9]+ # decimal_digits
            (?:[eE][+-]?[0-9]+)? # exponent_part_opt
         |
            (?:0|[1-9][0-9]*) # decimal_integer_literal
            (?:[eE][+-]?[0-9]+)? # exponent_part_opt
        )
    )
    """

    string = r"""
    (?:
        # double quoted string
        (?:" # opening double quote
            (?: [^"\\\n\r] # no \, line terminators or "
                | \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
                | \\x[0-9a-fA-F]{2} # or hex_escape_sequence
                | \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
            )*? # zero or many times
            (?: \\\n # multiline ?
              (?:
                [^"\\\n\r] # no \, line terminators or "
                | \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
                | \\x[0-9a-fA-F]{2} # or hex_escape_sequence
                | \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
              )*? # zero or many times
            )*
        ") # closing double quote
        |
        # single quoted string
        (?:' # opening single quote
            (?: [^'\\\n\r] # no \, line terminators or '
                | \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
                | \\x[0-9a-fA-F]{2} # or hex_escape_sequence
                | \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
            )*? # zero or many times
            (?: \\\n # multiline ?
              (?:
                [^'\\\n\r] # no \, line terminators or '
                | \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
                | \\x[0-9a-fA-F]{2} # or hex_escape_sequence
                | \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
              )*? # zero or many times
            )*
        ') # closing single quote
    )
    """  # "

    @ply.lex.TOKEN(string)
    def t_STRING(self, token):
        # remove escape + new line sequence used for strings
        # written across multiple lines of code
        token.value = token.value.replace('\\\n', '')
        return token

    # XXX: <ZWNJ> <ZWJ> ?
    identifier_start = r'(?:' + r'[a-zA-Z_$]' + r'|' + LETTER + r')+'
    identifier_part = (
        r'(?:' + COMBINING_MARK + r'|' + r'[0-9a-zA-Z_$]' + r'|' + DIGIT +
        r'|' + CONNECTOR_PUNCTUATION + r')*'
        )
    identifier = identifier_start + identifier_part

    # 'get' / 'set' only when followed by whitespace and an identifier,
    # i.e. a getter/setter property definition.
    getprop = r'get' + r'(?=\s' + identifier + r')'
    @ply.lex.TOKEN(getprop)
    def t_GETPROP(self, token):
        return token

    setprop = r'set' + r'(?=\s' + identifier + r')'
    @ply.lex.TOKEN(setprop)
    def t_SETPROP(self, token):
        return token

    @ply.lex.TOKEN(identifier)
    def t_ID(self, token):
        # Reclassify identifiers that are reserved words.
        token.type = self.keywords_dict.get(token.value, 'ID')
        return token

    def t_error(self, token):
        # Report and skip a single unrecognized character.
        print 'Illegal character %r at %s:%s after %s' % (
            token.value[0], token.lineno, token.lexpos, self.prev_token)
        token.lexer.skip(1)
| mit |
slohse/ansible | lib/ansible/modules/system/iptables.py | 13 | 23333 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
- This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command
should operate on. If the kernel is configured with automatic module
loading, an attempt will be made to load the appropriate module for
that table if it is not already there.
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain won't be modified.
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number. This works only with
action = 'insert'.
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- "Specify the iptables chain to modify. This could be a user-defined chain or one of the standard iptables chains:"
- C(INPUT)
- C(FORWARD)
- C(OUTPUT)
- C(PREROUTING)
- C(POSTROUTING)
- C(SECMARK)
- C(CONNSECMARK)
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of tcp, udp, udplite, icmp, esp,
ah, sctp or the special keyword "all", or it can be a numeric value,
representing one of these protocols or a different one. A protocol
name from /etc/protocols is also allowed. A "!" argument before the
protocol inverts the test. The number zero is equivalent to all.
"all" will match with all protocols and is taken as default when this
option is omitted.
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A "!" argument before the
address specification inverts the sense of the address.
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A "!" argument before the
address specification inverts the sense of the address.
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
flags_set:
description:
- Flags to be set.
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property. The set of matches make up the condition under
which a target is invoked. Matches are evaluated first to last if
specified as an array and work in short-circuit fashion, i.e. if one
extension yields false, evaluation will stop.
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet
matches it. The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below). If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
log_prefix:
description:
- Specifies a log text for the rule. Only make sense with a LOG jump.
version_added: "2.5"
goto:
description:
- This specifies that the processing should continue in a user specified
chain. Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the INPUT, FORWARD and PREROUTING chains). When the "!"
argument is used before the interface name, the sense is inverted. If
the interface name ends in a "+", then any interface which begins with
this name will match. If this option is omitted, any interface name
will match.
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
"!" argument is used before the interface name, the sense is inverted.
If the interface name ends in a "+", then any interface which begins
with this name will match. If this option is omitted, any interface
name will match.
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets. Since there is no way to tell the source or
destination ports of such a packet (or ICMP type), such a packet will
not match any rules which specify them. When the "!" argument precedes
fragment argument, the rule will only match head fragments, or
unfragmented packets.
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during INSERT, APPEND, REPLACE operations).
source_port:
description:
- Source port or port range specification. This can either be a service
name or a port number. An inclusive range can also be specified, using
the format first:last. If the first port is omitted, '0' is assumed;
if the last is omitted, '65535' is assumed. If the first port is
greater than the second one they will be swapped.
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped.
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
to_ports:
description:
- "This specifies a destination port or range of ports to use: without
this, the destination port is never altered. This is only valid if the
rule also specifies one of the following protocols: tcp, udp, dccp or
sctp."
to_destination:
description:
- This specifies a destination address to use with DNAT.
- Without this, the destination address is never altered.
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with SNAT.
- Without this, the source address is never altered.
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- Mutually exclusive with C(set_dscp_mark_class).
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- Mutually exclusive with C(set_dscp_mark).
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
ctstate:
description:
- "C(ctstate) is a list of the connection states to match in the conntrack
module. Possible states are:"
- C(INVALID)
- C(NEW)
- C(ESTABLISHED)
- C(RELATED)
- C(UNTRACKED)
- C(SNAT)
- C(DNAT)
choices: [ DNAT, ESTABLISHED, INVALID, NEW, RELATED, SNAT, UNTRACKED ]
default: []
limit:
description:
- Specifies the maximum average number of matches to allow per second.
- The number can specify units explicitly, using `/second', `/minute',
`/hour' or `/day', or parts of them (so `5/second' is the same as
`5/s').
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in match by owner rule. From
Ansible 2.6 when the C(!) argument is prepended then it inverts
the rule to apply instead to all users except that one specified.
version_added: "2.1"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
"jump: REJECT"'
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command 'iptables -p icmp -h'
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- Ignores all other parameters.
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
'''
EXAMPLES = '''
# Block specific IP
- iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
# Forward port 80 to 8600
- iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
# Allow related and established connections
- iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
# Allow new incoming SYN packets on TCP port 22 (SSH).
- iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
# Tag all outbound tcp packets with DSCP mark 8
- iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
# Tag all outbound tcp packets with DSCP DiffServ class CS1
- iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
# Insert a rule on line 5
- iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
rule_num: 5
# Set the policy for the INPUT chain to DROP
- iptables:
chain: INPUT
policy: DROP
# Reject tcp with tcp-reset
- iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
# Set tcp flags
- iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
'''
import re
from ansible.module_utils.basic import AnsibleModule
# iptables binary name for each IP protocol version.
BINS = dict(
    ipv4='iptables',
    ipv6='ip6tables',
)

# The ICMP type option flag differs between iptables and ip6tables.
ICMP_TYPE_OPTIONS = dict(
    ipv4='--icmp-type',
    ipv6='--icmpv6-type',
)
def append_param(rule, param, flag, is_list):
    """Append *flag* and its value to *rule*, honoring '!' negation.

    When *is_list* is true, *param* is a sequence and the flag/value pair is
    appended once per entry. A ``None`` param appends nothing.
    """
    if is_list:
        for entry in param:
            append_param(rule, entry, flag, False)
        return
    if param is None:
        return
    # A leading '!' means the match is inverted; iptables expects the '!'
    # token before the flag itself.
    if param[0] == '!':
        rule.extend(['!', flag, param[1:]])
    else:
        rule.extend([flag, param])
def append_tcp_flags(rule, param, flag):
    """Append '--tcp-flags <examined> <set>' when both dict keys are present.

    *param* is the module's tcp_flags dict with 'flags' (flags to examine)
    and 'flags_set' (flags that must be set); both lists are comma-joined.
    """
    if not param:
        return
    if 'flags' not in param or 'flags_set' not in param:
        return
    rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
def append_match_flag(rule, param, flag, negatable):
    """Append *flag* for a tri-state option: 'match' adds it, 'negate' adds
    '! flag' (only when *negatable*), anything else adds nothing."""
    if param == 'match':
        rule.append(flag)
    elif param == 'negate' and negatable:
        rule.extend(['!', flag])
def append_csv(rule, param, flag):
    """Append *flag* followed by the comma-joined *param* list, if non-empty."""
    if not param:
        return
    rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
    """Request the iptables match extension '-m <match>' when *param* is truthy."""
    if param:
        rule += ['-m', match]
def append_jump(rule, param, jump):
    """Append the jump target '-j <jump>' when *param* is truthy."""
    if param:
        rule += ['-j', jump]
def construct_rule(params):
    """Build the iptables rule-specification argument list from module params.

    The order of the append_* calls below is significant: iptables requires
    match extensions (-m) to appear before their options on the command line.
    Returns a list of argv tokens (no binary / table / action prefix).
    """
    rule = []
    append_param(rule, params['protocol'], '-p', False)
    append_param(rule, params['source'], '-s', False)
    append_param(rule, params['destination'], '-d', False)
    append_param(rule, params['match'], '-m', True)
    append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
    append_param(rule, params['jump'], '-j', False)
    append_param(rule, params['log_prefix'], '--log-prefix', False)
    append_param(rule, params['to_destination'], '--to-destination', False)
    append_param(rule, params['to_source'], '--to-source', False)
    append_param(rule, params['goto'], '-g', False)
    append_param(rule, params['in_interface'], '-i', False)
    append_param(rule, params['out_interface'], '-o', False)
    append_param(rule, params['fragment'], '-f', False)
    append_param(rule, params['set_counters'], '-c', False)
    append_param(rule, params['source_port'], '--source-port', False)
    append_param(rule, params['destination_port'], '--destination-port', False)
    append_param(rule, params['to_ports'], '--to-ports', False)
    append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
    append_param(
        rule,
        params['set_dscp_mark_class'],
        '--set-dscp-class',
        False)
    append_match_flag(rule, params['syn'], '--syn', True)
    # --comment needs the 'comment' match extension loaded first.
    append_match(rule, params['comment'], 'comment')
    append_param(rule, params['comment'], '--comment', False)
    # ctstate: reuse an explicitly requested conntrack/state match if the user
    # listed one in 'match'; otherwise add the conntrack extension ourselves.
    if 'conntrack' in params['match']:
        append_csv(rule, params['ctstate'], '--ctstate')
    elif 'state' in params['match']:
        append_csv(rule, params['ctstate'], '--state')
    elif params['ctstate']:
        append_match(rule, params['ctstate'], 'conntrack')
        append_csv(rule, params['ctstate'], '--ctstate')
    append_match(rule, params['limit'] or params['limit_burst'], 'limit')
    append_param(rule, params['limit'], '--limit', False)
    append_param(rule, params['limit_burst'], '--limit-burst', False)
    append_match(rule, params['uid_owner'], 'owner')
    append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
    append_param(rule, params['uid_owner'], '--uid-owner', False)
    # reject_with implies "jump: REJECT" when no explicit jump was given.
    if params['jump'] is None:
        append_jump(rule, params['reject_with'], 'REJECT')
    append_param(rule, params['reject_with'], '--reject-with', False)
    append_param(
        rule,
        params['icmp_type'],
        ICMP_TYPE_OPTIONS[params['ip_version']],
        False)
    return rule
def push_arguments(iptables_path, action, params, make_rule=True):
    """Assemble the full iptables command line for *action*.

    Produces [binary, -t, table, action, chain, (rule_num,) (rule spec...)].
    Set make_rule=False for actions that take no rule specification
    (flush, policy).
    """
    cmd = [iptables_path, '-t', params['table'], action, params['chain']]
    # Insertion may target a specific position in the chain.
    if action == '-I' and params['rule_num']:
        cmd.append(params['rule_num'])
    if make_rule:
        cmd.extend(construct_rule(params))
    return cmd
def check_present(iptables_path, module, params):
    """Return True when the rule already exists (``iptables -C`` exits 0)."""
    cmd = push_arguments(iptables_path, '-C', params)
    rc = module.run_command(cmd, check_rc=False)[0]
    return rc == 0
def append_rule(iptables_path, module, params):
    """Append the rule at the end of the chain (``iptables -A``); raises on failure."""
    module.run_command(push_arguments(iptables_path, '-A', params), check_rc=True)
def insert_rule(iptables_path, module, params):
    """Insert the rule (``iptables -I``, honoring rule_num); raises on failure."""
    module.run_command(push_arguments(iptables_path, '-I', params), check_rc=True)
def remove_rule(iptables_path, module, params):
    """Delete the matching rule from the chain (``iptables -D``); raises on failure."""
    module.run_command(push_arguments(iptables_path, '-D', params), check_rc=True)
def flush_table(iptables_path, module, params):
    """Flush the chain, or the whole table when no chain is given (``iptables -F``)."""
    cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
    module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
    """Set a built-in chain's default policy (``iptables -P <chain> <policy>``)."""
    cmd = push_arguments(iptables_path, '-P', params, make_rule=False) + [params['policy']]
    module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
    """Return the chain's current policy parsed from ``iptables -L`` output.

    The policy is read from the chain header line, e.g.
    "Chain INPUT (policy ACCEPT)". Returns None when no policy is found
    (user-defined chains have none).
    """
    cmd = push_arguments(iptables_path, '-L', params)
    out = module.run_command(cmd, check_rc=True)[1]
    header = out.split("\n")[0]
    found = re.search(r'\(policy ([A-Z]+)\)', header)
    return found.group(1) if found else None
def main():
    """Ansible module entry point: parse arguments and converge iptables state.

    Three mutually exclusive modes: flush a table/chain, set a chain policy,
    or ensure a single rule is present/absent. Honors check mode throughout.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            action=dict(type='str', default='append', choices=['append', 'insert']),
            ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
            chain=dict(type='str'),
            rule_num=dict(type='str'),
            protocol=dict(type='str'),
            source=dict(type='str'),
            to_source=dict(type='str'),
            destination=dict(type='str'),
            to_destination=dict(type='str'),
            match=dict(type='list', default=[]),
            tcp_flags=dict(type='dict',
                           options=dict(
                               flags=dict(type='list'),
                               flags_set=dict(type='list'))
                           ),
            jump=dict(type='str'),
            log_prefix=dict(type='str'),
            goto=dict(type='str'),
            in_interface=dict(type='str'),
            out_interface=dict(type='str'),
            fragment=dict(type='str'),
            set_counters=dict(type='str'),
            source_port=dict(type='str'),
            destination_port=dict(type='str'),
            to_ports=dict(type='str'),
            set_dscp_mark=dict(type='str'),
            set_dscp_mark_class=dict(type='str'),
            comment=dict(type='str'),
            ctstate=dict(type='list', default=[]),
            limit=dict(type='str'),
            limit_burst=dict(type='str'),
            uid_owner=dict(type='str'),
            reject_with=dict(type='str'),
            icmp_type=dict(type='str'),
            syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
            flush=dict(type='bool', default=False),
            policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
        ),
        mutually_exclusive=(
            ['set_dscp_mark', 'set_dscp_mark_class'],
            ['flush', 'policy'],
        ),
    )
    # Result facts returned to Ansible; 'rule' is the rendered rule spec.
    args = dict(
        changed=False,
        failed=False,
        ip_version=module.params['ip_version'],
        table=module.params['table'],
        chain=module.params['chain'],
        flush=module.params['flush'],
        rule=' '.join(construct_rule(module.params)),
        state=module.params['state'],
    )
    ip_version = module.params['ip_version']
    # Locate iptables or ip6tables on the target; fail if missing.
    iptables_path = module.get_bin_path(BINS[ip_version], True)
    # Check if chain option is required
    if args['flush'] is False and args['chain'] is None:
        module.fail_json(msg="Either chain or flush parameter must be specified.")
    # Flush the table (always reported as changed; flush is not idempotent-checked)
    if args['flush'] is True:
        args['changed'] = True
        if not module.check_mode:
            flush_table(iptables_path, module, module.params)
    # Set the policy
    elif module.params['policy']:
        current_policy = get_chain_policy(iptables_path, module, module.params)
        if not current_policy:
            module.fail_json(msg='Can\'t detect current policy')
        changed = current_policy != module.params['policy']
        args['changed'] = changed
        if changed and not module.check_mode:
            set_chain_policy(iptables_path, module, module.params)
    else:
        insert = (module.params['action'] == 'insert')
        rule_is_present = check_present(iptables_path, module, module.params)
        should_be_present = (args['state'] == 'present')
        # Check if target is up to date
        args['changed'] = (rule_is_present != should_be_present)
        if args['changed'] is False:
            # Target is already up to date
            module.exit_json(**args)
        # Check only; don't modify
        if not module.check_mode:
            if should_be_present:
                if insert:
                    insert_rule(iptables_path, module, module.params)
                else:
                    append_rule(iptables_path, module, module.params)
            else:
                remove_rule(iptables_path, module, module.params)
        module.exit_json(**args)


if __name__ == '__main__':
    main()
| gpl-3.0 |
sburnett/seattle | repy/virtual_namespace.py | 3 | 3662 | """
<Author>
Armon Dadgar
<Start Date>
October 21st, 2009
<Description>
This module provides the VirtualNamespace object. This object allows
arbitrary code to be checked for safety, and evaluated within a
specified global context.
"""
# Used for safety checking
import safe
# Used to check that an API call is allowed
import restrictions
# This is to work around safe: keep a direct reference to the real builtin
# compile() so this module can still compile code after 'safe' restricts
# the execution environment.
safe_compile = compile
# Functional constructor for VirtualNamespace
def get_VirtualNamespace(code, name="<string>"):
    """Factory for VirtualNamespace; enforces the restrictions policy first."""
    # Check if this call is allowed under the current restrictions policy
    restrictions.assertisallowed('VirtualNamespace')
    return VirtualNamespace(code, name)
# This class is used to represent a namespace
class VirtualNamespace(object):
    """
    The VirtualNamespace class is used as a wrapper around an arbitrary
    code string that has been verified for safety. The namespace provides
    a method of evaluating the code with an arbitrary global context.
    """

    # Constructor
    def __init__(self, code, name="<string>"):
        """
        <Purpose>
          Initializes the VirtualNamespace class.

        <Arguments>
          code:
            (String) The code to run in the namespace

          name:
            (String, optional) The name to use for the code. When the module is
            being executed, if there is an exception, this name will appear in
            the traceback.

        <Exceptions>
          A safety check is performed on the code, and a ValueError exception will be raised
          if the code fails the safety check.

          If code or name are not string types, a TypeError exception will be raised.
        """
        # Check for the code
        # Do a type check
        if type(code) is not str:
            raise TypeError, "Code must be a string!"
        if type(name) is not str:
            raise TypeError, "Name must be a string!"

        # prepend an encoding string to protect against bugs in that code (#982)
        code = "# coding: utf-8\n\n" + code

        # Remove any windows carriage returns
        code = code.replace('\r\n', '\n')

        # Do a safety check
        try:
            safe.serial_safe_check(code)
        except Exception, e:
            raise ValueError, "Code failed safety check! Error: " + str(e)

        # All good, store the compiled byte code
        # (safe_compile is the real builtin compile, captured at import time)
        self.code = safe_compile(code, name, "exec")

    # Evaluates the virtual namespace
    def evaluate(self, context):
        """
        <Purpose>
          Evaluates the wrapped code within a context.

        <Arguments>
          context: A global context to use when executing the code.
          This should be a SafeDict object, but if a dict object is provided
          it will automatically be converted to a SafeDict object.

        <Exceptions>
          Any that may be raised by the code that is being evaluated.
          A TypeError exception will be raised if the provided context is not
          a safe dictionary object or a ValueError exception if the
          context is a dict but cannot be converted into a SafeDict.

        <Returns>
          The context dictionary that was used during evaluation.
          If the context was a dict object, this will be a new
          SafeDict object. If the context was a SafeDict object,
          then this will return the same context object.
        """
        # Try to convert a normal dict into a SafeDict
        if type(context) is dict:
            try:
                context = safe.SafeDict(context)
            except Exception, e:
                raise ValueError, "Provided context is not safe! Exception: " + str(e)

        # Type check
        if not isinstance(context, safe.SafeDict):
            raise TypeError, "Provided context is not a safe dictionary!"

        # Call safe_run with the underlying dictionary
        safe.safe_run(self.code, context.__under__)

        # Return the dictionary we used
        return context
| mit |
pigeonflight/strider-plone | docker/appengine/lib/django-1.2/tests/regressiontests/humanize/tests.py | 39 | 3296 | import unittest
from datetime import timedelta, date
from django.template import Template, Context, add_to_builtins
from django.utils.dateformat import DateFormat
from django.utils.translation import ugettext as _
from django.utils.html import escape
add_to_builtins('django.contrib.humanize.templatetags.humanize')
class HumanizeTests(unittest.TestCase):
    """Exercise each django.contrib.humanize template filter against
    known input/expected-output pairs."""

    def humanize_tester(self, test_list, result_list, method):
        # Render each value through '{{ value|method }}' and compare with the
        # HTML-escaped expected result.
        # Using max below ensures we go through both lists
        # However, if the lists are not equal length, this raises an exception
        for index in xrange(max(len(test_list), len(result_list))):
            test_content = test_list[index]
            t = Template('{{ test_content|%s }}' % method)
            rendered = t.render(Context(locals())).strip()
            self.assertEqual(rendered, escape(result_list[index]),
                             msg="%s test failed, produced %s, should've produced %s" % (method, rendered, result_list[index]))

    def test_ordinal(self):
        # Ordinal suffixes, including the 11/12/13 'th' special cases;
        # non-numeric input and None pass through unchanged.
        test_list = ('1', '2', '3', '4', '11', '12',
                     '13', '101', '102', '103', '111',
                     'something else', None)
        result_list = ('1st', '2nd', '3rd', '4th', '11th',
                       '12th', '13th', '101st', '102nd', '103rd',
                       '111th', 'something else', None)
        self.humanize_tester(test_list, result_list, 'ordinal')

    def test_intcomma(self):
        # Thousands separators for ints, floats and numeric strings.
        test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
                     '100', '1000', '10123', '10311', '1000000', '1234567.1234567',
                     None)
        result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
                       '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
                       None)
        self.humanize_tester(test_list, result_list, 'intcomma')

    def test_intword(self):
        # Large numbers converted to 'N.N million/billion/trillion'.
        test_list = ('100', '1000000', '1200000', '1290000',
                     '1000000000', '2000000000', '6000000000000',
                     None)
        result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
                       '1.0 billion', '2.0 billion', '6.0 trillion',
                       None)
        self.humanize_tester(test_list, result_list, 'intword')

    def test_apnumber(self):
        # AP style: spell out one..nine, leave 10 and above as digits.
        test_list = [str(x) for x in range(1, 11)]
        test_list.append(None)
        result_list = (u'one', u'two', u'three', u'four', u'five', u'six',
                       u'seven', u'eight', u'nine', u'10', None)
        self.humanize_tester(test_list, result_list, 'apnumber')

    def test_naturalday(self):
        # today/yesterday/tomorrow get words; other dates fall back to the
        # default date filter; non-dates pass through unchanged.
        from django.template import defaultfilters
        today = date.today()
        yesterday = today - timedelta(days=1)
        tomorrow = today + timedelta(days=1)
        someday = today - timedelta(days=10)
        notdate = u"I'm not a date value"
        test_list = (today, yesterday, tomorrow, someday, notdate, None)
        someday_result = defaultfilters.date(someday)
        result_list = (_(u'today'), _(u'yesterday'), _(u'tomorrow'),
                       someday_result, u"I'm not a date value", None)
        self.humanize_tester(test_list, result_list, 'naturalday')


if __name__ == '__main__':
    unittest.main()
| mit |
wangyanxing/Judge-at-fgdsb | judge/python/tests/coin_change_2.py | 1 | 1260 | from common import *
from solution import *
import copy
import sys
import datetime
# Number of test cases stored in the fixture file.
num_test = 303
# Fixture files spell booleans JavaScript-style; alias for eval/exec use.
true, false = True, False
# Parallel fixture arrays: in_0/in_1 hold the inputs fed to the solution,
# in_org_0/in_org_1 keep pristine copies for error reporting (the solution
# may mutate its inputs), out holds the expected answers.
in_0 = []
in_org_0 = []
in_1 = []
in_org_1 = []
out = []
def load_test():
    """Load the coin-change fixture file into the module-level test arrays.

    Populates in_0/in_1 with the inputs, keeps deep copies in in_org_0 and
    in_org_1 for error reporting, and reads the expected answers into out.
    """
    global in_0, in_org_0, in_1, in_org_1, out
    # Bug fix: the original ended with ``f.close`` (no parentheses), which
    # never actually closed the file. A context manager guarantees closing
    # even if a read helper raises.
    with open('judge/tests/coin-change-2.txt', 'r') as f:
        in_0 = read_int_matrix(f)
        in_org_0 = copy.deepcopy(in_0)
        in_1 = read_int_array(f)
        in_org_1 = copy.deepcopy(in_1)
        out = read_int_array(f)
def judge():
    """Run every fixture case through count_changes and print judge output.

    Protocol: on the first wrong answer prints
    "<case>/<total>;<inputs>;<got>;<expected>" and returns; if all cases
    pass, prints "Accepted;<runtime in ms>".
    """
    load_test()
    # Suppress the solution's own prints while the cases run (harness helper).
    capture_stdout()
    start_time = datetime.datetime.now()
    for i in range(num_test):
        print ('Testing case #' + str(i+1))
        answer = count_changes(in_0[i], in_1[i])
        if (answer != out[i]):
            release_stdout()
            # Report using the pristine copies, since in_0/in_1 may have been
            # mutated by the solution.
            out_str = str(i+1) + " / " + str(num_test) + ";"
            out_str += str(in_org_0[i])
            out_str += ", "
            out_str += str(in_org_1[i])
            out_str += ";"
            out_str += str(answer)
            out_str += ";"
            out_str += str(out[i])
            print(out_str)
            return
    release_stdout()
    delta = datetime.datetime.now() - start_time
    runtime = str(int(delta.total_seconds() * 1000))
    print('Accepted;' + runtime)
| mit |
cristiana214/cristianachavez214-cristianachavez | python/src/Tools/modulator/modulator.py | 37 | 14438 | #! /usr/bin/env python
#
# Modulator - Generate skeleton modules.
#
# The user fills out some forms with information about what the module
# should support (methods, objects), names of these things, prefixes to
# use for C code, whether the objects should also support access as numbers,
# etc etc etc.
# When the user presses 'Generate code' we generate a complete skeleton
# module in C.
#
# Alternatively, the selections made can be save to a python sourcefile and
# this sourcefile can be passed on the command line (resulting in the same
# skeleton C code).
#
# Jack Jansen, CWI, October 1994.
#
import sys, os
if os.name <> 'mac':
sys.path.append(os.path.join(os.environ['HOME'],
'src/python/Tools/modulator'))
from Tkinter import *
from Tkextra import *
from ScrolledListbox import ScrolledListbox
import sys
import genmodule
import string
# String exception raised by the synchronize()/gencode() form validators
# (old-style Python 2 string exception).
oops = 'oops'

# Characters legal at the start of / inside a C identifier.
IDENTSTARTCHARS = string.letters + '_'
IDENTCHARS = string.letters + string.digits + '_'
# Check that string is a legal C identifier
def checkid(str):
    """Return 1 when *str* is a non-empty legal C identifier, else 0."""
    if not str or str[0] not in IDENTSTARTCHARS:
        return 0
    for ch in str[1:]:
        if ch not in IDENTCHARS:
            return 0
    return 1
def getlistlist(list):
    """Snapshot the contents of a Tk listbox-like widget into a Python list."""
    return [list.get(index) for index in range(list.size())]
class UI:
    # Top-level Modulator application window: a command bar
    # (Check / Save / Generate code / Quit) above the module form.
    # Object forms are opened on demand and tracked in self.objects.
    def __init__(self):
        self.main = Frame()
        self.main.pack()
        self.main.master.title('Modulator: Module view')
        self.cmdframe = Frame(self.main, {'relief':'raised', 'bd':'0.5m',
                                          Pack:{'side':'top',
                                                'fill':'x'}})
        self.objframe = Frame(self.main, {Pack:{'side':'top', 'fill':'x',
                                                'expand':1}})
        self.check_button = Button(self.cmdframe,
                                   {'text':'Check', 'command':self.cb_check,
                                    Pack:{'side':'left', 'padx':'0.5m'}})
        self.save_button = Button(self.cmdframe,
                                  {'text':'Save...', 'command':self.cb_save,
                                   Pack:{'side':'left', 'padx':'0.5m'}})
        self.code_button = Button(self.cmdframe,
                                  {'text':'Generate code...',
                                   'command':self.cb_gencode,
                                   Pack:{'side':'left', 'padx':'0.5m'}})
        self.quit_button = Button(self.cmdframe,
                                  {'text':'Quit',
                                   'command':self.cb_quit,
                                   Pack:{'side':'right', 'padx':'0.5m'}})
        self.module = UI_module(self)
        self.objects = []
        self.modified = 0

    def run(self):
        # Enter the Tk event loop; returns only when the application exits.
        self.main.mainloop()

    def cb_quit(self, *args):
        # Confirm before quitting with unsaved changes.
        if self.modified:
            if not askyn('You have not saved\nAre you sure you want to quit?'):
                return
        sys.exit(0)

    def cb_check(self, *args):
        # Validate the module form and every object form. synchronize()
        # shows a message dialog and raises ``oops`` on invalid input.
        try:
            self.module.synchronize()
            for o in self.objects:
                o.synchronize()
        except oops:
            pass

    def cb_save(self, *args):
        # Write the python description of the current selections to a file.
        try:
            pycode = self.module.gencode('m', self.objects)
        except oops:
            return
        fn = askfile('Python file name: ')
        if not fn:
            return
        fp = open(fn, 'w')
        fp.write(pycode)
        fp.close()

    def cb_gencode(self, *args):
        # Execute the generated python description (binds ``m``) and emit
        # the C skeleton module via genmodule.
        try:
            pycode = self.module.gencode('m', self.objects)
        except oops:
            pass
        fn = askfile('C file name: ')
        if not fn:
            return
        fp = open(fn, 'w')
        try:
            exec pycode
        except:
            message('An error occurred:-)')
            return
        genmodule.write(fp, m)
        fp.close()
class UI_module:
    # Form describing the module itself: name, C prefix ("abbrev"), the
    # method list, and a button that opens a new object form.
    def __init__(self, parent):
        self.parent = parent
        self.frame = Frame(parent.objframe, {'relief':'raised', 'bd':'0.2m',
                                             Pack:{'side':'top',
                                                   'fill':'x'}})
        self.f1 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f2 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f3 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f4 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.l1 = Label(self.f1, {'text':'Module:', Pack:{'side':'left',
                                                          'padx':'0.5m'}})
        self.name_entry = Entry(self.f1, {'relief':'sunken',
                                          Pack:{'side':'left', 'padx':'0.5m', 'expand':1}})
        self.l2 = Label(self.f1, {'text':'Abbrev:', Pack:{'side':'left',
                                                          'padx':'0.5m'}})
        self.abbrev_entry = Entry(self.f1, {'relief':'sunken', 'width':5,
                                            Pack:{'side':'left', 'padx':'0.5m'}})
        self.l3 = Label(self.f2, {'text':'Methods:', Pack:{'side':'left',
                                                           'padx':'0.5m'}})
        self.method_list = ScrolledListbox(self.f2, {'relief':'sunken','bd':2,
                                                     Pack:{'side':'left', 'expand':1,
                                                           'padx':'0.5m', 'fill':'both'}})
        self.l4 = Label(self.f3, {'text':'Add method:', Pack:{'side':'left',
                                                              'padx':'0.5m'}})
        self.method_entry = Entry(self.f3, {'relief':'sunken',
                                            Pack:{'side':'left', 'padx':'0.5m', 'expand':1}})
        # Pressing Return in the entry adds the typed method to the list.
        self.method_entry.bind('<Return>', self.cb_method)
        self.delete_button = Button(self.f3, {'text':'Delete method',
                                              'command':self.cb_delmethod,
                                              Pack:{'side':'left',
                                                    'padx':'0.5m'}})
        self.newobj_button = Button(self.f4, {'text':'new object',
                                              'command':self.cb_newobj,
                                              Pack:{'side':'left',
                                                    'padx':'0.5m'}})

    def cb_delmethod(self, *args):
        # Delete every currently selected method from the listbox.
        list = self.method_list.curselection()
        for i in list:
            self.method_list.delete(i)

    def cb_newobj(self, *arg):
        # Open a fresh object form and register it with the application.
        self.parent.objects.append(UI_object(self.parent))

    def cb_method(self, *arg):
        # Move the typed name from the entry field into the method list.
        name = self.method_entry.get()
        if not name:
            return
        self.method_entry.delete('0', 'end')
        self.method_list.insert('end', name)

    def synchronize(self):
        # Validate the form; shows a message dialog and raises ``oops`` on
        # error. Side effect: defaults the abbreviation to the module name.
        n = self.name_entry.get()
        if not n:
            message('Module name not set')
            raise oops
        if not checkid(n):
            message('Module name not an identifier:\n'+n)
            raise oops
        if not self.abbrev_entry.get():
            self.abbrev_entry.insert('end', n)
        m = getlistlist(self.method_list)
        for n in m:
            if not checkid(n):
                message('Method name not an identifier:\n'+n)
                raise oops

    def gencode(self, name, objects):
        # Emit python source that reconstructs this module description as a
        # genmodule.module object bound to *name*, preceded by the code for
        # each object form (bound to o1, o2, ...).
        rv = ''
        self.synchronize()
        for o in objects:
            o.synchronize()
        onames = []
        for i in range(len(objects)):
            oname = 'o%d' % (i+1)
            rv = rv + objects[i].gencode(oname)
            onames.append(oname)
        rv = rv + '%s = genmodule.module()\n' % (name,)
        rv = rv + '%s.name = %r\n' % (name, self.name_entry.get())
        rv = rv + '%s.abbrev = %r\n' % (name, self.abbrev_entry.get())
        rv = rv + '%s.methodlist = %r\n' % (name, getlistlist(self.method_list))
        rv = rv + '%s.objects = [%s]\n' % (name, ','.join(onames))
        rv = rv + '\n'
        return rv
object_number = 0
class UI_object:
    """Per-object editor window.

    Collects an object's name, abbreviation, method list and the selected
    function/type slots, and can emit python source recreating itself as a
    genmodule object (see gencode).
    """
    def __init__(self, parent):
        global object_number
        object_number = object_number + 1
        self.num = object_number
        # Unique prefix for the Tcl variable names backing the checkbuttons.
        self.vpref = 'o%r_' % self.num
        self.frame = Toplevel(parent.objframe)
# self.frame.pack()
        self.frame.title('Modulator: object view')
# self.frame = Frame(parent.objframe, {'relief':'raised', 'bd':'0.2m',
# Pack:{'side':'top',
# 'fill':'x'}})
        # Four horizontal rows: name/abbrev, method list, method entry,
        # function/type checkbutton panels.
        self.f1 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f2 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f3 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.f4 = Frame(self.frame, {Pack:{'side':'top', 'pady':'0.5m',
                                           'fill':'x'}})
        self.l1 = Label(self.f1, {'text':'Object:', Pack:{'side':'left',
                                                          'padx':'0.5m'}})
        self.name_entry = Entry(self.f1, {'relief':'sunken',
                                 Pack:{'side':'left', 'padx':'0.5m', 'expand':1}})
        self.l2 = Label(self.f1, {'text':'Abbrev:', Pack:{'side':'left',
                                                          'padx':'0.5m'}})
        self.abbrev_entry = Entry(self.f1, {'relief':'sunken', 'width':5,
                                 Pack:{'side':'left', 'padx':'0.5m'}})
        self.l3 = Label(self.f2, {'text':'Methods:', Pack:{'side':'left',
                                                           'padx':'0.5m'}})
        self.method_list = ScrolledListbox(self.f2, {'relief':'sunken','bd':2,
                                          Pack:{'side':'left', 'expand':1,
                                                'padx':'0.5m', 'fill':'both'}})
        self.l4 = Label(self.f3, {'text':'Add method:', Pack:{'side':'left',
                                                              'padx':'0.5m'}})
        self.method_entry = Entry(self.f3, {'relief':'sunken',
                                 Pack:{'side':'left', 'padx':'0.5m', 'expand':1}})
        self.method_entry.bind('<Return>', self.cb_method)
        self.delete_button = Button(self.f3, {'text':'Delete method',
                                              'command':self.cb_delmethod,
                                              Pack:{'side':'left',
                                                    'padx':'0.5m'}})
        self.l5 = Label(self.f4, {'text':'functions:',
                                  Pack:{'side':'left',
                                        'padx':'0.5m'}})
        self.f5 = Frame(self.f4, {Pack:{'side':'left', 'pady':'0.5m',
                                        'fill':'both'}})
        self.l6 = Label(self.f4, {'text':'Types:',
                                  Pack:{'side':'left', 'padx':'0.5m'}})
        self.f6 = Frame(self.f4, {Pack:{'side':'left', 'pady':'0.5m',
                                        'fill':'x'}})
        # One checkbutton per genmodule function slot, backed by a Tcl
        # variable named vpref+slot.
        self.funcs = {}
        for i in genmodule.FUNCLIST:
            vname = self.vpref+i
            self.f5.setvar(vname, 0)
            b = Checkbutton(self.f5, {'variable':vname, 'text':i,
                                      Pack:{'side':'top', 'pady':'0.5m',
                                            'anchor':'w','expand':1}})
            self.funcs[i] = b
        # 'new' is selected by default.
        self.f5.setvar(self.vpref+'new', 1)
        # One checkbutton per genmodule type slot.
        self.types = {}
        for i in genmodule.TYPELIST:
            vname = self.vpref + i
            self.f6.setvar(vname, 0)
            b = Checkbutton(self.f6, {'variable':vname, 'text':i,
                                      Pack:{'side':'top', 'pady':'0.5m',
                                            'anchor':'w'}})
            self.types[i] = b
    def cb_method(self, *arg):
        # <Return> callback: move the typed name into the methods listbox.
        name = self.method_entry.get()
        if not name:
            return
        self.method_entry.delete('0', 'end')
        self.method_list.insert('end', name)
    def cb_delmethod(self, *args):
        # Delete the selected entries from the methods listbox.
        list = self.method_list.curselection()
        for i in list:
            self.method_list.delete(i)
    def synchronize(self):
        """Validate the object form; raises the module-level `oops` after
        popping up a message when a name/abbreviation/method is invalid."""
        n = self.name_entry.get()
        if not n:
            message('Object name not set')
            raise oops
        if not self.abbrev_entry.get():
            self.abbrev_entry.insert('end', n)
        n = self.abbrev_entry.get()
        if not checkid(n):
            message('Abbreviation not an identifier:\n'+n)
            raise oops
        m = getlistlist(self.method_list)
        for n in m:
            if not checkid(n):
                message('Method name not an identifier:\n'+n)
                raise oops
        if m:
            # Objects with methods need a tp_getattr slot to expose them.
            self.f5.setvar(self.vpref+'tp_getattr', 1)
        pass
    def gencode(self, name):
        """Emit python source rebuilding this object description as a
        genmodule object bound to the variable *name*."""
        rv = ''
        rv = rv + '%s = genmodule.object()\n' % (name,)
        rv = rv + '%s.name = %r\n' % (name, self.name_entry.get())
        rv = rv + '%s.abbrev = %r\n' % (name, self.abbrev_entry.get())
        rv = rv + '%s.methodlist = %r\n' % (name, getlistlist(self.method_list))
        fl = []
        for fn in genmodule.FUNCLIST:
            vname = self.vpref + fn
            if self.f5.getvar(vname) == '1':
                fl.append(fn)
        rv = rv + '%s.funclist = %r\n' % (name, fl)
        fl = []
        for fn in genmodule.TYPELIST:
            vname = self.vpref + fn
            # NOTE(review): reads via self.f5 although the type variables were
            # set on self.f6; Tk variables are interpreter-global so this
            # works, but f6 would be the consistent choice -- confirm.
            if self.f5.getvar(vname) == '1':
                fl.append(fn)
        rv = rv + '%s.typelist = %r\n' % (name, fl)
        rv = rv + '\n'
        return rv
def main():
    """Entry point: run the GUI when no file argument is given, otherwise
    execute the given Python file (a previously saved modulator session).
    """
    if len(sys.argv) < 2:
        ui = UI()
        ui.run()
    elif len(sys.argv) == 2:
        # Read the saved session, closing the file promptly instead of
        # leaking the descriptor until interpreter exit.
        fp = open(sys.argv[1])
        try:
            pycode = fp.read()
        finally:
            fp.close()
        try:
            # exec(...) is valid in both Python 2 and 3 (the original
            # `exec pycode` statement form is Python-2 only).
            exec(pycode)
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
            # are not silently swallowed.
            sys.stderr.write('An error occurred:-)\n')
            sys.exit(1)
        ##genmodule.write(sys.stdout, m)
    else:
        sys.stderr.write('Usage: modulator [file]\n')
        sys.exit(1)
main()
| apache-2.0 |
chrisfranzen/django | django/db/models/sql/aggregates.py | 195 | 3977 | """
Classes to represent the default SQL aggregate functions
"""
from django.db.models.fields import IntegerField, FloatField
# Fake fields used to identify aggregate types in data-conversion operations.
# An ordinal aggregate (e.g. COUNT) always yields an integer; a computed
# aggregate (e.g. AVG) always yields a float, whatever the input field.
ordinal_aggregate_field = IntegerField()
computed_aggregate_field = FloatField()
class Aggregate(object):
    """
    Default SQL Aggregate.
    """
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'
    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate.

        * col -- column reference for the subject field of the aggregate:
          an alias, or a (table, column) tuple.
        * source -- underlying field or aggregate definition used to work
          out the coerced output type when this aggregate is neither
          ordinal nor computed.
        * extra -- extra data merged into the template parameters.

        Class attributes used: sql_function (SQL function name),
        sql_template (render template), is_ordinal (integer output,
        e.g. a count), is_computed (float output, e.g. an average).
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra
        # Resolve the output type: walk the chain of sources until an
        # actual field is reached, or until an aggregate forces an
        # ordinal (integer) or computed (float) output type.  Values
        # fetched from the database are coerced via this field.
        resolved = self
        while isinstance(resolved, Aggregate):
            if getattr(resolved, 'is_ordinal', False):
                resolved = ordinal_aggregate_field
            elif getattr(resolved, 'is_computed', False):
                resolved = computed_aggregate_field
            else:
                resolved = resolved.source
        self.field = resolved
    def relabel_aliases(self, change_map):
        # Only tuple/list column references carry a table alias to remap.
        if isinstance(self.col, (list, tuple)):
            alias, column = self.col[0], self.col[1]
            self.col = (change_map.get(alias, alias), column)
    def as_sql(self, qn, connection):
        "Return the aggregate, rendered as SQL."
        col = self.col
        if hasattr(col, 'as_sql'):
            field_sql = col.as_sql(qn, connection)
        elif isinstance(col, (list, tuple)):
            field_sql = '.'.join(qn(part) for part in col)
        else:
            field_sql = col
        # self.extra is applied last so it may override 'function'/'field'.
        context = {
            'function': self.sql_function,
            'field': field_sql
        }
        context.update(self.extra)
        return self.sql_template % context
class Avg(Aggregate):
    """AVG -- computed aggregate: always coerced to float output."""
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """COUNT -- ordinal aggregate: always coerced to integer output.

    Supports COUNT(DISTINCT col) via the `distinct` flag.
    """
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'
    def __init__(self, col, distinct=False, **extra):
        # Conditional expression instead of the fragile pre-2.5
        # "cond and a or b" idiom.
        super(Count, self).__init__(
            col, distinct='DISTINCT ' if distinct else '', **extra)
class Max(Aggregate):
    """MAX -- output type follows the source field's type."""
    sql_function = 'MAX'
class Min(Aggregate):
    """MIN -- output type follows the source field's type."""
    sql_function = 'MIN'
class StdDev(Aggregate):
    """Standard deviation -- computed aggregate (float output).

    `sample=True` selects the sample deviation (STDDEV_SAMP), otherwise
    the population deviation (STDDEV_POP) is used.
    """
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        # Conditional expression instead of the fragile "and/or" idiom.
        self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
class Sum(Aggregate):
    """SUM -- output type follows the source field's type."""
    sql_function = 'SUM'
class Variance(Aggregate):
    """Variance -- computed aggregate (float output).

    `sample=True` selects the sample variance (VAR_SAMP), otherwise the
    population variance (VAR_POP) is used.
    """
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        # Conditional expression instead of the fragile "and/or" idiom.
        self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
| bsd-3-clause |
fernandezcuesta/ansible | lib/ansible/playbook/handler.py | 133 | 1974 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task import Task
class Handler(Task):
    _listen = FieldAttribute(isa='list')
    def __init__(self, block=None, role=None, task_include=None):
        # Hosts whose tasks have notified this handler during the run.
        self._flagged_hosts = []
        super(Handler, self).__init__(block=block, role=role, task_include=task_include)
    def __repr__(self):
        ''' returns a human readable representation of the handler '''
        return "HANDLER: %s" % self.get_name()
    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        # Alternate constructor: build an empty handler, then populate it
        # from the parsed playbook data.
        handler = Handler(block=block, role=role, task_include=task_include)
        return handler.load_data(data, variable_manager=variable_manager, loader=loader)
    def flag_for_host(self, host):
        # assert instanceof(host, Host)
        already_flagged = host in self._flagged_hosts
        if not already_flagged:
            self._flagged_hosts.append(host)
    def has_triggered(self, host):
        return host in self._flagged_hosts
    def serialize(self):
        data = super(Handler, self).serialize()
        data['is_handler'] = True
        return data
nvoron23/hue | apps/jobsub/setup.py | 39 | 1217 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
# Register the "jobsub" application as a Hue Desktop SDK app; sources live
# under src/ and the app depends on the desktop and oozie packages.
setup(
      name = "jobsub",
      version = VERSION,
      url = 'http://github.com/cloudera/hue',
      author = "Hue",
      description = "Hadoop Job Submission",
      packages = find_packages('src'),
      package_dir = {'': 'src'},
      install_requires = ['setuptools', 'desktop', 'oozie'],
      entry_points = { 'desktop.sdk.application': 'jobsub=jobsub' },
)
| apache-2.0 |
aforalee/RRally | rally/common/sshutils.py | 5 | 10404 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH("root", "example.com", port=33)
status, stdout, stderr = ssh.execute("ps ax")
if status:
raise Exception("Command failed with non-zero status.")
print stdout.splitlines()
Execute command with huge output:
class PseudoFile(object):
def write(chunk):
if "error" in chunk:
email_admin(chunk)
ssh = sshclient.SSH("root", "example.com")
ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH("user", "example.com")
status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
stdin=open("~/myscript.sh", "r"))
Upload file:
ssh = sshclient.SSH("user", "example.com")
ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
    sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import os
import select
import socket
import time
import paramiko
import six
from rally.common.i18n import _
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
    """Base error for SSH operations (connection and command failures)."""
    pass
class SSHTimeout(SSHError):
    """Raised when a command or connection wait exceeds its timeout."""
    pass
class SSH(object):
    """Represent ssh connection."""
    def __init__(self, user, host, port=22, pkey=None,
                 key_filename=None, password=None):
        """Initialize SSH client.
        :param user: ssh username
        :param host: hostname or ip address of remote ssh server
        :param port: remote ssh port
        :param pkey: RSA or DSS private key string or file object
        :param key_filename: private key filename
        :param password: password
        """
        self.user = user
        self.host = host
        self.port = port
        self.pkey = self._get_pkey(pkey) if pkey else None
        self.password = password
        self.key_filename = key_filename
        # Lazily-opened paramiko client; False means "not connected".
        self._client = False
    def _get_pkey(self, key):
        """Parse *key* (string or file object) as an RSA or DSS private key.
        :raises SSHError: if neither key class accepts the input.
        """
        if isinstance(key, six.string_types):
            key = six.moves.StringIO(key)
        errors = []
        for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
            try:
                return key_class.from_private_key(key)
            except paramiko.SSHException as e:
                # NOTE(review): the stream is not rewound between attempts,
                # so the DSS parse may see a partially consumed file -- confirm.
                errors.append(e)
        raise SSHError("Invalid pkey: %s" % (errors))
    def _get_client(self):
        """Return a connected paramiko.SSHClient, creating and caching it
        on first use; any connect failure is wrapped in SSHError."""
        if self._client:
            return self._client
        try:
            self._client = paramiko.SSHClient()
            self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self._client.connect(self.host, username=self.user,
                                 port=self.port, pkey=self.pkey,
                                 key_filename=self.key_filename,
                                 password=self.password, timeout=1)
            return self._client
        except Exception as e:
            message = _("Exception %(exception_type)s was raised "
                        "during connect to %(user)s@%(host)s:%(port)s. "
                        "Exception value is: %(exception)r")
            self._client = False
            raise SSHError(message % {"exception": e,
                                      "user": self.user,
                                      "host": self.host,
                                      "port": self.port,
                                      "exception_type": type(e)})
    def close(self):
        # Drop the cached connection; the next run() will reconnect.
        self._client.close()
        self._client = False
    def run(self, cmd, stdin=None, stdout=None, stderr=None,
            raise_on_error=True, timeout=3600):
        """Execute specified command on the server.
        :param cmd: Command to be executed.
        :param stdin: Open file or string to pass to stdin.
        :param stdout: Open file to connect to stdout.
        :param stderr: Open file to connect to stderr.
        :param raise_on_error: If False then exit code will be returned. If
                               True then exception will be raised on a
                               non-zero exit code.
        :param timeout: Timeout in seconds for command execution.
                        Default 1 hour. No timeout if set to 0.
        """
        client = self._get_client()
        if isinstance(stdin, six.string_types):
            stdin = six.moves.StringIO(stdin)
        return self._run(client, cmd, stdin=stdin, stdout=stdout,
                         stderr=stderr, raise_on_error=raise_on_error,
                         timeout=timeout)
    def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
             raise_on_error=True, timeout=3600):
        """select()-driven pump: feeds stdin to the remote command and
        drains stdout/stderr until the command exits or times out."""
        if isinstance(cmd, (list, tuple)):
            cmd = " ".join(six.moves.shlex_quote(str(p)) for p in cmd)
        transport = client.get_transport()
        session = transport.open_session()
        session.exec_command(cmd)
        start_time = time.time()
        data_to_send = ""
        stderr_data = None
        # If we have data to be sent to stdin then `select' should also
        # check for stdin availability.
        if stdin and not stdin.closed:
            writes = [session]
        else:
            writes = []
        while True:
            # Block until data can be read/write.
            r, w, e = select.select([session], writes, [session], 1)
            if session.recv_ready():
                data = session.recv(4096)
                LOG.debug("stdout: %r" % data)
                if stdout is not None:
                    stdout.write(data.decode("utf8"))
                continue
            if session.recv_stderr_ready():
                stderr_data = session.recv_stderr(4096)
                LOG.debug("stderr: %r" % stderr_data)
                if stderr is not None:
                    stderr.write(stderr_data.decode("utf8"))
                continue
            if session.send_ready():
                if stdin is not None and not stdin.closed:
                    if not data_to_send:
                        data_to_send = stdin.read(4096)
                        if not data_to_send:
                            # stdin exhausted: close our side so the remote
                            # command sees EOF, and stop selecting for write.
                            stdin.close()
                            session.shutdown_write()
                            writes = []
                            continue
                    sent_bytes = session.send(data_to_send)
                    LOG.debug("sent: %s" % data_to_send[:sent_bytes])
                    data_to_send = data_to_send[sent_bytes:]
            if session.exit_status_ready():
                break
            # Equivalent to (now - start_time) > timeout.
            if timeout and (time.time() - timeout) > start_time:
                args = {"cmd": cmd, "host": self.host}
                raise SSHTimeout(_("Timeout executing command "
                                   "'%(cmd)s' on host %(host)s") % args)
            if e:
                raise SSHError("Socket error.")
        exit_status = session.recv_exit_status()
        if 0 != exit_status and raise_on_error:
            fmt = _("Command '%(cmd)s' failed with exit_status %(status)d.")
            details = fmt % {"cmd": cmd, "status": exit_status}
            if stderr_data:
                details += _(" Last stderr data: '%s'.") % stderr_data
            raise SSHError(details)
        return exit_status
    def execute(self, cmd, stdin=None, timeout=3600):
        """Execute the specified command on the server.
        :param cmd: Command to be executed, can be a list.
        :param stdin: Open file to be sent on process stdin.
        :param timeout: Timeout for execution of the command.
        :returns: tuple (exit_status, stdout, stderr)
        """
        stdout = six.moves.StringIO()
        stderr = six.moves.StringIO()
        exit_status = self.run(cmd, stderr=stderr,
                               stdout=stdout, stdin=stdin,
                               timeout=timeout, raise_on_error=False)
        stdout.seek(0)
        stderr.seek(0)
        return (exit_status, stdout.read(), stderr.read())
    def wait(self, timeout=120, interval=1):
        """Wait for the host will be available via ssh."""
        start_time = time.time()
        while True:
            try:
                # Any successful command proves ssh is up.
                return self.execute("uname")
            except (socket.error, SSHError) as e:
                LOG.debug("Ssh is still unavailable: %r" % e)
                time.sleep(interval)
            if time.time() > (start_time + timeout):
                raise SSHTimeout(_("Timeout waiting for '%s'") % self.host)
    def _put_file_sftp(self, localpath, remotepath, mode=None):
        """Upload via SFTP, mirroring the local file's mode when *mode*
        is not given."""
        client = self._get_client()
        with client.open_sftp() as sftp:
            sftp.put(localpath, remotepath)
            if mode is None:
                mode = 0o777 & os.stat(localpath).st_mode
            sftp.chmod(remotepath, mode)
    def _put_file_shell(self, localpath, remotepath, mode=None):
        """Fallback upload via a remote 'cat > path' when SFTP is not
        available on the server."""
        cmd = ["cat > %s" % remotepath]
        if mode is not None:
            cmd.append("chmod 0%o %s" % (mode, remotepath))
        with open(localpath, "rb") as localfile:
            cmd = "; ".join(cmd)
            self.run(cmd, stdin=localfile)
    def put_file(self, localpath, remotepath, mode=None):
        """Copy specified local file to the server.
        :param localpath: Local filename.
        :param remotepath: Remote filename.
        :param mode: Permissions to set after upload
        """
        try:
            self._put_file_sftp(localpath, remotepath, mode=mode)
        except paramiko.SSHException:
            self._put_file_shell(localpath, remotepath, mode=mode)
| apache-2.0 |
fkammer/three.js | utils/exporters/blender/addons/io_three/exporter/texture.py | 173 | 1407 | from .. import constants, logger
from . import base_classes, image, api
class Texture(base_classes.BaseNode):
    """Class that wraps a texture node"""
    def __init__(self, node, parent):
        logger.debug("Texture().__init__(%s)", node)
        base_classes.BaseNode.__init__(self, node, parent, constants.TEXTURE)
        num = constants.NUMERIC
        # Re-use an Image already registered on the scene for this file;
        # otherwise create a new wrapper and register it.
        img_inst = self.scene.image(api.texture.file_name(self.node))
        if not img_inst:
            image_node = api.texture.image_node(self.node)
            img_inst = image.Image(image_node.name, self.scene)
            self.scene[constants.IMAGES].append(img_inst)
        # Link by UUID rather than by reference.
        self[constants.IMAGE] = img_inst[constants.UUID]
        # Translate wrap/filter/mapping settings into their numeric codes
        # via the constants.NUMERIC table.
        wrap = api.texture.wrap(self.node)
        self[constants.WRAP] = (num[wrap[0]], num[wrap[1]])
        if constants.WRAPPING.REPEAT in wrap:
            self[constants.REPEAT] = api.texture.repeat(self.node)
        self[constants.ANISOTROPY] = api.texture.anisotropy(self.node)
        self[constants.MAG_FILTER] = num[api.texture.mag_filter(self.node)]
        self[constants.MIN_FILTER] = num[api.texture.min_filter(self.node)]
        self[constants.MAPPING] = num[api.texture.mapping(self.node)]
    @property
    def image(self):
        """
        :return: the image object of the current texture
        :rtype: image.Image
        """
        return self.scene.image(self[constants.IMAGE])
DANCEcollaborative/forum-xblock | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/contrib/gis/geos/base.py | 86 | 1682 | from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
    from django.contrib.gis import gdal
except ImportError:
    # A 'dummy' gdal module exposing the feature flags callers test,
    # both forced off since GDAL is unavailable.
    class GDALInfo(object):
        HAS_GDAL = False
        GEOJSON = False
    gdal = GDALInfo()
# NumPy supported?  Fall back to a false-y placeholder so callers can test
# "if numpy:" without guarding the import themselves.
try:
    import numpy
except ImportError:
    numpy = False
class GEOSBase(object):
    """
    Base object for GEOS objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # Initially the pointer is NULL.
    _ptr = None
    # Default allowed pointer type.
    ptr_type = c_void_p
    def _get_ptr(self):
        # Handing out a NULL pointer would let the C library read invalid
        # memory, so refuse instead.
        ptr = self._ptr
        if not ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return ptr
    def _set_ptr(self, ptr):
        # Accept only pointers of the compatible type, or None (NULL).
        if not isinstance(ptr, (self.ptr_type, NoneType)):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr
    # Property for controlling access to the GEOS object pointers; raises
    # when the pointer is NULL so invalid memory is never dereferenced.
    ptr = property(_get_ptr, _set_ptr)
rootfs/origin | cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/server.py | 504 | 1293 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
	"""Answer every GET with a 200 text/html "Hello World!" body."""
	# Handler for the GET requests
	def do_GET(self):
		self.send_response(200)
		self.send_header('Content-type','text/html')
		self.end_headers()
		self.wfile.write("Hello World!")
try:
	# Create a web server and define the handler to manage the incoming request.
	server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
	print 'Started httpserver on port ' , PORT_NUMBER
	server.serve_forever()
except KeyboardInterrupt:
	# Ctrl-C: close the listening socket cleanly before exiting.
	print '^C received, shutting down the web server'
	server.socket.close()
tensorflow/ecosystem | data_service/tf_std_data_server.py | 1 | 2000 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a tf.data service server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.compat.v1.app.flags
flags.DEFINE_integer("port", 0, "Port to listen on")
flags.DEFINE_bool("is_dispatcher", False, "Whether to start a dispatcher (as opposed to a worker server")
flags.DEFINE_string("dispatcher_address", "", "The address of the dispatcher. This is only needed when starting a worker server.")
flags.DEFINE_string("worker_address", "", "The address of the worker server. This is only needed when starting a worker server.")
FLAGS = flags.FLAGS
def main(unused_argv):
  """Start a single tf.data service server (dispatcher or worker) and block."""
  if FLAGS.is_dispatcher:
    print("Starting tf.data service dispatcher")
    server = tf.data.experimental.service.DispatchServer(
        tf.data.experimental.service.DispatcherConfig(
            port=FLAGS.port,
            protocol="grpc"))
  else:
    print("Starting tf.data service worker")
    server = tf.data.experimental.service.WorkerServer(
        tf.data.experimental.service.WorkerConfig(
            port=FLAGS.port,
            protocol="grpc",
            dispatcher_address=FLAGS.dispatcher_address,
            worker_address=FLAGS.worker_address))
  # Block until the server shuts down.
  server.join()
# Parse the flags and dispatch to main() via the TF app runner.
if __name__ == "__main__":
  tf.compat.v1.app.run()
| apache-2.0 |
RJVB/audacity | lib-src/lv2/lv2/waflib/TaskGen.py | 85 | 11872 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
	"""A task generator: holds the parameters of a build declaration
	(source, target, features, ...) and creates the concrete tasks on
	demand when post() is called."""
	mappings={}
	prec=Utils.defaultdict(list)
	def __init__(self,*k,**kw):
		self.source=''
		self.target=''
		self.meths=[]
		self.prec=Utils.defaultdict(list)
		self.mappings={}
		self.features=[]
		self.tasks=[]
		if not'bld'in kw:
			# No build context given: standalone generator with its own env.
			self.env=ConfigSet.ConfigSet()
			self.idx=0
			self.path=None
		else:
			self.bld=kw['bld']
			self.env=self.bld.env.derive()
			self.path=self.bld.path
			try:
				# Per-directory counter, used to disambiguate generators.
				self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
			except AttributeError:
				self.bld.idx={}
				self.idx=self.bld.idx[id(self.path)]=1
		# Any remaining keyword becomes an attribute (source=..., target=...).
		for key,val in kw.items():
			setattr(self,key,val)
	def __str__(self):
		return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
	def __repr__(self):
		lst=[]
		for x in self.__dict__.keys():
			if x not in['env','bld','compiled_tasks','tasks']:
				lst.append("%s=%s"%(x,repr(getattr(self,x))))
		return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
	def get_name(self):
		# Default the generator name to the target (comma-joined if a list).
		try:
			return self._name
		except AttributeError:
			if isinstance(self.target,list):
				lst=[str(x)for x in self.target]
				name=self._name=','.join(lst)
			else:
				name=self._name=str(self.target)
			return name
	def set_name(self,name):
		self._name=name
	name=property(get_name,set_name)
	def to_list(self,val):
		# Split strings on whitespace; pass lists through unchanged.
		if isinstance(val,str):return val.split()
		else:return val
	def post(self):
		"""Order the methods bound to this generator's features by their
		precedence constraints (topological sort) and execute them once.
		Returns False when already posted, True otherwise."""
		if getattr(self,'posted',None):
			return False
		self.posted=True
		keys=set(self.meths)
		self.features=Utils.to_list(self.features)
		for x in self.features+['*']:
			st=feats[x]
			if not st:
				if not x in Task.classes:
					Logs.warn('feature %r does not exist - bind at least one method to it'%x)
			keys.update(list(st))
		# Keep only the precedence constraints relevant to the methods used.
		prec={}
		prec_tbl=self.prec or task_gen.prec
		for x in prec_tbl:
			if x in keys:
				prec[x]=prec_tbl[x]
		# Topological sort: start from the methods nothing depends on.
		tmp=[]
		for a in keys:
			for x in prec.values():
				if a in x:break
			else:
				tmp.append(a)
		tmp.sort()
		out=[]
		while tmp:
			e=tmp.pop()
			if e in keys:out.append(e)
			try:
				nlst=prec[e]
			except KeyError:
				pass
			else:
				del prec[e]
				for x in nlst:
					for y in prec:
						if x in prec[y]:
							break
					else:
						tmp.append(x)
		if prec:
			raise Errors.WafError('Cycle detected in the method execution %r'%prec)
		out.reverse()
		self.meths=out
		Logs.debug('task_gen: posting %s %d'%(self,id(self)))
		for x in out:
			try:
				v=getattr(self,x)
			except AttributeError:
				raise Errors.WafError('%r is not a valid task generator method'%x)
			Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
			v()
		Logs.debug('task_gen: posted %s'%self.name)
		return True
	def get_hook(self,node):
		# Find the extension handler for a source file, instance mappings
		# taking precedence over the class-level ones.
		name=node.name
		for k in self.mappings:
			if name.endswith(k):
				return self.mappings[k]
		for k in task_gen.mappings:
			if name.endswith(k):
				return task_gen.mappings[k]
		raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
	def create_task(self,name,src=None,tgt=None):
		# Instantiate a task of class `name` bound to this generator.
		task=Task.classes[name](env=self.env.derive(),generator=self)
		if src:
			task.set_inputs(src)
		if tgt:
			task.set_outputs(tgt)
		self.tasks.append(task)
		return task
	def clone(self,env):
		"""Duplicate this generator for another env (given as a ConfigSet
		or as the name of a configured variant)."""
		newobj=self.bld()
		for x in self.__dict__:
			if x in['env','bld']:
				continue
			elif x in['path','features']:
				setattr(newobj,x,getattr(self,x))
			else:
				setattr(newobj,x,copy.copy(getattr(self,x)))
		newobj.posted=False
		if isinstance(env,str):
			newobj.env=self.bld.all_envs[env].derive()
		else:
			newobj.env=env.derive()
		return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
	"""Create a task class for `rule` and register an extension handler so
	that source files ending in `ext_in` are transformed into `ext_out`
	files; `reentrant`/the mappings decide whether outputs are fed back in
	as new sources."""
	ext_in=Utils.to_list(ext_in)
	ext_out=Utils.to_list(ext_out)
	if not name:
		name=rule
	cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
	def x_file(self,node):
		# Extension hook: create one task per matching source file.
		ext=decider and decider(self,node)or cls.ext_out
		if ext_in:
			_ext_in=ext_in[0]
		tsk=self.create_task(name,node)
		cnt=0
		keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
		for x in ext:
			k=node.change_ext(x,ext_in=_ext_in)
			tsk.outputs.append(k)
			if reentrant!=None:
				# The first `reentrant` outputs are processed again as sources.
				if cnt<int(reentrant):
					self.source.append(k)
			else:
				# No explicit reentrancy: re-process outputs that have a mapping.
				for y in keys:
					if k.name.endswith(y):
						self.source.append(k)
						break
			cnt+=1
		if install_path:
			self.bld.install_files(install_path,tsk.outputs)
		return tsk
	for x in cls.ext_in:
		task_gen.mappings[x]=x_file
	return x_file
def taskgen_method(func):
	# Decorator: register func as a method of task_gen and return it unchanged.
	setattr(task_gen,func.__name__,func)
	return func
def feature(*k):
	# Decorator: register func as a task_gen method and bind it to the
	# given feature names (recorded in the module-level `feats` table).
	def deco(func):
		setattr(task_gen,func.__name__,func)
		for name in k:
			feats[name].update([func.__name__])
		return func
	return deco
def before_method(*k):
	# Decorator: register func on task_gen and record that it must run
	# before each of the named methods (precedence table `task_gen.prec`).
	def deco(func):
		setattr(task_gen,func.__name__,func)
		for fun_name in k:
			if not func.__name__ in task_gen.prec[fun_name]:
				task_gen.prec[fun_name].append(func.__name__)
		return func
	return deco
# Backward-compatible alias.
before=before_method
def after_method(*k):
	# Decorator: register func on task_gen and record that it must run
	# after each of the named methods.
	def deco(func):
		setattr(task_gen,func.__name__,func)
		for fun_name in k:
			if not fun_name in task_gen.prec[func.__name__]:
				task_gen.prec[func.__name__].append(fun_name)
		return func
	return deco
# Backward-compatible alias.
after=after_method
def extension(*k):
	# Decorator: register func as the handler for source files ending in
	# any of the given extensions (class-level mapping table).
	def deco(func):
		setattr(task_gen,func.__name__,func)
		for x in k:
			task_gen.mappings[x]=func
		return func
	return deco
@taskgen_method
def to_nodes(self,lst,path=None):
	"""Convert `lst` (string, node or list thereof) into a list of existing
	source nodes, resolved relative to `path` (defaults to the generator's
	path); raises WafError for names that cannot be found."""
	tmp=[]
	path=path or self.path
	find=path.find_resource
	if isinstance(lst,self.path.__class__):
		lst=[lst]
	for x in Utils.to_list(lst):
		if isinstance(x,str):
			node=find(x)
		else:
			node=x
		if not node:
			raise Errors.WafError("source not found: %r in %r"%(x,self))
		tmp.append(node)
	return tmp
@feature('*')
def process_source(self):
	# Resolve self.source to nodes and dispatch each file to its
	# extension handler, which creates the actual tasks.
	self.source=self.to_nodes(getattr(self,'source',[]))
	for node in self.source:
		self.get_hook(node)(self,node)
@feature('*')
@before_method('process_source')
def process_rule(self):
	"""Create a one-off task from a bld(rule=...) declaration: build (or
	re-use from the per-build cache) a task class wrapping the rule, then
	instantiate it with the declared sources and targets."""
	if not getattr(self,'rule',None):
		return
	name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
	try:
		cache=self.bld.cache_rule_attr
	except AttributeError:
		cache=self.bld.cache_rule_attr={}
	cls=None
	# NOTE(review): the default 'True' is a (truthy) string, not the
	# boolean -- it reads as "caching on by default"; confirm intent.
	if getattr(self,'cache_rule','True'):
		try:
			cls=cache[(name,self.rule)]
		except KeyError:
			pass
	if not cls:
		cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
		if getattr(self,'scan',None):
			cls.scan=self.scan
		elif getattr(self,'deps',None):
			# Manual dependencies: resolve self.deps to nodes at scan time.
			def scan(self):
				nodes=[]
				for x in self.generator.to_list(getattr(self.generator,'deps',None)):
					node=self.generator.path.find_resource(x)
					if not node:
						self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
					nodes.append(node)
				return[nodes,[]]
			cls.scan=scan
		if getattr(self,'update_outputs',None):
			Task.update_outputs(cls)
		if getattr(self,'always',None):
			Task.always_run(cls)
		for x in['after','before','ext_in','ext_out']:
			setattr(cls,x,getattr(self,x,[]))
		if getattr(self,'cache_rule','True'):
			cache[(name,self.rule)]=cls
	tsk=self.create_task(name)
	if getattr(self,'target',None):
		# Normalize target to a list of nodes, declaring string targets.
		if isinstance(self.target,str):
			self.target=self.target.split()
		if not isinstance(self.target,list):
			self.target=[self.target]
		for x in self.target:
			if isinstance(x,str):
				tsk.outputs.append(self.path.find_or_declare(x))
			else:
				x.parent.mkdir()
				tsk.outputs.append(x)
		if getattr(self,'install_path',None):
			self.bld.install_files(self.install_path,tsk.outputs)
	if getattr(self,'source',None):
		# Sources are consumed here so process_source does not re-use them.
		tsk.inputs=self.to_nodes(self.source)
		self.source=[]
	if getattr(self,'cwd',None):
		tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
	"""Force a strict sequential order between task generators using the
	'seq' feature: the tasks of this generator run only after all tasks
	of the previously posted 'seq' generator.
	"""
	# reschedule this method to run last, once self.tasks is populated
	if self.meths and self.meths[-1]!='sequence_order':
		self.meths.append('sequence_order')
		return
	if getattr(self,'seq_start',None):
		return
	# chain after the tasks of the previous 'seq' generator, if any
	if getattr(self.bld,'prev',None):
		self.bld.prev.post()
		for x in self.bld.prev.tasks:
			for y in self.tasks:
				y.set_run_after(x)
	self.bld.prev=self
re_m4=re.compile('@(\w+)@',re.M)
class subst_pc(Task.Task):
	"""Substitute @VAR@ patterns in the input file with values taken from
	the task generator or the build environment and write the result to
	the output file. Also supports plain copies and custom functions.
	"""
	def run(self):
		# plain copy requested: no substitution at all
		if getattr(self.generator,'is_copy',None):
			self.outputs[0].write(self.inputs[0].read('rb'),'wb')
			if getattr(self.generator,'chmod',None):
				os.chmod(self.outputs[0].abspath(),self.generator.chmod)
			return None
		# a custom function may perform arbitrary extra processing
		if getattr(self.generator,'fun',None):
			self.generator.fun(self)
		code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
		# a custom substitution function replaces the default @VAR@ logic
		if getattr(self.generator,'subst_fun',None):
			code=self.generator.subst_fun(self,code)
			if code:
				self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
			return
		# escape % so the %-formatting below is safe
		code=code.replace('%','%%')
		lst=[]
		def repl(match):
			# turn @VAR@ into %(VAR)s and remember the variable name
			g=match.group
			if g(1):
				lst.append(g(1))
				return"%%(%s)s"%g(1)
			return''
		code=re_m4.sub(repl,code)
		try:
			d=self.generator.dct
		except AttributeError:
			# no explicit dict given: look each variable up on the task
			# generator, then in the environment (also uppercased)
			d={}
			for x in lst:
				tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
				d[x]=str(tmp)
		code=code%d
		self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
		# record the variables used so sig_vars can hash them on rebuilds
		self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
		# the cached signature is now stale: force a recomputation
		try:delattr(self,'cache_sig')
		except AttributeError:pass
		if getattr(self.generator,'chmod',None):
			os.chmod(self.outputs[0].abspath(),self.generator.chmod)
	def sig_vars(self):
		# include the substituted variables (and any custom functions) in
		# the task signature so the output is rebuilt when they change
		bld=self.generator.bld
		env=self.env
		upd=self.m.update
		if getattr(self.generator,'fun',None):
			upd(Utils.h_fun(self.generator.fun))
		if getattr(self.generator,'subst_fun',None):
			upd(Utils.h_fun(self.generator.subst_fun))
		# variable names recorded by run() during the previous build
		vars=self.generator.bld.raw_deps.get(self.uid(),[])
		act_sig=bld.hash_env_vars(env,vars)
		upd(act_sig)
		# also hash the values taken from the task generator itself
		lst=[getattr(self.generator,x,'')for x in vars]
		upd(Utils.h_list(lst))
		return self.m.digest()
@extension('.pc.in')
def add_pcfile(self, node):
	"""Handle *.pc.in* files: substitute them into *.pc* files and
	schedule the result for installation (default ${LIBDIR}/pkgconfig/)."""
	out_node = node.change_ext('.pc', '.pc.in')
	tsk = self.create_task('subst_pc', node, out_node)
	inst_to = getattr(self, 'install_path', '${LIBDIR}/pkgconfig/')
	self.bld.install_files(inst_to, tsk.outputs)
class subst(subst_pc):
	# Same behaviour as subst_pc; a distinct class lets the 'subst' feature
	# attach its own ordering constraints without affecting .pc.in handling
	pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
	"""Create a 'subst' task for each source/target pair of a task
	generator using the 'subst' feature.

	Sources and targets must come in equal numbers; a source may be a
	string (looked up relative to self.path) or a Node. When source and
	target are the same string, the file is substituted from the source
	directory into the build directory. Ordering constraints (after,
	before, ext_in, ext_out) are copied onto the tasks; generated headers
	default to running before C/C++ compilations. Raises Errors.WafError
	on count mismatch, empty entries, or missing source files.
	"""
	src=Utils.to_list(getattr(self,'source',[]))
	if isinstance(src,Node.Node):
		src=[src]
	tgt=Utils.to_list(getattr(self,'target',[]))
	if isinstance(tgt,Node.Node):
		tgt=[tgt]
	if len(src)!=len(tgt):
		raise Errors.WafError('invalid number of source/target for %r'%self)
	for x,y in zip(src,tgt):
		if not x or not y:
			raise Errors.WafError('null source or target for %r'%self)
		a,b=None,None
		if isinstance(x,str)and isinstance(y,str)and x==y:
			# same name: substitute from the source dir into the build dir
			a=self.path.find_node(x)
			b=self.path.get_bld().make_node(y)
			if not os.path.isfile(b.abspath()):
				# output does not exist yet: force a build and make the dir
				b.sig=None
				b.parent.mkdir()
		else:
			if isinstance(x,str):
				a=self.path.find_resource(x)
			elif isinstance(x,Node.Node):
				a=x
			if isinstance(y,str):
				b=self.path.find_or_declare(y)
			elif isinstance(y,Node.Node):
				b=y
		if not a:
			# fix: error message previously read 'cound not find'
			raise Errors.WafError('could not find %r for %r'%(x,self))
		has_constraints=False
		tsk=self.create_task('subst',a,b)
		# copy explicit ordering constraints onto the task
		for k in('after','before','ext_in','ext_out'):
			val=getattr(self,k,None)
			if val:
				has_constraints=True
				setattr(tsk,k,val)
		# generated headers must exist before any C/C++ compilation starts
		if not has_constraints and b.name.endswith('.h'):
			tsk.before=[k for k in('c','cxx')if k in Task.classes]
		inst_to=getattr(self,'install_path',None)
		if inst_to:
			self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
	# prevent process_source from handling these files again
	self.source=[]
| gpl-2.0 |
IssamLaradji/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
# one Ward-linkage estimator, re-fitted for every (n_samples, n_features) cell
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
# grids of problem sizes: ~3..1000 samples and 10..~3162 features (log-spaced)
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
                                    n_features)
# wall-clock timings, indexed [feature_idx, sample_idx]
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        # random dense Gaussian data of the requested size
        X = np.random.normal(size=(n, p))
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0
# ratio > 1 means scikit-learn is slower than scipy for that cell
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
# contour at ratio == 1 marks the break-even boundary
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
openelisglobal/openelisglobal-core | liquibase/OE5.1/testCatalogKenya/Scripts/region_district.py | 4 | 2749 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Build SQL inserts for Kenyan counties/districts (Python 2 script).
organization = []    # raw lines from county_district.txt
region = []          # raw lines from region.txt ("name, number" per line)
region_dict = {}     # county/region name -> number
used = ['']          # names/rows already emitted (see get_org_type_id below)
county_list = []
county_name = []     # county names seen so far (see dist_org_names)
county_district_file = open('input_files/county_district.txt','r')
region_file = open('input_files/region.txt', 'r')
for line in county_district_file:
    if len(line) > 1:
        organization.append(line.strip(" "))
county_district_file.close()
for line in region_file:
    if len(line) > 1:
        region.append(line.strip(" "))
region_file.close()
for row in range(0, len(region)):
    #create dictionary for the county/region name and number
    county_list = region[row].split(",")
    region_dict[county_list[0].strip()] = county_list[1].strip()
def get_org_type_id(org):
    """Return the organization-type id for *org*: '8' the first time a
    county/region name is matched, '7' otherwise.

    Records matched names in the module-level 'used' list so a county is
    classified as type '8' only once.
    """
    for row in range(0, len(region)):
        region_name = region[row]
        region_field = region_name.split(",")
        # assumes county names longer than 3 chars are meaningful -- TODO confirm
        if len(org) > 3 and org.strip() not in used and region_field[0].strip() == org.strip():
            used.append(org.strip())
            return '8'
    return '7'
# escape the organization name and distinguish counties (regions)
# from sub counties (districts)
def esc_org_name(name):
    """Disambiguate the organization name, then double single quotes so
    it is safe inside a SQL string literal."""
    name = dist_org_names(name)
    if "'" not in name:
        return name
    return name.replace("'", "''")
# escape country name
def esc_name(name):
    """Double any single quotes so *name* is safe inside a SQL literal."""
    return name.replace("'", "''") if "'" in name else name
def dist_org_names(orgName):
    """Return the stripped name the first time it is seen; on repeats
    return the original name with ' Sub County' appended.

    First occurrences are recorded in the module-level county_name list.
    """
    stripped = orgName.strip()
    if stripped in county_name:
        return orgName + ' Sub County'
    county_name.append(stripped)
    return stripped
def get_org_id(orgName):
    """Return 'NULL' for blank names, otherwise a SQL subselect that
    resolves the organization id by (escaped) name."""
    if not orgName.strip():
        return 'NULL'
    return ("(SELECT id FROM clinlims.organization WHERE name = '"
            + esc_name(orgName) + "')")
# Emit one organization INSERT (plus its type link) per unique input row.
sql_insert = "INSERT INTO clinlims.organization( id, name, org_id, lastupdated, is_active) \n\t VALUES (nextval('clinlims.organization_seq'), "
count = 10
county_district_results = open("output_files/county_district.sql", 'w')
for row in range(0, len(organization)):
    county_district_name = organization[row]
    org_field = county_district_name.split(",")
    # NOTE(review): org_field is a *list*, while 'used' initially holds
    # strings; this test therefore only filters rows appended below, not
    # the names recorded by get_org_type_id -- confirm this is intended
    if org_field not in used and 'n/a' not in org_field:
        used.append(org_field)
        county_district_results.write(sql_insert)
        county_district_results.write("'" + esc_org_name(org_field[1]) + "', "+get_org_id(org_field[2].strip())+", now(), 'Y');\n\t")
        county_district_results.write("INSERT INTO clinlims.organization_organization_type( org_id, org_type_id) \n\t\t VALUES ( currval('clinlims.organization_seq'), "+ get_org_type_id(org_field[1]) +");\n")
print "Done Look for the results in county_district.sql"
| mpl-2.0 |
dhp-denero/LibrERP | account_financial_report_horizontal/wizard/account_report_profit_loss.py | 2 | 2337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>) (<lorenzo.battistini@agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class account_pl_report(orm.TransientModel):
    """Wizard producing the profit-and-loss report by periods between any
    two dates, in landscape or portrait orientation."""
    _inherit = "account_financial_report_horizontal.common.account.report"
    _name = "account.pl.report"
    _description = "Account Profit And Loss Report"

    _columns = {
        'name': fields.char("Name", size=16),
        'display_type': fields.boolean("Landscape Mode"),
    }
    _defaults = {
        'display_type': True,
        'target_move': 'all',
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        """Return the report action; landscape mode selects the
        horizontal report variant."""
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['display_type'])[0])
        if data['form']['display_type']:
            report_name = 'account.profit_horizontal'
        else:
            report_name = 'account.profit_loss'
        return {
            'type': 'ir.actions.report.xml',
            'report_name': report_name,
            'datas': data,
        }
| agpl-3.0 |
SVoxel/R9000 | git_home/u-boot.git/tools/patman/patchstream.py | 5 | 17615 | # Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import os
import re
import shutil
import tempfile
import command
import commit
import gitutil
from series import Series
# Tags that we detect and remove
re_remove = re.compile('^BUG=|^TEST=|^BRANCH=|^Change-Id:|^Review URL:'
'|Reviewed-on:|Commit-\w*:')
# Lines which are allowed after a TEST= line
re_allowed_after_test = re.compile('^Signed-off-by:')
# Signoffs
re_signoff = re.compile('^Signed-off-by: *(.*)')
# The start of the cover letter
re_cover = re.compile('^Cover-letter:')
# A cover letter Cc
re_cover_cc = re.compile('^Cover-letter-cc: *(.*)')
# Patch series tag
re_series_tag = re.compile('^Series-([a-z-]*): *(.*)')
# Commit series tag
re_commit_tag = re.compile('^Commit-([a-z-]*): *(.*)')
# Commit tags that we want to collect and keep
re_tag = re.compile('^(Tested-by|Acked-by|Reviewed-by|Patch-cc): (.*)')
# The start of a new commit in the git log
re_commit = re.compile('^commit ([0-9a-f]*)$')
# We detect these since checkpatch doesn't always do it
re_space_before_tab = re.compile('^[+].* \t')
# States we can be in - can we use range() and still have comments?
STATE_MSG_HEADER = 0 # Still in the message header
STATE_PATCH_SUBJECT = 1 # In patch subject (first line of log for a commit)
STATE_PATCH_HEADER = 2 # In patch header (after the subject)
STATE_DIFFS = 3 # In the diff part (past --- line)
class PatchStream:
    """Class for detecting/injecting tags in a patch or series of patches

    We support processing the output of 'git log' to read out the tags we
    are interested in. We can also process a patch file in order to remove
    unwanted tags or inject additional ones. These correspond to the two
    phases of processing.
    """
    def __init__(self, series, name=None, is_log=False):
        self.skip_blank = False          # True to skip a single blank line
        self.found_test = False          # Found a TEST= line
        self.lines_after_test = 0        # Number of lines found after TEST=
        self.warn = []                   # List of warnings we have collected
        self.linenum = 1                 # Output line number we are up to
        self.in_section = None           # Name of start...END section we are in
        self.notes = []                  # Series notes
        self.section = []                # The current section...END section
        self.series = series             # Info about the patch series
        self.is_log = is_log             # True if indent like git log
        self.in_change = 0               # Non-zero if we are in a change list
        self.blank_count = 0             # Number of blank lines stored up
        self.state = STATE_MSG_HEADER    # What state are we in?
        self.signoff = []                # Contents of signoff line
        self.commit = None               # Current commit

    def AddToSeries(self, line, name, value):
        """Add a new Series-xxx tag.

        When a Series-xxx tag is detected, we come here to record it, if we
        are scanning a 'git log'.

        Args:
            line: Source line containing tag (useful for debug/error messages)
            name: Tag name (part after 'Series-')
            value: Tag value (part after 'Series-xxx: ')
        """
        if name == 'notes':
            # start collecting lines until END (see ProcessLine)
            self.in_section = name
            self.skip_blank = False
        if self.is_log:
            self.series.AddTag(self.commit, line, name, value)

    def AddToCommit(self, line, name, value):
        """Add a new Commit-xxx tag.

        When a Commit-xxx tag is detected, we come here to record it.

        Args:
            line: Source line containing tag (useful for debug/error messages)
            name: Tag name (part after 'Commit-')
            value: Tag value (part after 'Commit-xxx: ')
        """
        if name == 'notes':
            # start collecting lines until END (see ProcessLine)
            self.in_section = 'commit-' + name
            self.skip_blank = False

    def CloseCommit(self):
        """Save the current commit into our commit list, and reset our state"""
        if self.commit and self.is_log:
            self.series.AddCommit(self.commit)
            self.commit = None

    def ProcessLine(self, line):
        """Process a single line of a patch file or commit log

        This process a line and returns a list of lines to output. The list
        may be empty or may contain multiple output lines.

        This is where all the complicated logic is located. The class's
        state is used to move between different states and detect things
        properly.

        We can be in one of two modes:
            self.is_log == True: This is 'git log' mode, where most output is
                indented by 4 characters and we are scanning for tags

            self.is_log == False: This is 'patch' mode, where we already have
                all the tags, and are processing patches to remove junk we
                don't want, and add things we think are required.

        Args:
            line: text line to process

        Returns:
            list of output lines, or [] if nothing should be output
        """
        # Initially we have no output. Prepare the input line string
        out = []
        line = line.rstrip('\n')
        if self.is_log:
            # strip the 4-column indent that git log adds to message bodies
            if line[:4] == '    ':
                line = line[4:]

        # Handle state transition and skipping blank lines
        series_tag_match = re_series_tag.match(line)
        commit_tag_match = re_commit_tag.match(line)
        commit_match = re_commit.match(line) if self.is_log else None
        cover_cc_match = re_cover_cc.match(line)
        signoff_match = re_signoff.match(line)
        tag_match = None
        if self.state == STATE_PATCH_HEADER:
            tag_match = re_tag.match(line)
        is_blank = not line.strip()
        if is_blank:
            if (self.state == STATE_MSG_HEADER
                    or self.state == STATE_PATCH_SUBJECT):
                self.state += 1

            # We don't have a subject in the text stream of patch files
            # It has its own line with a Subject: tag
            if not self.is_log and self.state == STATE_PATCH_SUBJECT:
                self.state += 1
        elif commit_match:
            self.state = STATE_MSG_HEADER

        # If we are in a section, keep collecting lines until we see END
        if self.in_section:
            if line == 'END':
                if self.in_section == 'cover':
                    self.series.cover = self.section
                elif self.in_section == 'notes':
                    if self.is_log:
                        self.series.notes += self.section
                elif self.in_section == 'commit-notes':
                    if self.is_log:
                        self.commit.notes += self.section
                else:
                    self.warn.append("Unknown section '%s'" % self.in_section)
                self.in_section = None
                self.skip_blank = True
                self.section = []
            else:
                self.section.append(line)

        # Detect the commit subject
        elif not is_blank and self.state == STATE_PATCH_SUBJECT:
            self.commit.subject = line

        # Detect the tags we want to remove, and skip blank lines
        elif re_remove.match(line) and not commit_tag_match:
            self.skip_blank = True

            # TEST= should be the last thing in the commit, so remove
            # everything after it
            if line.startswith('TEST='):
                self.found_test = True
        elif self.skip_blank and is_blank:
            self.skip_blank = False

        # Detect the start of a cover letter section
        elif re_cover.match(line):
            self.in_section = 'cover'
            self.skip_blank = False

        elif cover_cc_match:
            value = cover_cc_match.group(1)
            self.AddToSeries(line, 'cover-cc', value)

        # If we are in a change list, keep collecting lines until a blank one
        elif self.in_change:
            if is_blank:
                # Blank line ends this change list
                self.in_change = 0
            elif line == '---':
                # end of commit message: re-process the separator normally
                self.in_change = 0
                out = self.ProcessLine(line)
            else:
                if self.is_log:
                    self.series.AddChange(self.in_change, self.commit, line)
            self.skip_blank = False

        # Detect Series-xxx tags
        elif series_tag_match:
            name = series_tag_match.group(1)
            value = series_tag_match.group(2)
            if name == 'changes':
                # value is the version number: e.g. 1, or 2
                # NOTE(review): 'except ... as str' shadows the builtin str
                try:
                    value = int(value)
                except ValueError as str:
                    raise ValueError("%s: Cannot decode version info '%s'" %
                                     (self.commit.hash, line))
                self.in_change = int(value)
            else:
                self.AddToSeries(line, name, value)
                self.skip_blank = True

        # Detect Commit-xxx tags
        elif commit_tag_match:
            name = commit_tag_match.group(1)
            value = commit_tag_match.group(2)
            if name == 'notes':
                self.AddToCommit(line, name, value)
                self.skip_blank = True

        # Detect the start of a new commit
        elif commit_match:
            self.CloseCommit()
            self.commit = commit.Commit(commit_match.group(1))

        # Detect tags in the commit message
        elif tag_match:
            # Remove Tested-by self, since few will take much notice
            if (tag_match.group(1) == 'Tested-by' and
                    tag_match.group(2).find(os.getenv('USER') + '@') != -1):
                self.warn.append("Ignoring %s" % line)
            elif tag_match.group(1) == 'Patch-cc':
                self.commit.AddCc(tag_match.group(2).split(','))
            else:
                out = [line]

        # Suppress duplicate signoffs
        elif signoff_match:
            if (self.is_log or not self.commit or
                    self.commit.CheckDuplicateSignoff(signoff_match.group(1))):
                out = [line]

        # Well that means this is an ordinary line
        else:
            pos = 1
            # Look for ugly ASCII characters
            for ch in line:
                # TODO: Would be nicer to report source filename and line
                if ord(ch) > 0x80:
                    self.warn.append("Line %d/%d ('%s') has funny ascii char" %
                                     (self.linenum, pos, line))
                pos += 1

            # Look for space before tab
            m = re_space_before_tab.match(line)
            if m:
                self.warn.append('Line %d/%d has space before tab' %
                                 (self.linenum, m.start()))

            # OK, we have a valid non-blank line
            out = [line]
            self.linenum += 1
            self.skip_blank = False
            if self.state == STATE_DIFFS:
                pass

            # If this is the start of the diffs section, emit our tags and
            # change log
            elif line == '---':
                self.state = STATE_DIFFS

                # Output the tags (signoff first), then change list
                out = []
                log = self.series.MakeChangeLog(self.commit)
                out += [line]
                if self.commit:
                    out += self.commit.notes
                out += [''] + log
            elif self.found_test:
                if not re_allowed_after_test.match(line):
                    self.lines_after_test += 1

        return out

    def Finalize(self):
        """Close out processing of this patch stream"""
        self.CloseCommit()
        if self.lines_after_test:
            self.warn.append('Found %d lines after TEST=' %
                             self.lines_after_test)

    def ProcessStream(self, infd, outfd):
        """Copy a stream from infd to outfd, filtering out unwanted things.

        This is used to process patch files one at a time.

        Args:
            infd: Input stream file object
            outfd: Output stream file object
        """
        # Extract the filename from each diff, for nice warnings
        fname = None
        last_fname = None
        re_fname = re.compile('diff --git a/(.*) b/.*')
        while True:
            line = infd.readline()
            if not line:
                break
            out = self.ProcessLine(line)

            # Try to detect blank lines at EOF
            for line in out:
                match = re_fname.match(line)
                if match:
                    last_fname = fname
                    fname = match.group(1)
                if line == '+':
                    # buffer added-blank lines; only emit them when followed
                    # by real content (drop trailing blanks at EOF)
                    self.blank_count += 1
                else:
                    if self.blank_count and (line == '-- ' or match):
                        self.warn.append("Found possible blank line(s) at "
                                         "end of file '%s'" % last_fname)
                    outfd.write('+\n' * self.blank_count)
                    outfd.write(line + '\n')
                    self.blank_count = 0
        self.Finalize()
def GetMetaDataForList(commit_range, git_dir=None, count=None,
                       series = None, allow_overwrite=False):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        commit_range: Range of commits to count (e.g. 'HEAD..base')
        git_dir: Path to git repository (None to use default)
        count: Number of commits to list, or None for no limit
        series: Series object to add information into. By default a new
            series is started.
        allow_overwrite: Allow tags to overwrite an existing tag

    Returns:
        A Series object containing information about the commits.
    """
    if not series:
        series = Series()
    series.allow_overwrite = allow_overwrite
    log_params = gitutil.LogCmd(commit_range, reverse=True, count=count,
                                git_dir=git_dir)
    log_output = command.RunPipe([log_params], capture=True).stdout
    stream = PatchStream(series, is_log=True)
    for log_line in log_output.splitlines():
        stream.ProcessLine(log_line)
    stream.Finalize()
    return series
def GetMetaData(start, count):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        start: Commit to start from: 0=HEAD, 1=next one, etc.
        count: Number of commits to list
    """
    commit_range = 'HEAD~%d' % start
    return GetMetaDataForList(commit_range, None, count)
def FixPatch(backup_dir, fname, series, commit):
    """Fix up a patch file, by adding/removing as required.

    We remove our tags from the patch file, insert changes lists, etc.
    The patch file is processed in place, and overwritten.

    A backup file is put into backup_dir (if not None).

    Args:
        fname: Filename to patch file to process
        series: Series information about this patch set
        commit: Commit object for this patch file
    Return:
        A list of errors, or [] if all ok.
    """
    # Filter the patch into a temporary file first
    tmp_handle, tmp_name = tempfile.mkstemp()
    stream = PatchStream(series)
    stream.commit = commit
    outfd = os.fdopen(tmp_handle, 'w')
    infd = open(fname, 'r')
    stream.ProcessStream(infd, outfd)
    infd.close()
    outfd.close()

    # Create a backup file if required, then replace the original
    if backup_dir:
        shutil.copy(fname, os.path.join(backup_dir, os.path.basename(fname)))
    shutil.move(tmp_name, fname)
    return stream.warn
def FixPatches(series, fnames):
"""Fix up a list of patches identified by filenames
The patch files are processed in place, and overwritten.
Args:
series: The series object
fnames: List of patch files to process
"""
# Current workflow creates patches, so we shouldn't need a backup
backup_dir = None #tempfile.mkdtemp('clean-patch')
count = 0
for fname in fnames:
commit = series.commits[count]
commit.patch = fname
result = FixPatch(backup_dir, fname, series, commit)
if result:
print '%d warnings for %s:' % (len(result), fname)
for warn in result:
print '\t', warn
print
count += 1
print 'Cleaned %d patches' % count
return series
def InsertCoverLetter(fname, series, count):
    """Inserts a cover letter with the required info into patch 0

    Args:
        fname: Input / output filename of the cover letter file
        series: Series object
        count: Number of patches in the series
    """
    with open(fname, 'r') as infd:
        lines = infd.readlines()

    text = series.cover
    prefix = series.GetPatchPrefix()
    outfd = open(fname, 'w')
    for line in lines:
        if line.startswith('Subject:'):
            # TODO: if more than 10 patches this should save 00/xx, not 0/xx
            line = 'Subject: [%s 0/%d] %s\n' % (prefix, count, text[0])

        # Insert our cover letter
        elif line.startswith('*** BLURB HERE ***'):
            # First the blurb text
            line = '\n'.join(text[1:]) + '\n'
            if series.get('notes'):
                line += '\n'.join(series.notes) + '\n'

            # Now the change list
            out = series.MakeChangeLog(None)
            line += '\n' + '\n'.join(out)
        outfd.write(line)
    outfd.close()
| gpl-2.0 |
andyshinn/dx-toolkit | src/python/dxpy/bindings/dxgtable_functions.py | 2 | 3997 | # Copyright (C) 2013-2015 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper Functions
****************
The following functions allow opening an existing remote table (for
reading or writing) and creating new remote tables (write-only). All of
these methods return a remote table handler.
"""
from __future__ import (print_function, unicode_literals)
from . import DXGTable
def open_dxgtable(dxid, project=None, mode=None):
'''
:param dxid: table ID
:type dxid: string
:param mode: One of "r", "w", or "a" for read, write, and append modes, respectively
:type mode: string
:rtype: :class:`~dxpy.bindings.dxgtable.DXGTable`
Given the object ID of an existing table, returns a
:class:`~dxpy.bindings.dxgtable.DXGTable` object for reading (with
:meth:`~dxpy.bindings.dxgtable.DXGTable.get_rows`) or writing (with
:meth:`~dxpy.bindings.dxgtable.DXGTable.add_row` or
:meth:`~dxpy.bindings.dxgtable.DXGTable.add_rows`).
Example::
with open_dxgtable("gtable-xxxx") as dxgtable:
for row in dxgtable.get_rows():
print row[1] # Prints the value in the first column (after the row ID) for this row
Note that this function is shorthand for the following::
DXGTable(dxid)
'''
return DXGTable(dxid, project=project, mode=mode)
def new_dxgtable(columns=None, indices=None, init_from=None, mode=None, **kwargs):
'''
:param columns: An ordered list containing column descriptors. See :meth:`~dxpy.bindings.dxgtable.DXGTable.make_column_desc` (required if *init_from* is not provided)
:type columns: list of column descriptors
:param indices: An ordered list containing index descriptors. See description in :func:`~dxpy.bindings.dxgtable.DXGTable._new`.
:type indices: list of index descriptors
:param init_from: GTable from which to initialize the metadata including column and index specs
:type init_from: :class:`~dxpy.bindings.dxgtable.DXGTable`
:param mode: One of "w" or "a" for write and append modes, respectively
:type mode: string
:returns: Remote table handler for the newly created table
:rtype: :class:`~dxpy.bindings.dxgtable.DXGTable`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Creates a new remote GTable with the given columns. If indices are
given, the GTable will be indexed by the requested indices at the
time that the table is closed.
Example::
col_descs = [dxpy.DXGTable.make_column_desc("a", "string"),
dxpy.DXGTable.make_column_desc("b", "int32")]
with new_dxgtable(columns=col_descs, mode='w') as dxgtable:
dxgtable.add_rows([["foo", 23], ["bar", 7]])
gri_cols = [dxpy.DXGTable.make_column_desc("chr", "string"),
dxpy.DXGTable.make_column_desc("lo", "int32"),
dxpy.DXGTable.make_column_desc("hi", "int32")]
gri_index = dxpy.DXGTable.genomic_range_index("chr", "lo", "hi")
indexed_table = new_dxgtable(columns=gri_cols, indices=[gri_index])
Note that this function is shorthand for the following::
dxgtable = DXGTable()
dxgtable.new(columns, **kwargs)
'''
dxgtable = DXGTable(mode=mode)
dxgtable.new(columns=columns, indices=indices, init_from=init_from, **kwargs)
return dxgtable
| apache-2.0 |
windyuuy/opera | chromium/src/tools/grit/grit/gather/txt.py | 62 | 1175 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Supports making amessage from a text file.
'''
from grit.gather import interface
from grit import tclib
class TxtFile(interface.GathererBase):
'''A text file gatherer. Very simple, all text from the file becomes a
single clique.
'''
def Parse(self):
self.text_ = self._LoadInputFile()
self.clique_ = self.uberclique.MakeClique(tclib.Message(text=self.text_))
def GetText(self):
'''Returns the text of what is being gathered.'''
return self.text_
def GetTextualIds(self):
return [self.extkey]
def GetCliques(self):
'''Returns the MessageClique objects for all translateable portions.'''
return [self.clique_]
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
return self.clique_.MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english).GetRealContent()
| bsd-3-clause |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/werkzeug/contrib/iterio.py | 147 | 10718 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit different because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
from werkzeug._compat import implements_iterator
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b'\n'
return u'\n'
@implements_iterator
class IterIO(object):
    """File-like access over iterables, or iterable access over a writer.

    Constructing this class never returns an ``IterIO`` directly: an
    iterable argument yields a read-only :class:`IterO`, a callable
    argument yields a write-side :class:`IterI` driven step by step as
    the returned iterable is consumed.  Each :meth:`flush` on the write
    side emits one item; a flush with no intervening writes emits the
    sentinel value.

    On Python 3 set ``sentinel=b''`` explicitly when working with bytes,
    otherwise end-of-stream is signalled with the wrong string type.

    .. versionadded:: 0.9
       `sentinel` parameter was added.
    """

    def __new__(cls, obj, sentinel=''):
        # Dispatch on whether *obj* is iterable (reader) or callable (writer).
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj, sentinel)
        return IterO(iterator, sentinel)

    def __iter__(self):
        return self

    def _ensure_open(self):
        # Shared guard: every operation fails identically once closed.
        if self.closed:
            raise ValueError('I/O operation on closed file')

    def tell(self):
        self._ensure_open()
        return self.pos

    def isatty(self):
        self._ensure_open()
        return False

    # The stubs below are overridden by the direction-specific subclass;
    # on the wrong side of the stream they report a bad file descriptor.
    def seek(self, pos, mode=0):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def truncate(self, size=None):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def write(self, s):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def writelines(self, list):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def read(self, n=-1):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def readlines(self, sizehint=0):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def readline(self, length=None):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def flush(self):
        self._ensure_open()
        raise IOError(9, 'Bad file descriptor')

    def __next__(self):
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
class IterI(IterIO):
    """Convert an stream into an iterator."""
    # NOTE: ``__new__`` contains ``yield`` statements, so ``IterI(func)``
    # evaluates to a *generator*, not an IterI instance.  The instance
    # created below (``stream``) is only ever seen by *func*, which runs
    # inside a greenlet and is resumed each time the surrounding
    # generator is advanced.
    def __new__(cls, func, sentinel=''):
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        # Remember who is consuming us so flushes can switch back.
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.sentinel = sentinel
        stream.pos = 0
        def run():
            func(stream)
            stream.close()
        g = greenlet.greenlet(run, stream._parent)
        # Each switch runs *func* until its next flush/close; a falsy
        # return value means the writer side has finished.
        while 1:
            rv = g.switch()
            if not rv:
                return
            yield rv[0]
    def close(self):
        # Idempotent: the final (possibly empty) buffer is flushed once.
        if not self.closed:
            self.closed = True
            self._flush_impl()
    def write(self, s):
        # Empty writes are ignored entirely (no position change).
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if s:
            self.pos += len(s)
            self._buffer.append(s)
    def writelines(self, list):
        for item in list:
            self.write(item)
    def flush(self):
        # Produces one item for the consuming iterable, even when nothing
        # was written since the last flush (the sentinel is yielded then).
        if self.closed:
            raise ValueError('I/O operation on closed file')
        self._flush_impl()
    def _flush_impl(self):
        data = _mixed_join(self._buffer, self.sentinel)
        self._buffer = []
        # On close with no pending data, switch without a payload so the
        # generator in __new__ terminates; otherwise hand over the chunk.
        if not data and self.closed:
            self._parent.switch()
        else:
            self._parent.switch((data,))
class IterO(IterIO):
    """Iter output.  Wrap an iterator and give it a stream like interface.

    The wrapped generator is consumed lazily: items are pulled only when
    a read or seek moves past what has been buffered in ``_buf`` so far.
    """

    def __new__(cls, gen, sentinel=''):
        self = object.__new__(cls)
        self._gen = gen
        self._buf = None          # everything consumed from _gen so far
        self.sentinel = sentinel  # returned when the stream is exhausted
        self.closed = False
        self.pos = 0
        return self

    def __iter__(self):
        return self

    def _buf_append(self, string):
        """Replace string directly without appending to an empty string,
        avoiding type issues."""
        if not self._buf:
            self._buf = string
        else:
            self._buf += string

    def close(self):
        """Close the stream and the underlying generator (if closable)."""
        if not self.closed:
            self.closed = True
            if hasattr(self._gen, 'close'):
                self._gen.close()

    def seek(self, pos, mode=0):
        """Move the read position, consuming the generator as needed.

        :param mode: 0 absolute, 1 relative to the current position,
                     2 relative to the end (consumes the whole generator).
        """
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError('Invalid argument')
        buf = []
        try:
            # Bug fix: ``_buf`` is still None before the first read; the
            # old code called ``len(self._buf)`` unguarded and raised
            # TypeError (read() below already used this guard).
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while pos > tmp_end_pos:
                # Bug fix: use the builtin next() like read()/readline()
                # do; the ``.next()`` method is Python-2-only and was
                # inconsistent with the rest of the class.
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        self.pos = max(0, pos)

    def read(self, n=-1):
        """Read up to *n* characters/bytes; everything when *n* < 0.

        Returns the sentinel once the stream is exhausted and empty.
        """
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if n < 0:
            self._buf_append(_mixed_join(self._gen, self.sentinel))
            result = self._buf[self.pos:]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            # Pull at least one item when nothing is buffered yet so the
            # join below can establish the string type.
            while new_pos > tmp_end_pos or (self._buf is None and not buf):
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        if self._buf is None:
            return self.sentinel
        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readline(self, length=None):
        """Read a single line, optionally capped at *length* characters."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        nl_pos = -1
        if self._buf:
            nl_pos = self._buf.find(_newline(self._buf), self.pos)
        buf = []
        try:
            pos = self.pos
            while nl_pos < 0:
                item = next(self._gen)
                local_pos = item.find(_newline(item))
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        if self._buf is None:
            return self.sentinel
        if nl_pos < 0:
            # No newline anywhere: the rest of the buffer is the line.
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1
        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readlines(self, sizehint=0):
        """Read all remaining lines; stop early once more than *sizehint*
        (> 0) characters have been collected."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
| bsd-3-clause |
naslanidis/ansible | lib/ansible/modules/packaging/os/pkg5_publisher.py | 25 | 5926 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
- This modules will configure which publishers a client will download IPS
packages from.
options:
name:
description:
- The publisher's name.
required: true
aliases: [ publisher ]
state:
description:
- Whether to ensure that a publisher is present or absent.
required: false
default: present
choices: [ present, absent ]
sticky:
description:
- Packages installed from a sticky repository can only receive updates
from that repository.
required: false
default: null
choices: [ true, false ]
enabled:
description:
- Is the repository enabled or disabled?
required: false
default: null
choices: [ true, false ]
origin:
description:
- A path or URL to the repository.
- Multiple values may be provided.
required: false
default: null
mirror:
description:
- A path or URL to the repository mirror.
- Multiple values may be provided.
required: false
default: null
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
name: solaris
sticky: true
origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
- pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
'''
def main():
    """Entry point: parse the module arguments and apply the request."""
    argument_spec = dict(
        name=dict(required=True, aliases=['publisher']),
        state=dict(default='present', choices=['present', 'absent']),
        sticky=dict(type='bool'),
        enabled=dict(type='bool'),
        # search_after=dict(),
        # search_before=dict(),
        origin=dict(type='list'),
        mirror=dict(type='list'),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    # An empty string on the command line arrives as ['']; normalise it
    # to "no URIs at all" so comparisons against pkg(1) output work.
    for list_option in ('origin', 'mirror'):
        if module.params[list_option] == ['']:
            module.params[list_option] = []

    if module.params['state'] == 'present':
        modify_publisher(module, module.params)
    else:
        unset_publisher(module, module.params['name'])
def modify_publisher(module, params):
    """Create the publisher when missing, or update it when any requested
    option differs from the live configuration; exit unchanged otherwise."""
    name = params['name']
    current = get_publishers(module)
    if name not in current:
        return set_publisher(module, params)
    for option in ('origin', 'mirror', 'sticky', 'enabled'):
        wanted = params[option]
        if wanted is not None and wanted != current[name][option]:
            return set_publisher(module, params)
    # Everything already matches: report no change.
    module.exit_json()
def set_publisher(module, params):
    """Run ``pkg set-publisher`` with flags derived from *params* and exit."""
    name = params['name']
    args = []

    # Origins and mirrors are replaced wholesale: wipe, then re-add.
    if params['origin'] is not None:
        args.append('--remove-origin=*')
        args.extend('--add-origin=' + uri for uri in params['origin'])
    if params['mirror'] is not None:
        args.append('--remove-mirror=*')
        args.extend('--add-mirror=' + uri for uri in params['mirror'])

    if params['sticky'] is not None:
        args.append('--sticky' if params['sticky'] else '--non-sticky')
    if params['enabled'] is not None:
        args.append('--enable' if params['enabled'] else '--disable')

    rc, out, err = module.run_command(
        ["pkg", "set-publisher"] + args + [name],
        check_rc=True
    )
    module.exit_json(rc=rc, results=[out], msg=err, changed=True)
def unset_publisher(module, publisher):
    """Remove *publisher* with ``pkg unset-publisher``.

    Exits without change when the publisher does not exist.
    """
    # Idiom fix: ``x not in y`` instead of ``not x in y``.
    if publisher not in get_publishers(module):
        module.exit_json()
    rc, out, err = module.run_command(
        ["pkg", "unset-publisher", publisher],
        check_rc=True
    )
    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)
def get_publishers(module):
    """Parse ``pkg publisher -Ftsv`` output.

    Returns a dict mapping publisher name to a dict with keys
    ``sticky``, ``enabled``, ``origin`` (list of URIs) and ``mirror``
    (list of URIs).  A publisher appears once per repository URI in the
    tabular output, hence the accumulation loop.
    """
    rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
    lines = out.splitlines()
    # First row carries the column names; lower-case them for keys.
    keys = lines.pop(0).lower().split("\t")
    publishers = {}
    for line in lines:
        values = dict(zip(keys, map(unstringify, line.split("\t"))))
        name = values['publisher']
        # Idiom fix: ``x not in y`` instead of ``not x in y``.
        if name not in publishers:
            publishers[name] = dict(
                (k, values[k]) for k in ['sticky', 'enabled']
            )
            publishers[name]['origin'] = []
            publishers[name]['mirror'] = []
        # ``type`` is "origin" or "mirror"; None means no URI on this row.
        if values['type'] is not None:
            publishers[name][values['type']].append(values['uri'])
    return publishers
def unstringify(val):
    """Map pkg(1) tabular output tokens to Python values.

    '-' and the empty string mean "no value"; 'true'/'false' become
    booleans; anything else is passed through unchanged.
    """
    special = {'-': None, '': None, 'true': True, 'false': False}
    if val in special:
        return special[val]
    return val
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
abramhindle/slowdraw | slowdraw.py | 1 | 5288 | #!/usr/bin/env python
''' Slowdraw watches an image file and makes animations out of the changes
'''
import sys
import cv2
import cv
import numpy as np
import logging
import time
import argparse
import watchdog
import os.path
import pickle
import math
from watchdog.observers import Observer
# Command line: window geometry, strictness of file matching, and the
# image file to watch.
parser = argparse.ArgumentParser(description='slowdraw')
parser.add_argument('-W', default=1024, help='Width of window')
parser.add_argument('-H', default=768, help='Height of window')
parser.add_argument('-strict', default=False, help='Strictness')
parser.add_argument('path', help='Path of file to watch')
args = parser.parse_args()
full_w = int(args.W)
full_h = int(args.H)
# NOTE(review): bool('false') is True -- any non-empty -strict value on
# the command line enables strict mode; confirm this is intended.
strictness = bool(args.strict)
def new_rgb(width, height):
    """Return a black ``height`` x ``width`` 8-bit, 3-channel image buffer."""
    return np.zeros((height, width, 3), dtype=np.uint8)
# Scratch buffer for the (currently commented-out) fullscreen compositing.
fullscreen_buffer = new_rgb(full_w,full_h)
logging.basicConfig(stream = sys.stderr, level=logging.INFO)
# NOTE(review): load_queue is never consumed in this file -- possibly
# dead state kept for a future feature.
load_queue = []
class ModListener(watchdog.events.FileSystemEventHandler):
    """Watchdog handler that records modified paths and forwards each
    matching one to *handler*."""

    def __init__(self, handler):
        super(ModListener, self).__init__()
        self.queue = []
        self.handler = handler

    def on_modified(self, event):
        logging.info("Modified: "+event.src_path)
        # In non-strict mode any file in the watched file's directory
        # counts; in strict mode only the watched file itself does.
        same_dir = (os.path.dirname(args.path) ==
                    os.path.dirname(event.src_path))
        exact_file = event.src_path == args.path
        if (not strictness and same_dir) or exact_file:
            logging.info( "Recorded Modified: " + event.src_path )
            self.queue.append(event.src_path)
            self.handler(event.src_path)
window_name = "slowdraw"
fullscreen = False
# Create the display window up front (OpenGL backend, fullscreen-capable).
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN | cv2.WINDOW_OPENGL)
def start_fullscreen():
    """Toggle the window between fullscreen and windowed mode.

    NOTE(review): despite the name this function *toggles* -- a second
    call (second 'f' keypress) leaves fullscreen again.
    """
    global fullscreen
    global window_name
    if not fullscreen:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
        fullscreen = True
    else:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, 0)
        fullscreen = False
# Seed the animation with the current contents of the watched file.
frame1 = cv2.imread(args.path)
# NOTE(review): numpy shape is (rows, cols, channels), so ``w`` here is
# actually the image height and ``h`` the width -- confirm before reuse.
w,h,_ = frame1.shape
frames = [frame1]
curr_frame = 0
done = False
def handle_frame(fname):
    """Load a newly written PNG and append it to the animation frames.

    Non-PNG paths are silently ignored.
    """
    # endswith() is the idiomatic suffix test; the length guard keeps the
    # original behaviour of rejecting a bare ".png" filename.
    if len(fname) > 4 and fname.endswith(".png"):
        newframe = cv2.imread(fname)
        frames.append(newframe)
# Watch the target file's directory; handle_frame() appends new PNGs.
mod_listener = ModListener(handle_frame)
observer = Observer()
directory = os.path.dirname(args.path)
observer.schedule(mod_listener, directory, recursive=True)
observer.start()
maxtime = 1000/2
mintime = 1000/30
# Per-bucket pacing tables; bucket i covers animations of up to 2**i
# frames:            2    4    8    16    32   64   128   256   512
maxtimes = [2000,2000,2000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]
mintimes = [1000,1000,1000, 1000, 500, 200, 100, 50, 50, 50]
def get_times(nframes):
    """Return ``(max_ms, min_ms)`` frame display times for an animation
    of *nframes* frames; longer animations get faster minimum pacing."""
    bucket = int(math.ceil(math.log(nframes) / math.log(2)))
    bucket = min(bucket, len(maxtimes) - 1)
    return maxtimes[bucket], mintimes[bucket]
def scalexp(v, mint, maxt, scale=5):
    """Map ``v`` in [0, 1] onto [mint, maxt] along an exponential curve,
    normalised so 0 -> mint and 1 -> maxt."""
    lo = math.exp(1.0) / math.exp(scale)
    curved = math.exp(1 + (scale - 1) * v) / math.exp(scale)
    normalised = (curved - lo) / (1.0 - lo)
    return normalised * (maxt - mint) + mint
def linscale(v, mint, maxt):
    """Linearly map ``v`` in [0, 1] onto [mint, maxt]."""
    return v * (maxt - mint) + mint
def maintain_aspect(maxx, maxy, x, y):
    """Scale ``(x, y)`` to fit inside ``(maxx, maxy)`` while preserving
    the aspect ratio; height-constrained scaling is tried first.

    >>> maintain_aspect(1024, 768, 640, 480)
    (1024, 768)
    >>> maintain_aspect(1024, 768, 608, 472)
    (989, 768)
    """
    width_ratio = maxx / float(x)
    height_ratio = maxy / float(y)
    if height_ratio * y <= maxy or height_ratio * x <= maxx:
        return (int(height_ratio * x), int(height_ratio * y))
    return (int(width_ratio * x), int(width_ratio * y))
# XVID-encoded AVI capturing every displayed frame at 30 fps.
fourcc = cv2.cv.FOURCC(*'XVID')
# NOTE(review): VideoWriter expects (width, height); (h, w) matches the
# swapped shape unpacking above -- double-check the axis order.
writer = cv2.VideoWriter("slowdraw.avi",fourcc,30,(h,w),1)
frametime = 1000.0/30.0
resized_frame = None
fs_offset_x = 0
fs_offset_y = 0
cv2.imshow('slowdraw', fullscreen_buffer )
try:
    while not done:
        framen = curr_frame % len(frames)
        frame = frames[curr_frame % len(frames)]
        # Disabled fullscreen compositing path (letterboxing into
        # fullscreen_buffer) -- kept for reference.
        #if resized_frame == None:
        #    (lh,lw,depth) = frame.shape
        #    ratio = float(full_h)/float(lh)
        #    (resized_w,resized_h) = maintain_aspect(full_w,full_h,lw,lh)
        #    resized_frame = new_rgb(resized_w,resized_h)
        #    fs_offset_x = (full_w - resized_w)/2
        #    fs_offset_y = (full_h - resized_h)/2
        #    print "%s %s %s %s" % (resized_w,resized_h,fs_offset_x, fs_offset_y)
        #resized_frame[:,:] = cv2.resize(frame,(resized_w,resized_h))
        #fullscreen_buffer[fs_offset_y:fs_offset_y+resized_h , fs_offset_x:fs_offset_x+resized_w] = resized_frame
        cv2.imshow('slowdraw', frame )
        #print "%s,%s,%s" % fullscreen_buffer.shape
        #cv2.imshow('slowdraw', fullscreen_buffer )
        # Earlier frames dwell longer: the wait scales exponentially with
        # the frame's position within the current animation length.
        tmaxtime, tmintime = get_times(len(frames))
        wait = scalexp( (framen + 1.0) / len(frames) , tmintime,tmaxtime)
        print(wait,tmaxtime,tmintime)
        curr_frame += 1
        # Duplicate the frame into the AVI so the on-screen dwell time is
        # reflected in the constant 30 fps recording.
        for i in range(0,max(1,int(wait/frametime))):
            # print("Writing frame %s %s %s" % (i,wait,wait/frametime))
            writer.write(frame)
        # TODO: fix the wait time
        k = cv2.waitKey(int(wait)) & 0xff
        if k == 27:
            done = True
            continue
        if k == ord('f'):
            start_fullscreen()
except KeyboardInterrupt:
    observer.stop()
# pickle.dump(frames,file('slowdraw.pkl','wb'))
writer.release()
observer.stop()
observer.join()
| gpl-3.0 |
jpush/jbox | Server/venv/lib/python3.5/site-packages/jinja2/utils.py | 323 | 16560 | # -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import errno
from collections import deque
from threading import Lock
from jinja2._compat import text_type, string_types, implements_iterator, \
url_quote
# Splits text on runs of whitespace, keeping the separators so the text
# can be re-joined verbatim after per-word processing.
_word_split_re = re.compile(r'(\s+)')
# Captures leading/trailing punctuation around a word so urlize() can
# link URLs without swallowing parens, commas, etc.
# NOTE(review): the duplicated '<' and '>' alternatives look like
# '&lt;'/'&gt;' HTML entities that lost their escaping somewhere
# upstream -- compare with the original Jinja2 source.
_punctuation_re = re.compile(
    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
        '|'.join(map(re.escape, ('(', '<', '<'))),
        '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>')))
    )
)
# Loose "looks like an email address" check used by urlize().
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
# HTML comments and tags, for tag stripping.
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# HTML entities such as '&amp;'.
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
# Unicode string concatenation shortcut.
concat = u''.join
def contextfunction(f):
    """Decorator marking a function or method as a context callable.

    A context callable receives the active :class:`Context` as its first
    argument when invoked from a template, giving it access to the
    context and the functions it provides.  Example: a function listing
    the current template's exported variables::

        @contextfunction
        def get_exported_names(context):
            return sorted(context.exported_vars)
    """
    setattr(f, 'contextfunction', True)
    return f
def evalcontextfunction(f):
    """Decorator marking a function or method as an eval context callable.

    Works like :func:`contextfunction`, but the first argument passed is
    the evaluation context object rather than the full context.  See
    :ref:`eval-context` for details.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfunction', True)
    return f
def environmentfunction(f):
    """Decorator marking a function or method as an environment callable.

    Works like :func:`contextfunction`, except the first argument passed
    from the template is the active :class:`Environment` instead of the
    context.
    """
    setattr(f, 'environmentfunction', True)
    return f
def internalcode(f):
    """Mark *f* as internally used by registering its code object in
    the ``internal_code`` set."""
    internal_code.add(f.__code__)
    return f
def is_undefined(obj):
    """Return True when *obj* is a Jinja :class:`Undefined` value.

    A nicer-reading wrapper around the instance check, useful in custom
    filters and tests that must react to undefined variables::

        def default(var, default=''):
            return default if is_undefined(var) else var
    """
    from jinja2.runtime import Undefined
    return isinstance(obj, Undefined)
def consume(iterable):
    """Exhaust *iterable*, discarding every item."""
    deque(iterable, maxlen=0)
def clear_caches():
    """Drop Jinja2's internal environment and lexer caches.

    Those caches exist so environments and lexers need not be recreated
    constantly; clearing them is normally only useful when measuring
    memory consumption.
    """
    from jinja2.environment import _spontaneous_environments
    from jinja2.lexer import _lexer_cache
    for cache in (_spontaneous_environments, _lexer_cache):
        cache.clear()
def import_string(import_name, silent=False):
    """Import and return an object addressed by a string.

    Both dotted (``xml.sax.saxutils.escape``) and colon
    (``xml.sax.saxutils:escape``) notations are supported.  When
    *silent* is true, failures yield ``None`` instead of raising.

    :return: imported object
    """
    try:
        if ':' in import_name:
            module, obj = import_name.split(':', 1)
        elif '.' in import_name:
            # Last dot separates the module path from the attribute.
            module, _, obj = import_name.rpartition('.')
        else:
            return __import__(import_name)
        return getattr(__import__(module, None, None, [obj]), obj)
    except (ImportError, AttributeError):
        if not silent:
            raise
def open_if_exists(filename, mode='rb'):
    """Open *filename* and return the file object, or ``None`` when the
    file does not exist (or the path is a directory / invalid)."""
    try:
        handle = open(filename, mode)
    except IOError as exc:
        if exc.errno in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
            return None
        raise
    return handle
def object_type_repr(obj):
    """Return a readable name for *obj*'s type, special-casing the
    ``None`` and ``Ellipsis`` singletons."""
    if obj is None:
        return 'None'
    if obj is Ellipsis:
        return 'Ellipsis'
    cls = obj.__class__
    # Builtins print without their module name (``__builtin__`` on 2.x,
    # ``builtins`` on 3.x).
    if cls.__module__ in ('__builtin__', 'builtins'):
        return '%s object' % cls.__name__
    return '%s.%s object' % (cls.__module__, cls.__name__)
def pformat(obj, verbose=False):
    """Pretty print *obj*: prefer the external `pretty` library, falling
    back to the stdlib `pprint` when it is unavailable."""
    try:
        from pretty import pretty
    except ImportError:
        from pprint import pformat as _pformat
        return _pformat(obj)
    return pretty(obj, verbose=verbose)
def urlize(text, trim_url_limit=None, nofollow=False, target=None):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.
    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.
    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    If target is not None, a target attribute will be added to the link.
    """
    # Truncate a displayed URL to trim_url_limit characters with an
    # ellipsis; identity when no limit is configured.
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
                         and (x[:limit] + (len(x) >=limit and '...'
                                           or '')) or x
    # Escape first, then split into words while keeping the whitespace
    # separators so the text can be reassembled verbatim.
    words = _word_split_re.split(text_type(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    if target is not None and isinstance(target, string_types):
        target_attr = ' target="%s"' % target
    else:
        target_attr = ''
    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            # Strip surrounding punctuation; only the middle is linked.
            lead, middle, trail = match.groups()
            # Heuristic: bare domains (www.* or *.org/.net/.com without a
            # scheme or '@') get an http:// link prefixed.
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                not middle.startswith('https://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
                    nofollow_attr, target_attr, trim_url(middle))
            if middle.startswith('http://') or \
               middle.startswith('https://'):
                middle = '<a href="%s"%s%s>%s</a>' % (middle,
                    nofollow_attr, target_attr, trim_url(middle))
            # Email-looking words become mailto: links.
            if '@' in middle and not middle.startswith('www.') and \
               not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """Generate some lorem ipsum for the template.

    :param n:    number of paragraphs to generate
    :param html: wrap each paragraph in ``<p>`` tags and return escaped
                 :class:`Markup` instead of plain text
    :param min:  minimum words per paragraph
    :param max:  maximum words per paragraph (exclusive)
    """
    from jinja2.constants import LOREM_IPSUM_WORDS
    from random import choice, randrange
    words = LOREM_IPSUM_WORDS.split()
    result = []
    for _ in range(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []
        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(range(randrange(min, max))):
            # never pick the same word twice in a row
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ','
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += '.'
                next_capitalized = True
            p.append(word)
        # ensure that the paragraph ends with a dot.
        p = u' '.join(p)
        if p.endswith(','):
            p = p[:-1] + '.'
        elif not p.endswith('.'):
            p += '.'
        result.append(p)
    if not html:
        return u'\n\n'.join(result)
    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8', for_qs=False):
    """URL-quote a single string for safe use in a URL or query string.

    Non-strings are converted to their unicode representation first and
    text is encoded with *charset* before quoting.

    :param for_qs: quote for a query-string value: ``/`` is no longer
                   treated as safe and spaces become ``+``.
    """
    if not isinstance(obj, string_types):
        obj = text_type(obj)
    if isinstance(obj, text_type):
        obj = obj.encode(charset)
    # BUG FIX: the old ``for_qs and b'' or b'/'`` and/or-trick always
    # evaluated to b'/' because b'' is falsy, so '/' was never quoted
    # inside query-string values.  A real conditional restores the intent.
    safe = b'' if for_qs else b'/'
    rv = text_type(url_quote(obj, safe))
    if for_qs:
        rv = rv.replace('%20', '+')
    return rv
class LRUCache(object):
    """A simple LRU Cache implementation.

    Thread safety: the individual item accessors take the internal lock
    themselves; compound operations (such as :meth:`setdefault`) are
    therefore not atomic.
    """

    # this is fast for small capacities (something below 1000) but doesn't
    # scale.  But as long as it's only used as storage for templates this
    # won't do any harm.

    def __init__(self, capacity):
        # Maximum number of entries; the least recently used entry is
        # evicted when a new key is inserted into a full cache.
        self.capacity = capacity
        self._mapping = {}
        self._queue = deque()
        self._postinit()

    def _postinit(self):
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        self._wlock = Lock()
        self._append = self._queue.append

    def __getstate__(self):
        # Pickle only the data; the lock and the method aliases are
        # rebuilt by __setstate__ via _postinit().
        return {
            'capacity': self.capacity,
            '_mapping': self._mapping,
            '_queue': self._queue
        }

    def __setstate__(self, d):
        self.__dict__.update(d)
        self._postinit()

    def __getnewargs__(self):
        return (self.capacity,)

    def copy(self):
        """Return a shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        # BUG FIX: extend the copy's own deque instead of rebinding
        # ``_queue``.  Rebinding left the method aliases created by
        # _postinit() pointing at the old, empty deque, silently
        # corrupting the copy's LRU bookkeeping (and raising IndexError
        # on the first eviction).
        rv._queue.extend(self._queue)
        return rv

    def get(self, key, default=None):
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default=None):
        """Set `default` if the key is not in the cache otherwise
        leave unchanged.  Return the value of this key.

        BUG FIX: the previous implementation acquired ``_wlock`` and
        then called ``self[key]``, which acquires the same non-reentrant
        lock again -- every call deadlocked.  The item accessors lock
        themselves, so no outer lock is taken here; the check-then-set
        pair is consequently not atomic.
        """
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def clear(self):
        """Clear the cache."""
        self._wlock.acquire()
        try:
            self._mapping.clear()
            self._queue.clear()
        finally:
            self._wlock.release()

    def __contains__(self, key):
        """Check if a key exists in this cache."""
        return key in self._mapping

    def __len__(self):
        """Return the current size of the cache."""
        return len(self._mapping)

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self._mapping
        )

    def __getitem__(self, key):
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            rv = self._mapping[key]
            # Promote the key to most-recently-used unless it's already
            # at the hot end of the queue.
            if self._queue[-1] != key:
                try:
                    self._remove(key)
                except ValueError:
                    # if something removed the key from the container
                    # when we read, ignore the ValueError that we would
                    # get otherwise.
                    pass
                self._append(key)
            return rv
        finally:
            self._wlock.release()

    def __setitem__(self, key, value):
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        self._wlock.acquire()
        try:
            if key in self._mapping:
                self._remove(key)
            elif len(self._mapping) == self.capacity:
                # Cache is full: evict the least recently used key.
                del self._mapping[self._popleft()]
            self._append(key)
            self._mapping[key] = value
        finally:
            self._wlock.release()

    def __delitem__(self, key):
        """Remove an item from the cache dict.
        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            del self._mapping[key]
            try:
                self._remove(key)
            except ValueError:
                # Defensive: tolerate a queue/mapping mismatch instead
                # of failing the deletion.
                pass
        finally:
            self._wlock.release()

    def items(self):
        """Return a list of items."""
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result

    def iteritems(self):
        """Iterate over all items."""
        return iter(self.items())

    def values(self):
        """Return a list of all values."""
        return [x[1] for x in self.items()]

    def itervalue(self):
        """Iterate over all values."""
        return iter(self.values())

    def keys(self):
        """Return a list of all keys ordered by most recent usage."""
        return list(self)

    def iterkeys(self):
        """Iterate over all keys in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(tuple(self._queue))

    __iter__ = iterkeys

    def __reversed__(self):
        """Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))

    __copy__ = copy
# register the LRU cache as mutable mapping if possible
try:
    # NOTE(review): on Python 3.10+ MutableMapping is only importable
    # from collections.abc, so this registration is silently skipped
    # there -- confirm whether that matters for callers.
    from collections import MutableMapping
    MutableMapping.register(LRUCache)
except ImportError:
    pass
@implements_iterator
class Cycler(object):
    """Iterator that cycles endlessly over a fixed sequence of items."""

    def __init__(self, *items):
        if not items:
            raise RuntimeError('at least one item has to be provided')
        self.items = items
        self.reset()

    def reset(self):
        """Rewind the cycle to its first item."""
        self.pos = 0

    @property
    def current(self):
        """The item the cycle currently points at."""
        return self.items[self.pos]

    def __next__(self):
        """Return the current item and advance, wrapping around."""
        value = self.current
        self.pos = (self.pos + 1) % len(self.items)
        return value
class Joiner(object):
    """Callable returning the empty string on first use and the
    separator on every use after that -- handy for joining items
    emitted inside template loops."""

    def __init__(self, sep=u', '):
        self.sep = sep
        self.used = False

    def __call__(self):
        if self.used:
            return self.sep
        self.used = True
        return u''
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode
| mit |
Zeken/audacity | lib-src/lv2/lv2/waflib/Tools/ruby.py | 316 | 3925 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Options,Utils
from waflib.TaskGen import before_method,feature,after_method,Task,extension
from waflib.Configure import conf
@feature('rubyext')
@before_method('apply_incpaths','apply_lib_vars','apply_bundle','apply_link')
def init_rubyext(self):
	"""Prepare a ruby extension task generator: set the install
	location and make sure the RUBY/RUBYEXT use-libs are present."""
	self.install_path = '${ARCHDIR_RUBY}'
	self.uselib = self.to_list(getattr(self, 'uselib', ''))
	for lib in ('RUBY', 'RUBYEXT'):
		if lib not in self.uselib:
			self.uselib.append(lib)
@feature('rubyext')
@before_method('apply_link','propagate_uselib')
def apply_ruby_so_name(self):
	"""Make C and C++ shared objects use the ruby extension
	file-naming pattern detected at configure time."""
	pattern = self.env['rubyext_PATTERN']
	self.env['cshlib_PATTERN'] = pattern
	self.env['cxxshlib_PATTERN'] = pattern
@conf
def check_ruby_version(self,minver=()):
	"""Locate the ruby interpreter (honouring --with-ruby-binary) and
	fail the configuration when it is older than *minver* (a tuple of
	ints, e.g. ``(1, 9, 3)``)."""
	if Options.options.rubybinary:
		self.env.RUBY=Options.options.rubybinary
	else:
		self.find_program('ruby',var='RUBY')
	ruby=self.env.RUBY
	try:
		# Older rubies expose VERSION, newer ones RUBY_VERSION.
		version=self.cmd_and_log([ruby,'-e','puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
	except Exception:
		self.fatal('could not determine ruby version')
	self.env.RUBY_VERSION=version
	try:
		ver=tuple(map(int,version.split(".")))
	except Exception:
		self.fatal('unsupported ruby version %r'%version)
	cver=''
	if minver:
		if ver<minver:
			self.fatal('ruby is too old %r'%ver)
		cver='.'.join([str(x)for x in minver])
	else:
		cver=ver
	self.msg('Checking for ruby version %s'%str(minver or''),cver)
@conf
def check_ruby_ext_devel(self):
	"""Collect the compiler and linker settings needed to build native
	ruby extensions by querying rbconfig through the detected
	interpreter; requires check_ruby_version() and a C/C++ compiler."""
	if not self.env.RUBY:
		self.fatal('ruby detection is required first')
	if not self.env.CC_NAME and not self.env.CXX_NAME:
		self.fatal('load a c/c++ compiler first')
	version=tuple(map(int,self.env.RUBY_VERSION.split(".")))
	def read_out(cmd):
		# Run a one-liner under ruby with rbconfig preloaded.
		return Utils.to_list(self.cmd_and_log([self.env.RUBY,'-rrbconfig','-e',cmd]))
	def read_config(key):
		return read_out('puts Config::CONFIG[%r]'%key)
	ruby=self.env['RUBY']
	archdir=read_config('archdir')
	cpppath=archdir
	if version>=(1,9,0):
		# Ruby 1.9+ splits headers between rubyhdrdir and its
		# architecture-specific subdirectory.
		ruby_hdrdir=read_config('rubyhdrdir')
		cpppath+=ruby_hdrdir
		cpppath+=[os.path.join(ruby_hdrdir[0],read_config('arch')[0])]
	self.check(header_name='ruby.h',includes=cpppath,errmsg='could not find ruby header file')
	self.env.LIBPATH_RUBYEXT=read_config('libdir')
	self.env.LIBPATH_RUBYEXT+=archdir
	self.env.INCLUDES_RUBYEXT=cpppath
	self.env.CFLAGS_RUBYEXT=read_config('CCDLFLAGS')
	self.env.rubyext_PATTERN='%s.'+read_config('DLEXT')[0]
	# Strip the leading compiler name (and a stray "ppc" word seen on
	# old OS X setups) from LDSHARED, keeping only the flags.
	flags=read_config('LDSHARED')
	while flags and flags[0][0]!='-':
		flags=flags[1:]
	if len(flags)>1 and flags[1]=="ppc":
		flags=flags[2:]
	self.env.LINKFLAGS_RUBYEXT=flags
	self.env.LINKFLAGS_RUBYEXT+=read_config('LIBS')
	self.env.LINKFLAGS_RUBYEXT+=read_config('LIBRUBYARG_SHARED')
	# Install locations, overridable via command line options.
	if Options.options.rubyarchdir:
		self.env.ARCHDIR_RUBY=Options.options.rubyarchdir
	else:
		self.env.ARCHDIR_RUBY=read_config('sitearchdir')[0]
	if Options.options.rubylibdir:
		self.env.LIBDIR_RUBY=Options.options.rubylibdir
	else:
		self.env.LIBDIR_RUBY=read_config('sitelibdir')[0]
@conf
def check_ruby_module(self,module_name):
	"""Configuration check: fail unless the ruby module *module_name*
	can be required by the detected interpreter."""
	self.start_msg('Ruby module %s'%module_name)
	try:
		self.cmd_and_log([self.env['RUBY'],'-e','require \'%s\';puts 1'%module_name])
	except Exception:
		self.end_msg(False)
		self.fatal('Could not find the ruby module %r'%module_name)
	else:
		self.end_msg(True)
@extension('.rb')
def process(self,node):
	# Each .rb source file simply becomes one run_ruby task.
	tsk=self.create_task('run_ruby',node)
class run_ruby(Task.Task):
	# Execute the script with its source directory on ruby's load path.
	run_str='${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}'
def options(opt):
    """Register the ruby-related command-line options on *opt*."""
    ruby_opts = (
        ('--with-ruby-archdir', 'rubyarchdir', 'Specify directory where to install arch specific files'),
        ('--with-ruby-libdir', 'rubylibdir', 'Specify alternate ruby library path'),
        ('--with-ruby-binary', 'rubybinary', 'Specify alternate ruby binary'),
    )
    for flag, dest, help_text in ruby_opts:
        opt.add_option(flag, type='string', dest=dest, help=help_text)
| gpl-2.0 |
vFense/vFenseAgent-nix | agent/deps/mac/Python-2.7.5/lib/python2.7/idlelib/configHandler.py | 51 | 29367 | """Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
from idlelib import macosxSupport
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
# Module-specific exception types raised on bad arguments to IdleConf
# (each corresponds to one validated parameter).
class InvalidConfigType(Exception): pass   # configType not in ('main','extensions','highlight','keys')
class InvalidConfigSet(Exception): pass    # configSet not 'user' or 'default'
class InvalidFgBg(Exception): pass         # fgBg argument not 'fg' or 'bg'
class InvalidTheme(Exception): pass        # theme type not 'user' or 'default'
class IdleConfParser(ConfigParser):
    """
    A ConfigParser specialised for idle configuration file handling
    """
    def __init__(self, cfgFile, cfgDefaults=None):
        """
        cfgFile - string, fully specified configuration file name
        """
        self.file = cfgFile
        ConfigParser.__init__(self, defaults=cfgDefaults)

    def Get(self, section, option, type=None, default=None, raw=False):
        """
        Get an option value for given section/option or return default.
        If type is specified, return as type.
        """
        # NOTE: 'type' shadows the builtin, but it is part of the
        # established public signature and cannot be renamed.
        if not self.has_option(section, option):
            return default
        if type == 'bool':
            return self.getboolean(section, option)
        elif type == 'int':
            return self.getint(section, option)
        else:
            return self.get(section, option, raw=raw)

    def GetOptionList(self, section):
        """
        Get an option list for given section
        """
        if self.has_section(section):
            return self.options(section)
        else:  # return a default value
            return []

    def Load(self):
        """
        Load the configuration file from disk
        """
        self.read(self.file)
class IdleUserConfParser(IdleConfParser):
    """
    IdleConfigParser specialised for user configuration handling.

    Adds mutation helpers and the policy that an empty user config file
    is deleted from disk rather than written out empty.
    """
    def AddSection(self, section):
        """
        if section doesn't exist, add it
        """
        if not self.has_section(section):
            self.add_section(section)

    def RemoveEmptySections(self):
        """
        remove any sections that have no options
        """
        for section in self.sections():
            if not self.GetOptionList(section):
                self.remove_section(section)

    def IsEmpty(self):
        """
        Remove empty sections and then return 1 if parser has no sections
        left, else return 0.
        """
        self.RemoveEmptySections()
        if self.sections():
            return 0
        else:
            return 1

    def RemoveOption(self, section, option):
        """
        If section/option exists, remove it.
        Returns 1 if option was removed, 0 otherwise.
        """
        # NOTE(review): when the section does not exist this falls through
        # and returns None (falsy) rather than the documented 0.
        if self.has_section(section):
            return self.remove_option(section, option)

    def SetOption(self, section, option, value):
        """
        Sets option to value, adding section if required.
        Returns 1 if option was added or changed, otherwise 0.
        """
        if self.has_option(section, option):
            if self.get(section, option) == value:
                return 0
            else:
                self.set(section, option, value)
                return 1
        else:
            if not self.has_section(section):
                self.add_section(section)
            self.set(section, option, value)
            return 1

    def RemoveFile(self):
        """
        Removes the user config file from disk if it exists.
        """
        if os.path.exists(self.file):
            os.remove(self.file)

    def Save(self):
        """Update user configuration file.

        Remove empty sections. If resulting config isn't empty, write the file
        to disk. If config is empty, remove the file from disk if it exists.
        """
        if not self.IsEmpty():
            fname = self.file
            try:
                cfgFile = open(fname, 'w')
            except IOError:
                # the file may be unwritable in place: remove and retry once
                os.unlink(fname)
                cfgFile = open(fname, 'w')
            self.write(cfgFile)
        else:
            self.RemoveFile()
class IdleConf:
    """
    holds config parsers for all idle config files:
        default config files
            (idle install dir)/config-main.def
            (idle install dir)/config-extensions.def
            (idle install dir)/config-highlight.def
            (idle install dir)/config-keys.def
        user config files
            (user home dir)/.idlerc/config-main.cfg
            (user home dir)/.idlerc/config-extensions.cfg
            (user home dir)/.idlerc/config-highlight.cfg
            (user home dir)/.idlerc/config-keys.cfg
    """
    def __init__(self):
        # config type name ('main','extensions',...) -> parser over defaults
        self.defaultCfg = {}
        # config type name -> parser over the user's overrides
        self.userCfg = {}
        self.cfg = {}
        self.CreateConfigHandlers()
        self.LoadCfgFiles()
        #self.LoadCfg()
    def CreateConfigHandlers(self):
        """
        set up a dictionary of config parsers for default and user
        configurations respectively
        """
        #build idle install path
        if __name__ != '__main__':  # we were imported
            idleDir = os.path.dirname(__file__)
        else:  # we were exec'ed (for testing only)
            idleDir = os.path.abspath(sys.path[0])
        userDir = self.GetUserCfgDir()
        configTypes = ('main', 'extensions', 'highlight', 'keys')
        defCfgFiles = {}
        usrCfgFiles = {}
        for cfgType in configTypes:  # build config file names
            defCfgFiles[cfgType] = os.path.join(idleDir, 'config-' + cfgType + '.def')
            usrCfgFiles[cfgType] = os.path.join(userDir, 'config-' + cfgType + '.cfg')
        for cfgType in configTypes:  # create config parsers
            self.defaultCfg[cfgType] = IdleConfParser(defCfgFiles[cfgType])
            self.userCfg[cfgType] = IdleUserConfParser(usrCfgFiles[cfgType])
    def GetUserCfgDir(self):
        """
        Creates (if required) and returns a filesystem directory for storing
        user config files.

        Falls back to the current working directory when no home directory
        can be determined; exits IDLE if the directory cannot be created.
        """
        cfgDir = '.idlerc'
        userDir = os.path.expanduser('~')
        if userDir != '~':  # expanduser() found user home dir
            if not os.path.exists(userDir):
                warn = ('\n Warning: os.path.expanduser("~") points to\n ' +
                        userDir + ',\n but the path does not exist.\n')
                try:
                    # stderr may be unusable (e.g. running under pythonw)
                    sys.stderr.write(warn)
                except IOError:
                    pass
                userDir = '~'
        if userDir == "~":  # still no path to home!
            # traditionally IDLE has defaulted to os.getcwd(), is this adequate?
            userDir = os.getcwd()
        userDir = os.path.join(userDir, cfgDir)
        if not os.path.exists(userDir):
            try:
                os.mkdir(userDir)
            except (OSError, IOError):
                warn = ('\n Warning: unable to create user config directory\n' +
                        userDir + '\n Check path and permissions.\n Exiting!\n\n')
                sys.stderr.write(warn)
                raise SystemExit
        return userDir
    def GetOption(self, configType, section, option, default=None, type=None,
                  warn_on_default=True, raw=False):
        """
        Get an option value for given config type and given general
        configuration section/option or return a default. If type is specified,
        return as type. Firstly the user configuration is checked, with a
        fallback to the default configuration, and a final 'catch all'
        fallback to a useable passed-in default if the option isn't present in
        either the user or the default configuration.
        configType must be one of ('main','extensions','highlight','keys')
        If a default is returned, and warn_on_default is True, a warning is
        printed to stderr.
        """
        try:
            if self.userCfg[configType].has_option(section, option):
                return self.userCfg[configType].Get(section, option,
                                                    type=type, raw=raw)
        except ValueError:
            # the user value exists but cannot be converted to 'type';
            # warn and fall through to the default configuration
            warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
                       ' invalid %r value for configuration option %r\n'
                       ' from section %r: %r\n' %
                       (type, option, section,
                        self.userCfg[configType].Get(section, option,
                                                     raw=raw)))
            try:
                sys.stderr.write(warning)
            except IOError:
                pass
        try:
            if self.defaultCfg[configType].has_option(section, option):
                return self.defaultCfg[configType].Get(section, option,
                                                       type=type, raw=raw)
        except ValueError:
            pass
        #returning default, print warning
        if warn_on_default:
            warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
                       ' problem retrieving configuration option %r\n'
                       ' from section %r.\n'
                       ' returning default value: %r\n' %
                       (option, section, default))
            try:
                sys.stderr.write(warning)
            except IOError:
                pass
        return default
    def SetOption(self, configType, section, option, value):
        """In user's config file, set section's option to value.

        The change is in-memory only until SaveUserCfgFiles() is called.
        """
        self.userCfg[configType].SetOption(section, option, value)
    def GetSectionList(self, configSet, configType):
        """
        Get a list of sections from either the user or default config for
        the given config type.
        configSet must be either 'user' or 'default'
        configType must be one of ('main','extensions','highlight','keys')
        """
        if not (configType in ('main', 'extensions', 'highlight', 'keys')):
            raise InvalidConfigType, 'Invalid configType specified'
        if configSet == 'user':
            cfgParser = self.userCfg[configType]
        elif configSet == 'default':
            cfgParser = self.defaultCfg[configType]
        else:
            raise InvalidConfigSet, 'Invalid configSet specified'
        return cfgParser.sections()
    def GetHighlight(self, theme, element, fgBg=None):
        """
        return individual highlighting theme elements.
        fgBg - string ('fg'or'bg') or None, if None return a dictionary
        containing fg and bg colours (appropriate for passing to Tkinter in,
        e.g., a tag_config call), otherwise fg or bg colour only as specified.
        """
        # a theme shipped with IDLE (default config) takes precedence over
        # a user-defined theme of the same name
        if self.defaultCfg['highlight'].has_section(theme):
            themeDict = self.GetThemeDict('default', theme)
        else:
            themeDict = self.GetThemeDict('user', theme)
        fore = themeDict[element + '-foreground']
        if element == 'cursor':  # there is no config value for cursor bg
            back = themeDict['normal-background']
        else:
            back = themeDict[element + '-background']
        highlight = {"foreground": fore, "background": back}
        if not fgBg:  # return dict of both colours
            return highlight
        else:  # return specified colour only
            if fgBg == 'fg':
                return highlight["foreground"]
            if fgBg == 'bg':
                return highlight["background"]
            else:
                raise InvalidFgBg, 'Invalid fgBg specified'
    def GetThemeDict(self, type, themeName):
        """
        type - string, 'default' or 'user' theme type
        themeName - string, theme name
        Returns a dictionary which holds {option:value} for each element
        in the specified theme. Values are loaded over a set of ultimate last
        fallback defaults to guarantee that all theme elements are present in
        a newly created theme.
        """
        if type == 'user':
            cfgParser = self.userCfg['highlight']
        elif type == 'default':
            cfgParser = self.defaultCfg['highlight']
        else:
            raise InvalidTheme, 'Invalid theme type specified'
        #foreground and background values are provided for each theme element
        #(apart from cursor) even though all these values are not yet used
        #by idle, to allow for their use in the future. Default values are
        #generally black and white.
        theme = {'normal-foreground':'#000000',
                 'normal-background':'#ffffff',
                 'keyword-foreground':'#000000',
                 'keyword-background':'#ffffff',
                 'builtin-foreground':'#000000',
                 'builtin-background':'#ffffff',
                 'comment-foreground':'#000000',
                 'comment-background':'#ffffff',
                 'string-foreground':'#000000',
                 'string-background':'#ffffff',
                 'definition-foreground':'#000000',
                 'definition-background':'#ffffff',
                 'hilite-foreground':'#000000',
                 'hilite-background':'gray',
                 'break-foreground':'#ffffff',
                 'break-background':'#000000',
                 'hit-foreground':'#ffffff',
                 'hit-background':'#000000',
                 'error-foreground':'#ffffff',
                 'error-background':'#000000',
                 #cursor (only foreground can be set)
                 'cursor-foreground':'#000000',
                 #shell window
                 'stdout-foreground':'#000000',
                 'stdout-background':'#ffffff',
                 'stderr-foreground':'#000000',
                 'stderr-background':'#ffffff',
                 'console-foreground':'#000000',
                 'console-background':'#ffffff'}
        for element in theme.keys():
            if not cfgParser.has_option(themeName, element):
                #we are going to return a default, print warning
                warning = ('\n Warning: configHandler.py - IdleConf.GetThemeDict'
                           ' -\n problem retrieving theme element %r'
                           '\n from theme %r.\n'
                           ' returning default value: %r\n' %
                           (element, themeName, theme[element]))
                try:
                    sys.stderr.write(warning)
                except IOError:
                    pass
            colour = cfgParser.Get(themeName, element, default=theme[element])
            theme[element] = colour
        return theme
    # Both of the "current" settings below are stored in config-main
    # under the 'Theme' and 'Keys' sections respectively.
    def CurrentTheme(self):
        """
        Returns the name of the currently active theme
        """
        return self.GetOption('main', 'Theme', 'name', default='')

    def CurrentKeys(self):
        """
        Returns the name of the currently active key set
        """
        return self.GetOption('main', 'Keys', 'name', default='')
    def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
        """
        Gets a list of all idle extensions declared in the config files.
        active_only - boolean, if true only return active (enabled) extensions
        editor_only/shell_only - when active_only is true, additionally
        require the extension's 'enable_editor'/'enable_shell' option
        """
        extns = self.RemoveKeyBindNames(
                self.GetSectionList('default', 'extensions'))
        userExtns = self.RemoveKeyBindNames(
                self.GetSectionList('user', 'extensions'))
        for extn in userExtns:
            if extn not in extns:  # user has added own extension
                extns.append(extn)
        if active_only:
            activeExtns = []
            for extn in extns:
                if self.GetOption('extensions', extn, 'enable', default=True,
                                  type='bool'):
                    #the extension is enabled
                    if editor_only or shell_only:
                        if editor_only:
                            option = "enable_editor"
                        else:
                            option = "enable_shell"
                        if self.GetOption('extensions', extn, option,
                                          default=True, type='bool',
                                          warn_on_default=False):
                            activeExtns.append(extn)
                    else:
                        activeExtns.append(extn)
            return activeExtns
        else:
            return extns
def RemoveKeyBindNames(self,extnNameList):
#get rid of keybinding section names
names=extnNameList
kbNameIndicies=[]
for name in names:
if name.endswith(('_bindings', '_cfgBindings')):
kbNameIndicies.append(names.index(name))
kbNameIndicies.sort()
kbNameIndicies.reverse()
for index in kbNameIndicies: #delete each keybinding section name
del(names[index])
return names
    def GetExtnNameForEvent(self, virtualEvent):
        """
        Returns the name of the extension that virtualEvent is bound in, or
        None if not bound in any extension.
        virtualEvent - string, name of the virtual event to test for, without
        the enclosing '<< >>'
        """
        extName = None
        vEvent = '<<' + virtualEvent + '>>'
        # NOTE: the scan does not stop at the first hit, so if several
        # extensions bind the same event, the last one examined wins.
        for extn in self.GetExtensions(active_only=0):
            for event in self.GetExtensionKeys(extn).keys():
                if event == vEvent:
                    extName = extn
        return extName
    def GetExtensionKeys(self, extensionName):
        """
        returns a dictionary of the configurable keybindings for a particular
        extension,as they exist in the dictionary returned by GetCurrentKeySet;
        that is, where previously used bindings are disabled.
        """
        keysName = extensionName + '_cfgBindings'
        activeKeys = self.GetCurrentKeySet()
        extKeys = {}
        if self.defaultCfg['extensions'].has_section(keysName):
            eventNames = self.defaultCfg['extensions'].GetOptionList(keysName)
            for eventName in eventNames:
                event = '<<' + eventName + '>>'
                # look the binding up in the active (conflict-resolved) set
                binding = activeKeys[event]
                extKeys[event] = binding
        return extKeys
    def __GetRawExtensionKeys(self, extensionName):
        """
        returns a dictionary of the configurable keybindings for a particular
        extension, as defined in the configuration files, or an empty dictionary
        if no bindings are found
        """
        keysName = extensionName + '_cfgBindings'
        extKeys = {}
        if self.defaultCfg['extensions'].has_section(keysName):
            eventNames = self.defaultCfg['extensions'].GetOptionList(keysName)
            for eventName in eventNames:
                # bindings are stored as whitespace-separated key sequences
                binding = self.GetOption('extensions', keysName,
                                         eventName, default='').split()
                event = '<<' + eventName + '>>'
                extKeys[event] = binding
        return extKeys
    def GetExtensionBindings(self, extensionName):
        """
        Returns a dictionary of all the event bindings for a particular
        extension. The configurable keybindings are returned as they exist in
        the dictionary returned by GetCurrentKeySet; that is, where re-used
        keybindings are disabled.
        """
        bindsName = extensionName + '_bindings'
        extBinds = self.GetExtensionKeys(extensionName)
        #add the non-configurable bindings
        if self.defaultCfg['extensions'].has_section(bindsName):
            eventNames = self.defaultCfg['extensions'].GetOptionList(bindsName)
            for eventName in eventNames:
                binding = self.GetOption('extensions', bindsName,
                                         eventName, default='').split()
                event = '<<' + eventName + '>>'
                extBinds[event] = binding
        return extBinds
    def GetKeyBinding(self, keySetName, eventStr):
        """
        returns the keybinding for a specific event.
        keySetName - string, name of key binding set
        eventStr - string, the virtual event we want the binding for,
        represented as a string, eg. '<<event>>'

        Returns a (possibly empty) list of key sequences.
        """
        eventName = eventStr[2:-2]  # trim off the angle brackets
        binding = self.GetOption('keys', keySetName, eventName, default='').split()
        return binding
    def GetCurrentKeySet(self):
        """Return the key set for the currently active key set name,
        adjusted for the macOS AquaTk quirk described below."""
        result = self.GetKeySet(self.CurrentKeys())

        if macosxSupport.runningAsOSXApp():
            # We're using AquaTk, replace all keybindings that use the
            # Alt key by ones that use the Option key because the former
            # don't work reliably.
            for k, v in result.items():
                v2 = [x.replace('<Alt-', '<Option-') for x in v]
                if v != v2:
                    result[k] = v2

        return result
    def GetKeySet(self, keySetName):
        """
        Returns a dictionary of: all requested core keybindings, plus the
        keybindings for all currently active extensions. If a binding defined
        in an extension is already in use, that binding is disabled.
        """
        keySet = self.GetCoreKeys(keySetName)
        activeExtns = self.GetExtensions(active_only=1)
        for extn in activeExtns:
            extKeys = self.__GetRawExtensionKeys(extn)
            if extKeys:  # the extension defines keybindings
                for event in extKeys.keys():
                    if extKeys[event] in keySet.values():
                        #the binding is already in use
                        extKeys[event] = ''  # disable this binding
                    keySet[event] = extKeys[event]  # add binding
        return keySet
    def IsCoreBinding(self, virtualEvent):
        """
        returns true if the virtual event is bound in the core idle keybindings.
        virtualEvent - string, name of the virtual event to test for, without
        the enclosing '<< >>'
        """
        # GetCoreKeys() without a key set name returns the built-in defaults
        return ('<<' + virtualEvent + '>>') in self.GetCoreKeys().keys()
    def GetCoreKeys(self, keySetName=None):
        """
        returns the requested set of core keybindings, with fallbacks if
        required.
        Keybindings loaded from the config file(s) are loaded _over_ these
        defaults, so if there is a problem getting any core binding there will
        be an 'ultimate last resort fallback' to the CUA-ish bindings
        defined here.
        """
        keyBindings = {
            '<<copy>>': ['<Control-c>', '<Control-C>'],
            '<<cut>>': ['<Control-x>', '<Control-X>'],
            '<<paste>>': ['<Control-v>', '<Control-V>'],
            '<<beginning-of-line>>': ['<Control-a>', '<Home>'],
            '<<center-insert>>': ['<Control-l>'],
            '<<close-all-windows>>': ['<Control-q>'],
            '<<close-window>>': ['<Alt-F4>'],
            '<<do-nothing>>': ['<Control-x>'],
            '<<end-of-file>>': ['<Control-d>'],
            '<<python-docs>>': ['<F1>'],
            '<<python-context-help>>': ['<Shift-F1>'],
            '<<history-next>>': ['<Alt-n>'],
            '<<history-previous>>': ['<Alt-p>'],
            '<<interrupt-execution>>': ['<Control-c>'],
            '<<view-restart>>': ['<F6>'],
            '<<restart-shell>>': ['<Control-F6>'],
            '<<open-class-browser>>': ['<Alt-c>'],
            '<<open-module>>': ['<Alt-m>'],
            '<<open-new-window>>': ['<Control-n>'],
            '<<open-window-from-file>>': ['<Control-o>'],
            '<<plain-newline-and-indent>>': ['<Control-j>'],
            '<<print-window>>': ['<Control-p>'],
            '<<redo>>': ['<Control-y>'],
            '<<remove-selection>>': ['<Escape>'],
            '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
            '<<save-window-as-file>>': ['<Alt-s>'],
            '<<save-window>>': ['<Control-s>'],
            '<<select-all>>': ['<Alt-a>'],
            '<<toggle-auto-coloring>>': ['<Control-slash>'],
            '<<undo>>': ['<Control-z>'],
            '<<find-again>>': ['<Control-g>', '<F3>'],
            '<<find-in-files>>': ['<Alt-F3>'],
            '<<find-selection>>': ['<Control-F3>'],
            '<<find>>': ['<Control-f>'],
            '<<replace>>': ['<Control-h>'],
            '<<goto-line>>': ['<Alt-g>'],
            '<<smart-backspace>>': ['<Key-BackSpace>'],
            '<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
            '<<smart-indent>>': ['<Key-Tab>'],
            '<<indent-region>>': ['<Control-Key-bracketright>'],
            '<<dedent-region>>': ['<Control-Key-bracketleft>'],
            '<<comment-region>>': ['<Alt-Key-3>'],
            '<<uncomment-region>>': ['<Alt-Key-4>'],
            '<<tabify-region>>': ['<Alt-Key-5>'],
            '<<untabify-region>>': ['<Alt-Key-6>'],
            '<<toggle-tabs>>': ['<Alt-Key-t>'],
            '<<change-indentwidth>>': ['<Alt-Key-u>'],
            '<<del-word-left>>': ['<Control-Key-BackSpace>'],
            '<<del-word-right>>': ['<Control-Key-Delete>']
            }
        if keySetName:
            # overlay the stored bindings for the named key set; fall back
            # to the hard-coded default (with a warning) for missing events
            for event in keyBindings.keys():
                binding = self.GetKeyBinding(keySetName, event)
                if binding:
                    keyBindings[event] = binding
                else:  # we are going to return a default, print warning
                    warning = ('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
                               ' -\n problem retrieving key binding for event %r'
                               '\n from key set %r.\n'
                               ' returning default value: %r\n' %
                               (event, keySetName, keyBindings[event]))
                    try:
                        sys.stderr.write(warning)
                    except IOError:
                        pass
        return keyBindings
    def GetExtraHelpSourceList(self, configSet):
        """Fetch list of extra help sources from a given configSet.

        Valid configSets are 'user' or 'default'. Return a list of tuples of
        the form (menu_item , path_to_help_file , option), or return the empty
        list. 'option' is the sequence number of the help resource. 'option'
        values determine the position of the menu items on the Help menu,
        therefore the returned list must be sorted by 'option'.
        """
        helpSources = []
        if configSet == 'user':
            cfgParser = self.userCfg['main']
        elif configSet == 'default':
            cfgParser = self.defaultCfg['main']
        else:
            raise InvalidConfigSet, 'Invalid configSet specified'
        options = cfgParser.GetOptionList('HelpFiles')
        for option in options:
            # each entry has the form 'menu item;path/to/helpfile'
            value = cfgParser.Get('HelpFiles', option, default=';')
            if value.find(';') == -1:  # malformed config entry with no ';'
                menuItem = ''  # make these empty
                helpPath = ''  # so value won't be added to list
            else:  # config entry contains ';' as expected
                value = string.split(value, ';')
                menuItem = value[0].strip()
                helpPath = value[1].strip()
            if menuItem and helpPath:  # neither are empty strings
                helpSources.append((menuItem, helpPath, option))
        helpSources.sort(key=lambda x: int(x[2]))
        return helpSources
    def GetAllExtraHelpSourcesList(self):
        """
        Returns a list of tuples containing the details of all additional help
        sources configured, or an empty list if there are none. Tuples are of
        the format returned by GetExtraHelpSourceList.

        Default entries come before user entries.
        """
        allHelpSources = (self.GetExtraHelpSourceList('default') +
                          self.GetExtraHelpSourceList('user'))
        return allHelpSources
    def LoadCfgFiles(self):
        """
        load all configuration files.
        """
        for key in self.defaultCfg.keys():
            self.defaultCfg[key].Load()
            self.userCfg[key].Load()  # same keys

    def SaveUserCfgFiles(self):
        """
        write all loaded user configuration files back to disk
        """
        for key in self.userCfg.keys():
            self.userCfg[key].Save()
# shared IdleConf instance, created (and config files loaded) at import time
idleConf = IdleConf()

### module test
if __name__ == '__main__':
    def dumpCfg(cfg):
        # print every section/option/value of each parser in the dict
        print '\n', cfg, '\n'
        for key in cfg.keys():
            sections = cfg[key].sections()
            print key
            print sections
            for section in sections:
                options = cfg[key].options(section)
                print section
                print options
                for option in options:
                    print option, '=', cfg[key].Get(section, option)
    dumpCfg(idleConf.defaultCfg)
    dumpCfg(idleConf.userCfg)
    print idleConf.userCfg['main'].Get('Theme', 'name')
    #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
| lgpl-3.0 |
theonlynexus/gui2py | gui/__init__.py | 13 | 2100 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py: Simple and powerful GUI framework for agile development - Main Package"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "0.9.4"
# some parts where inspired or borrowed from different sources & projects
# please see the respective files or commit messages for proper recognition
import wx
# TODO: we'd select the tested wx version before importing it (or warn):
##import wxversion
##wxversion.select("2.9")
# useful shortcuts:
from .controls import Label, Button, TextBox, CheckBox, ListBox, ComboBox, \
HtmlBox, Image, Gauge, Slider, ListView, ListColumn, \
TreeView, Notebook, TabPanel, Panel, RadioButton, Line, \
GridView, GridColumn
from .windows import Window, HtmlWindow
from .menu import MenuBar, Menu, MenuItem, MenuItemCheckable, MenuItemSeparator
from .statusbar import StatusBar
from .component import get
from .dialog import alert, prompt, confirm, select_font, select_color, \
open_file, save_file, choose_directory, \
single_choice, multiple_choice, find
#from . import tools
import os
# disable ubuntu unified menu
# (the global menu proxy interferes with wx menubars on Unity desktops)
os.environ['UBUNTU_MENUPROXY'] = '0'

# create an app, note that the app could be already created (i.e. by an IDE):
app = wx.GetApp()
if app is None:
    # we own the wx application: expose its real main loop
    app = wx.App(False)
    main_loop = app.MainLoop
else:
    # app and main loop is already created and executed by a third party tool
    main_loop = lambda: None
from .resource import parse, load, dump, save, connect, Controller
# useful functions (shortcuts)
def inspect(obj):
    "Open the inspector windows for a given object"
    # imported lazily so the tools subpackage is only loaded on demand
    from gui.tools.inspector import InspectorTool
    inspector = InspectorTool()
    inspector.show(obj)
    return inspector
def shell():
    """Open an interactive shell window and return it."""
    from gui.tools.debug import Shell
    console = Shell()
    console.show()
    return console
# convenience aliases for scheduling callables on the wx main loop
call_after = wx.CallAfter
call_later = wx.CallLater
| lgpl-3.0 |
openshift/openshift-tools | ansible/roles/lib_oa_openshift/library/oc_edit.py | 18 | 55795 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/edit -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_edit
short_description: Modify, and idempotently manage openshift objects.
description:
- Modify openshift objects programmatically.
options:
state:
description:
- Currently present is only supported state.
required: true
default: present
choices: ["present"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
kind:
description:
- The kind attribute of the object.
required: True
default: None
choices:
- bc
- buildconfig
- configmaps
- dc
- deploymentconfig
- imagestream
- imagestreamtag
- is
- istag
- namespace
- project
- projects
- node
- ns
- persistentvolume
- pv
- rc
- replicationcontroller
- routes
- scc
- secret
- securitycontextconstraints
- service
- svc
aliases: []
file_name:
description:
- The file name in which to edit
required: false
default: None
aliases: []
file_format:
description:
- The format of the file being edited.
required: false
default: yaml
aliases: []
content:
description:
- Content of the file
required: false
default: None
aliases: []
edits:
description:
- a list of dictionaries with a yedit format for edits
required: false
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: None
aliases: []
separator:
description:
- The separator format for the edit.
required: false
default: '.'
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
oc_edit:
kind: rc
name: hawkular-cassandra-rc
namespace: openshift-infra
content:
spec.template.spec.containers[0].resources.limits.memory: 512
spec.template.spec.containers[0].resources.requests.memory: 256
'''
# -*- -*- -*- End included fragment: doc/edit -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    ''' Exception class for Yedit

    Raised when a key path is invalid or conflicts with the
    existing document structure during an edit.
    '''
    pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup_ext=None,
                 backup=False):
        '''Store the edit options and parse the document via self.load().'''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # default backup extension: a timestamp taken at construction time
        if backup_ext is None:
            self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
        else:
            self.backup_ext = backup_ext
        self.load(content_type=self.content_type)
        # an empty/unparsed document is represented as an empty dict
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    # --- accessors for the key separator and the parsed document ---

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key

        Returns True when *key* matches re_valid_key, False otherwise.
        '''
        # NOTE(review): re_valid_key embeds '%s' but is expanded with
        # str.format(), which leaves '%s' as literal characters in the
        # character class instead of interpolating the separators.
        # Preserved as-is so the set of accepted keys does not change;
        # confirm against upstream before fixing.
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False
        return True
    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def remove_entry(data, key, index=None, value=None, sep='.'):
        ''' remove data at location key

        Returns True on removal, False when a requested list value is
        absent, and None for an invalid key or unreachable path.
        '''
        # Empty key: operate on the top-level container itself.
        if key == '' and isinstance(data, dict):
            if value is not None:
                data.pop(value)
            elif index is not None:
                raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
            else:
                data.clear()

            return True

        elif key == '' and isinstance(data, list):
            ind = None
            if value is not None:
                try:
                    ind = data.index(value)
                except ValueError:
                    return False
            elif index is not None:
                ind = index
            else:
                # no selector given: empty the whole list
                del data[:]

            if ind is not None:
                data.pop(ind)

            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk down to the parent container of the entry being removed
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        '''Store *item* at location *key* using a.b.c key notation,
        creating intermediate dictionaries along the way.

        Returns the container the assignment was applied to (the item
        itself when key is ''), or None for an invalid key.  Raises
        YeditException when an existing value conflicts with the path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk (and create, for dict segments) down to the parent of the
        # target entry
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                # missing (or falsy) intermediate node: create an empty dict
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c

        Returns None when the key is malformed or any path component is
        missing; an empty key returns the document itself.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # each component is an (array_index, dict_key) pair; one side is empty
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                # component does not fit the current container: path missing
                return None
        return data
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking.

        Writes to a ``<filename>.yedit`` sidecar first, then renames over the
        target so readers never observe a half-written file.
        '''
        tmp_filename = filename + '.yedit'
        with open(tmp_filename, 'w') as yfd:
            # non-blocking exclusive lock: raises immediately if another
            # writer currently holds the sidecar
            fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yfd.write(contents)
            fcntl.flock(yfd, fcntl.LOCK_UN)
        os.rename(tmp_filename, filename)
    def write(self):
        ''' Serialize the in-memory document to self.filename.

        Honors self.backup (copies <filename><backup_ext> first) and
        self.content_type ('yaml' or 'json').  Returns (True, yaml_dict);
        raises YeditException for a missing filename or unsupported type.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))
        # Try to set format attributes if supported
        # (ruamel.yaml documents expose .fa; plain PyYAML data does not)
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass
        # Try to use RoundTripDumper if supported.
        if self.content_type == 'yaml':
            try:
                Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
            except AttributeError:
                # PyYAML has no RoundTripDumper; fall back to safe_dump
                Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        elif self.content_type == 'json':
            Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
        else:
            # NOTE(review): message is missing a space between the sentences
            # ('....Please'); left untouched because it is a runtime string.
            raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')
        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
    def load(self, content_type='yaml'):
        ''' Parse the backing file (or self.content) into self.yaml_dict.

        self.content, when set, takes precedence over the file: a dict is
        used verbatim, a str is parsed.  Returns the resulting document or
        None when there is nothing to load; raises YeditException on YAML
        parse errors.
        '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
                # Try to use RoundTripLoader if supported.
                # (ruamel.yaml preserves formatting; PyYAML fallback does not)
                try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))
        return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list

        Returns a (changed, yaml_dict) tuple; changed is False when the path
        is missing, the container type does not match, or the key/item is
        not present.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                # remove the first occurrence of the item by value
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        # scalar at path: nothing to pop from
        return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path

        For a list entry: membership test.  For a dict entry compared
        against a dict value: every key/value pair in *value* must match.
        Otherwise: direct equality.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                # NOTE(review): entry[key] raises KeyError when a compared key
                # is absent from the stored dict -- confirm callers expect that.
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    # loop completed without a mismatch
                    rval = True
                return rval
            # non-dict value against a dict entry: checks key membership
            return value in entry
        return entry == value
    def append(self, path, value):
        '''append value to a list

        Creates an empty list at *path* when nothing exists there yet.
        Returns a (changed, yaml_dict) tuple; changed is False when the
        existing entry is not a list.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            self.put(path, [])
            # re-read: put() rebuilt the document copy
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict

        dict entry: merge *value* (must itself be a dict) into it.
        list entry: replace the element located by *curr_value* or *index*,
        otherwise append *value* if it is not already present.
        Returns a (changed, yaml_dict) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into a dict

        Mutates a round-trip copy of the document first so a failed
        add_entry leaves self.yaml_dict untouched.  Returns a
        (changed, yaml_dict) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        # already holds the requested value: no-op
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)
        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' create a yaml file

        Only mutates the in-memory document, and only when the backing file
        does not exist yet.  Returns a (changed, yaml_dict) tuple.
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)
            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed

        Validates boolean strings when vtype mentions 'bool', stringifies
        bools when vtype mentions 'str', and otherwise YAML-loads string
        input so '1' becomes 1, 'true' becomes True, etc.
        '''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
        return inc_value
    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one

        Each edit dict carries 'key', 'value', and optionally 'action'
        ('update'/'append'; anything else means put), 'value_type', 'index',
        'curr_value', and 'curr_value_format'.  Returns
        {'changed': bool, 'results': [per-edit outcomes]}.
        '''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))
                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)
            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)
            else:
                rval = yamlfile.put(edit['key'], value)
            # rval is a (changed, document) tuple from the Yedit mutators
            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})
        return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
backup_ext=params['backup_ext'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we enapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised for failures in the openshift CLI wrapper layer.'''
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Each helper builds an argument list and delegates to openshift_cmd,
    which invokes the located `oc` binary with KUBECONFIG pointed at a
    private temp copy of the supplied kubeconfig.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # operate on a private copy so the caller's kubeconfig is never mutated
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        updated = False

        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))

            if any([change[0] for change in changes]):
                updated = True

        elif edits is not None:
            results = Yedit.process_edits(edits, yed)

            if results['changed']:
                updated = True

        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # read the template from stdin instead of a named template
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-p')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]

        if selector is not None:
            cmd.append('--selector={}'.format(selector))

        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))

        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Returns a dict with 'returncode', 'cmd', 'results', and, on
        failure, 'stdout'/'stderr'.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # BUGFIX: accept the intended spelling 'empty' in addition to the
        # historical misspelling 'emtpy' (kept for backward compatibility).
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # NOTE(review): this matches the Python 2 json error text;
                    # Python 3 raises "Expecting value ...", so 'err' may never
                    # be set there -- verify before relying on it.
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))
    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        ftype selects the serialization: 'yaml' (round-trip dump when
        ruamel is available), 'json', or raw passthrough.  The file is
        registered for cleanup at interpreter exit.  Returns the path.
        '''
        tmp = Utils.create_tmpfile(prefix=rname)
        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp
    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        # NOTE(review): the source handle is never explicitly closed here;
        # it is reclaimed by garbage collection.
        Utils._write(tmpfile, open(inc_file).read())
        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile
    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False: only the name is wanted; the file persists for reuse
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name
    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name

        Returns the first result whose metadata.name matches, else None.
        '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

        Maps each known tool name to the last whitespace-separated token of
        its `oc version` output line.
        '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']
        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]
        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        # NOTE(review): raises KeyError when the 'oc' line is also absent.
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]
        return version_dict
    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings

        Adds '<tech>_numeric' (build metadata stripped) and '<tech>_short'
        (major.minor) entries for each version that carries a 'v' prefix.
        '''
        versions_dict = {}
        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]
            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
        return versions_dict
    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # deferred import: the rpm bindings only exist on RPM-based hosts
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
        return rpmquery.count() > 0
    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Keys in *skip* (always 'metadata' and 'status', plus *skip_keys*)
        are ignored.  Returns True when every remaining key/value in
        result_def matches user_def, recursing into nested dicts and lists
        of dicts.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False
                    # NOTE(review): compares the whole lists (not the zipped
                    # pair) for each non-dict element -- confirm intended.
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False
            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False
        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic container for the options of a single OpenShift resource.'''

    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for option_name in sorted(self.config_options.keys()):
            data = self.config_options[option_name]
            # skip excluded options and options with no usable value
            # (integers, including 0, always count as usable)
            if not data['include']:
                continue
            if data['value'] is None and not isinstance(data['value'], int):
                continue
            if option_name == ascommalist:
                formatted = ','.join('{}={}'.format(item_key, item_val)
                                     for item_key, item_val in sorted(data['value'].items()))
            else:
                formatted = data['value']
            params.append('--{}={}'.format(option_name.replace('_', '-'), formatted))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_edit.py -*- -*- -*-
class Edit(OpenShiftCLI):
    ''' Class to wrap the oc command line tools

    Performs idempotent edits of an existing OpenShift resource, either
    from a local file or via the live object fetched from the cluster.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 kind,
                 namespace,
                 resource_name=None,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 separator='.',
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.kind = kind
        self.name = resource_name
        self.separator = separator

    def get(self):
        '''return a secret by name '''
        return self._get(self.kind, self.name)

    def update(self, file_name, content, edits, force=False, content_type='yaml'):
        '''run update

        file_name: optional local definition file to edit in place;
        content: dict of key -> value puts (kept for compatibility);
        edits: list of Yedit edit dicts; force: pass --force to oc replace;
        content_type: 'yaml' or 'json' for file_name parsing.
        '''
        if file_name:
            # BUGFIX: use context managers so the file handles are closed
            # (previously yaml.load(open(file_name)) leaked the descriptor).
            if content_type == 'yaml':
                with open(file_name) as sourcefile:
                    data = yaml.load(sourcefile)
            elif content_type == 'json':
                with open(file_name) as sourcefile:
                    data = json.loads(sourcefile.read())
            # NOTE(review): any other content_type leaves `data` unbound and
            # raises NameError below, matching the original behavior.

            yed = Yedit(filename=file_name, content=data, separator=self.separator)

            # Keep this for compatibility
            if content is not None:
                changes = []
                for key, value in content.items():
                    changes.append(yed.put(key, value))

                if any([not change[0] for change in changes]):
                    return {'returncode': 0, 'updated': False}

            elif edits is not None:
                results = Yedit.process_edits(edits, yed)

                if not results['changed']:
                    return results

            yed.write()

            atexit.register(Utils.cleanup, [file_name])

            return self._replace(file_name, force=force)

        return self._replace_content(self.kind, self.name, content, edits, force=force, sep=self.separator)

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the oc_edit module

        Returns an Ansible result dict; fails when the target resource does
        not exist, reports changed=False for a no-op edit.
        '''
        ocedit = Edit(params['kind'],
                      params['namespace'],
                      params['name'],
                      kubeconfig=params['kubeconfig'],
                      separator=params['separator'],
                      verbose=params['debug'])

        api_rval = ocedit.get()

        ########
        # Create
        ########
        if not Utils.exists(api_rval['results'], params['name']):
            return {"failed": True, 'msg': api_rval}

        ########
        # Update
        ########
        if check_mode:
            return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}

        api_rval = ocedit.update(params['file_name'],
                                 params['content'],
                                 params['edits'],
                                 params['force'],
                                 params['file_format'])

        if api_rval['returncode'] != 0:
            return {"failed": True, 'msg': api_rval}

        if 'updated' in api_rval and not api_rval['updated']:
            return {"changed": False, 'results': api_rval, 'state': 'present'}

        # return the created object
        api_rval = ocedit.get()

        if api_rval['returncode'] != 0:
            return {"failed": True, 'msg': api_rval}

        return {"changed": True, 'results': api_rval, 'state': 'present'}
# -*- -*- -*- End included fragment: class/oc_edit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_edit.py -*- -*- -*-
def main():
    '''
    ansible oc module for editing objects
    '''
    # Declare the module interface; 'content' and 'edits' are mutually
    # exclusive but at least one of them must be supplied.
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, required=True, type='str'),
            kind=dict(required=True, type='str'),
            file_name=dict(default=None, type='str'),
            file_format=dict(default='yaml', type='str'),
            content=dict(default=None, type='dict'),
            force=dict(default=False, type='bool'),
            separator=dict(default='.', type='str'),
            edits=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[['content', 'edits']],
        required_one_of=[['content', 'edits']],
    )

    rval = Edit.run_ansible(module.params, module.check_mode)
    # run_ansible signals errors by including a 'failed' key in its result.
    if 'failed' in rval:
        module.fail_json(**rval)

    module.exit_json(**rval)


# Script entry point.
if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_edit.py -*- -*- -*-
| apache-2.0 |
digitaleric-google/GCG-3.3 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Accumulates counts of events with no dedicated handler, keyed by name.
unhandled = autodict()

def trace_begin():
    # Called by perf before the first event is delivered.
    print "trace_begin"
    pass

def trace_end():
    # Called by perf after the last event; dump the unhandled-event table.
    print_unhandled()

def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for irq:softirq_entry; print common fields then the vector.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    # symbol_str decodes the numeric vector to its symbolic name;
    # the trailing comma suppresses Python 2 print's extra newline.
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),

def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc; print allocation details and decoded flags.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    # flag_str renders the gfp_flags bitmask symbolically.
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),

def trace_unhandled(event_name, context, event_fields_dict):
    # autodict returns a nested autodict for a missing key, so "+= 1"
    # raises TypeError the first time an event is seen; seed it with 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line prefix shared by all event handlers (trailing comma:
    # no newline, so the handler can append its own fields).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),

# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))

def print_unhandled():
    # Render a two-column table of event name -> occurrence count.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
eduNEXT/edx-platform | openedx/core/djangoapps/content/course_overviews/tests/factories.py | 4 | 1249 | # lint-amnesty, pylint: disable=missing-module-docstring
from datetime import timedelta
import json
from django.utils import timezone
import factory
from factory.django import DjangoModelFactory
from opaque_keys.edx.locator import CourseLocator
from ..models import CourseOverview
class CourseOverviewFactory(DjangoModelFactory):  # lint-amnesty, pylint: disable=missing-class-docstring
    """Factory for CourseOverview rows; the id is derived from org/'toy'/run."""

    class Meta:
        model = CourseOverview
        # Reuse an existing row with the same id instead of inserting twice.
        django_get_or_create = ('id', )
        # 'run' only feeds the id computation below; it is not a model field.
        exclude = ('run', )

    version = CourseOverview.VERSION
    # NOTE(review): mutable class-level default shared across declarations —
    # confirm instances are not expected to mutate it.
    pre_requisite_courses = []
    org = 'edX'
    # Unique run per factory invocation: 2012_Fall_0, 2012_Fall_1, ...
    run = factory.Sequence('2012_Fall_{}'.format)

    @factory.lazy_attribute
    def _pre_requisite_courses_json(self):
        # Serialized form of the prerequisite list stored on the model.
        return json.dumps(self.pre_requisite_courses)

    @factory.lazy_attribute
    def _location(self):
        # Usage key of the course root block.
        return self.id.make_usage_key('course', 'course')

    @factory.lazy_attribute
    def id(self):
        return CourseLocator(self.org, 'toy', self.run)

    @factory.lazy_attribute
    def display_name(self):
        return f"{self.id} Course"

    @factory.lazy_attribute
    def start(self):
        return timezone.now()

    @factory.lazy_attribute
    def end(self):
        # Courses run for 30 days by default.
        return timezone.now() + timedelta(30)
Backspace-Dev/x920d-jp | scripts/gcc-wrapper.py | 501 | 3410 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Warnings at these file:line locations are tolerated; anything else fails
# the build.
allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
 ])

# Capture the name of the object file, so it can be removed on failure.
ofile = None

# Matches "<path/>file.ext:line:[col:] warning:"; group 2 is "file.ext:line".
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning"""
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        # Abort the build with a failure exit status.
        sys.exit(1)
def run_gcc():
    """Run the real compiler with our arguments, scanning stderr for
    forbidden warnings; returns the compiler's exit status (or an errno
    when the compiler could not be launched)."""
    args = sys.argv[1:]
    # Look for -o
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): 'compiler' is assigned but never used below.
    compiler = sys.argv[0]
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        # Echo each stderr line and kill the build on forbidden warnings.
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result

if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
B0fH/dionaea | modules/python/dionaea/emu_scripts/handler.py | 3 | 2573 | import logging
import re
logger = logging.getLogger("emu_scripts")
class BaseHandler(object):
    """Base for script handlers: match detection regexes against a byte
    buffer and, when enough of them hit, extract download URLs from it."""

    # Handler name used in log messages; subclasses override it.
    name = ""

    def __init__(self, config=None):
        # Optional handler configuration; only a dict is accepted.
        self._config = {}
        if isinstance(config, dict):
            self._config = config
        # Minimum number of detection regexes that must match before URLs
        # are reported; subclasses raise this.
        self.min_match_count = 0
        # Detection patterns; empty in the base class.
        self._regex_detect = []
        # Raw bytes literal: identical pattern, but avoids the deprecated
        # un-raw "\w"/"\." escape sequences (SyntaxWarning, future error).
        self._regex_url = re.compile(
            rb"(?P<url>(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)"
        )

    def run(self, data):
        """Return URL byte strings found in *data*, or None when fewer than
        min_match_count detection regexes matched."""
        match_count = sum(1 for regex in self._regex_detect if regex.search(data))
        if match_count < self.min_match_count:
            logger.info("Match count for %s is %d should at least be %d", self.name, match_count, self.min_match_count)
            return
        logger.info("Looking for URLs '%s'", self.name)
        return [m.group("url") for m in self._regex_url.finditer(data)]
class RawURL(object):
    """Extract every http/ftp/https URL from a byte buffer, with no
    surrounding-context requirements (unlike the script-specific handlers)."""

    name = "raw_url"

    def __init__(self, config=None):
        # Optional handler configuration; only a dict is accepted.
        self._config = {}
        if isinstance(config, dict):
            self._config = config
        # Raw bytes literal: identical pattern, but avoids the deprecated
        # un-raw "\w"/"\." escape sequences (SyntaxWarning, future error).
        self._regex_url = re.compile(
            rb"(?P<url>(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)"
        )

    def run(self, data):
        """Return a list of URL byte strings found anywhere in *data*."""
        return [m.group("url") for m in self._regex_url.finditer(data)]
class PowerShell(BaseHandler):
    """Detect downloader-style PowerShell snippets (WebClient/DownloadFile/
    Invoke-Expression) and extract the URLs assigned to variables."""

    name = "powershell"

    def __init__(self, config=None):
        BaseHandler.__init__(self, config=config)
        # At least two indicator patterns must match before run() reports URLs.
        self.min_match_count = 2
        # Raw bytes literals: identical behavior, but without the deprecated
        # un-raw "\w"/"\." escape sequences (SyntaxWarning, future error).
        # NOTE(review): the parentheses in the 2nd and 3rd patterns are
        # capture groups, not literal "("/")" — possibly intended as \( \);
        # kept as-is to preserve behavior.
        self._regex_detect = [
            re.compile(rb"New-Object\s+System\.Net\.WebClient"),
            re.compile(rb"DownloadFile([^,]+?,[^,]+?)"),
            re.compile(rb"Invoke-Expression([^)]+?)")
        ]
        # URL must appear as a quoted assignment: var = "http://..."
        self._regex_url = re.compile(
            rb'\w+\s*=\s*"\s*(?P<url>(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)\s*"'
        )
class VBScript(BaseHandler):
    """Detect VBScript download snippets (Msxml2.XMLHTTP / Wscript.Shell)
    and extract the URL passed to .Open "GET"."""

    name = "vbscript"

    def __init__(self, config=None):
        BaseHandler.__init__(self, config=config)
        # A single indicator match is enough for this handler.
        self.min_match_count = 1
        # Raw bytes literals: identical behavior, but without the deprecated
        # un-raw "\w"/"\s" escape sequences (SyntaxWarning, future error).
        # NOTE(review): the dots in "Msxml2.XMLHTTP"/"Wscript.Shell" are
        # unescaped and match any byte; kept as-is to preserve behavior.
        self._regex_detect = [
            re.compile(rb"Set\s+\w+\s+=\s+CreateObject\(.*?(Msxml2.XMLHTTP|Wscript.Shell).*?\)")
        ]
        self._regex_url = re.compile(
            rb'\.Open\s+"GET"\s*,\s*"(?P<url>(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)"'
        )
applephil/headlyne | venv/lib/python2.7/site-packages/gunicorn/six.py | 13 | 13679 | """Utilities for writing code that runs on Python 2 and 3"""
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform == "java":
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package for dotted names, so the
    # actual leaf module is fetched from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules["gunicorn.six.moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    # _iterkeys is "keys" on Python 3 and "iterkeys" on Python 2.
    return iter(getattr(d, _iterkeys)())

def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    # _itervalues is "values" on Python 3 and "itervalues" on Python 2.
    return iter(getattr(d, _itervalues)())

def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    # _iteritems is "items" on Python 3 and "iteritems" on Python 2.
    return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import cStringIO
def StringIO(buf=''):
sio = cStringIO.StringIO()
if buf:
sio.write(buf)
sio.seek(0)
return sio
BytesIO = StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def _check_if_pyc(fname):
""" Returns True if the extension is .pyc, False if .py and None if otherwise """
from imp import find_module
from os.path import realpath, dirname, basename, splitext
# Normalize the file-path for the find_module()
filepath = realpath(fname)
dirpath = dirname(filepath)
module_name = splitext(basename(filepath))[0]
# Validate and fetch
try:
fileobj, fullpath, (_, _, pytype) = find_module(module_name, [ dirpath ])
except ImportError:
raise IOError("Cannot find config file. Path maybe incorrect! : {0}".format(filepath))
return (pytype, fileobj, fullpath)
def _get_codeobj(pyfile):
""" Returns the code object, given a python file """
from imp import PY_COMPILED, PY_SOURCE
result, fileobj, fullpath = _check_if_pyc(pyfile)
# WARNING:
# fp.read() can blowup if the module is extremely large file.
# Lookout for overflow errors.
try:
data = fileobj.read()
finally:
fileobj.close()
# This is a .pyc file. Treat accordingly.
if result is PY_COMPILED:
# .pyc format is as follows:
# 0 - 4 bytes: Magic number, which changes with each create of .pyc file.
# First 2 bytes change with each marshal of .pyc file. Last 2 bytes is "\r\n".
# 4 - 8 bytes: Datetime value, when the .py was last changed.
# 8 - EOF: Marshalled code object data.
# So to get code object, just read the 8th byte onwards till EOF, and UN-marshal it.
import marshal
code_obj = marshal.loads(data[8:])
elif result is PY_SOURCE:
# This is a .py file.
code_obj = compile(data, fullpath, 'exec')
else:
# Unsupported extension
raise Exception("Input file is unknown format: {0}".format(fullpath))
# Return code object
return code_obj
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
def execfile_(fname, *args):
if fname.endswith(".pyc"):
code = _get_codeobj(fname)
else:
code = compile(open(fname, 'rb').read(), fname, 'exec')
return exec_(code, *args)
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def execfile_(fname, *args):
""" Overriding PY2 execfile() implementation to support .pyc files """
if fname.endswith(".pyc"):
return exec_(_get_codeobj(fname), *args)
return execfile(fname, *args)
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Return a throwaway base class named ``NewBase`` built via *meta*.

    Inheriting from the result gives subclasses metaclass *meta* on both
    Python 2 and 3 without using either version's metaclass syntax.
    """
    name = "NewBase"
    bases = (base,)
    return meta(name, bases, {})
# specific to gunicorn
if PY3:
def bytes_to_str(b):
if isinstance(b, text_type):
return b
return str(b, 'latin1')
import urllib.parse
def unquote_to_wsgi_str(string):
return _unquote_to_bytes(string).decode('latin-1')
_unquote_to_bytes = urllib.parse.unquote_to_bytes
urlsplit = urllib.parse.urlsplit
urlparse = urllib.parse.urlparse
else:
def bytes_to_str(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
import urlparse as orig_urlparse
urlsplit = orig_urlparse.urlsplit
urlparse = orig_urlparse.urlparse
import urllib
unquote_to_wsgi_str = urllib.unquote
| apache-2.0 |
2015fallhw/cdw11 | users/a/g1/a40323105_1.py | 2 | 37865 | from flask import Blueprint, request
a40323105_1 = Blueprint('a40323105_1', __name__, url_prefix='/a40323105_1', template_folder='templates')
head_str = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
'''
tail_str = '''
</script>
</body>
</html>
'''
chain_str = '''
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
# 輪廓的外型設為 class variable
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
#chamber = "M 0, 0 L 0, -20 z"
cgoChamber = window.svgToCgoSVG(chamber)
def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
self.fillcolor = fillcolor
self.border = border
self.strokecolor = strokecolor
self.linewidth = linewidth
self.scale = scale
# 利用鏈條起點與終點定義繪圖
def basic(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, self.scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, v=False):
# 若 v 為 True 則為虛擬 chain, 不 render
self.x1 = x1
self.y1 = y1
self.rot = rot
self.v = v
# 注意, cgoChamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole0 為原點位置
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)*self.scale
y2 = y1 + 20*math.sin(rot*deg)*self.scale
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
if v == False:
cgo.render(basic1, x1, y1, self.scale, 0)
return x2, y2
'''
# 傳繪 A 函式內容
def a(x, y, scale=1, color="green"):
outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain(scale='''+str(scale)+''', fillcolor="'''+str(color)+'''")
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1)
'''
return outstring
# 傳繪 B 函式內容
def b(x, y):
outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
# 每一個字元間隔為 65 pixels
#x1, y1 = mychain.basic_rot(0+ 65, 0, 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, '''+str(x)+","+str(y)+''')
'''
return outstring
# 傳繪 C 函式內容
def c(x, y):
outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
#x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+'''-10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
#x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
x5, y5 = mychain.basic_rot('''+str(x)+","+str(y)+'''-10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0)
'''
return outstring
# 傳繪 D 函式內容
def d(x, y):
outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 左邊四個垂直單元
#x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
#mychain.basic(x10, y10, 0+65*3, 0, color="red")
mychain.basic(x10, y10, '''+str(x)+","+str(y)+''')
'''
return outstring
def circle(x, y):
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 50)
'''
for i in range(2, 10):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*40)+") \n"
return outstring
def circle1(x, y, degree=10):
# 20 為鏈條兩圓距
# chain 所圍之圓圈半徑為 20/2/math.asin(degree*math.pi/180/2)
# degree = math.asin(20/2/radius)*180/math.pi
#degree = 10
first_degree = 90 - degree
repeat = 360 / degree
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
return outstring
def circle2(x, y, degree=10):
    """Return Brython source drawing a full circle of chain links.

    This function was a byte-for-byte duplicate of circle1(); it now
    delegates to it so the generation logic lives in one place.  The
    interface and output are unchanged.
    """
    return circle1(x, y, degree)
def twocircle(x, y):
    """Generate Brython source for two chain circles joined by two straight
    runs (a belt / chain-drive shape).

    NOTE(review): the x and y parameters are immediately overwritten below,
    so the drawing position is effectively fixed regardless of the caller.
    """
    # 20 is the pitch between adjacent chain link centers.
    # The radius of the circle the chain wraps is 20/2/math.asin(degree*math.pi/180/2),
    # i.e. degree = math.asin(20/2/radius)*180/math.pi.
    x = 50
    y = 0
    degree = 12
    # Link angles step through 78, 66, 54, 42, 30, 18, 6 degrees.
    # Some links only compute coordinates and are not rendered ("virtual" links).
    first_degree = 90 - degree
    repeat = 360 / degree
    # Link 1 is also a virtual (non-rendered) chain link.
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''', True)
#x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
    # Leave one extra virtual link at top and bottom so the straight runs can
    # attach at (x7, y7) and (x22, y22).
    for i in range(2, int(repeat)+1):
        #if i < 7 or i > 23:
        if i <= 7 or i >= 23:
            # Virtual chain link: coordinates computed but not rendered.
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
            #outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
        else:
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
    # Second (left) circle, centered further left with a coarser pitch.
    p = -150
    k = 0
    degree = 20
    # Link angles 70, 50, 30, 10 degrees;
    # links from i=5 onward are virtual.
    first_degree = 90 - degree
    repeat = 360 / degree
    # Here link 1 IS rendered (not virtual).
    outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        if i >= 5 and i <= 13:
            # Virtual chain link: coordinates computed but not rendered.
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
            #outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
        else:
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
    # Upper connecting run.
    # Starts from (p4, k4).  (Original note said (p5, k5); the code below
    # actually uses (p4, k4).)
    first_degree = 10
    repeat = 11
    outstring += '''
m1, n1 = mychain.basic_rot(p4, k4, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
    # Lower connecting run.
    # Starts from (p13, k13).  (Original note said (p12, k12); the code below
    # actually uses (p13, k13).)
    first_degree = -10
    repeat = 11
    outstring += '''
r1, s1 = mychain.basic_rot(p13, k13, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
    # Upper run: right end joins (x7, y7), left end joins (m11, n11).
    outstring += "mychain.basic(x7, y7, m11, n11)\n"
    # Lower run: right end joins (x22, y22), left end joins (r11, s11).
    outstring += "mychain.basic(x22, y22, r11, s11)\n"
    return outstring
def eighteenthirty(x, y):
    '''Build a Brython script for an 18/30-tooth two-wheel chain drive.

    The tangent-line coordinates, start points and angles below were obtained
    beforehand, graphically and symbolically (from the original notes):

    Outer tangent line coordinate pairs:
    (-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, 56.5714145924675), (-17.8936874260919, 93.9794075692901)
    (-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, -56.5714145924675), (-17.8936874260919, -93.9794075692901)
    Left key link start (-233.06, 49.48), angle 20.78, centre (-203.593, 0.0)
    Right key link start (-17.89, 93.9), angle 4.78, centre (0, 0)

    NOTE(review): x and y are immediately overwritten below, so the incoming
    arguments have no effect on the drawing.
    '''
    # 20 is the centre distance between the two holes of one link.
    # The radius of the circle the chain forms is 20/2/math.asin(degree*math.pi/180/2)
    # degree = math.asin(20/2/radius)*180/math.pi
    x = 50
    y = 0
    degree = 20
    first_degree = 20.78
    startx = -233.06+100
    starty = 49.48
    repeat = 360 / degree
    # Draw the left wheel's first key link.
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(startx)+","+str(starty)+", "+str(first_degree)+''')
'''
    # Then the remaining left-wheel links; links 2-11 are virtual
    # (coordinates computed but not rendered).
    for i in range(2, int(repeat)+1):
        if i >=2 and i <=11:
            # virtual chain
            #outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
        else:
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
    # Now the right wheel: draw its first key link.
    p = -17.89+100
    k = 93.98
    degree = 12
    first_degree = 4.78
    repeat = 360 / degree
    # Link 1 is not a virtual chain.
    outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        if i >=18:
            # virtual chain
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
            #outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
        else:
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
    # Upper connecting straight run, starting from (x1, y1).
    first_degree = 10.78
    repeat = 10
    outstring += '''
m1, n1 = mychain.basic_rot(x1, y1, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
    # Lower connecting straight run, starting from (x11, y11).
    first_degree = -10.78
    repeat = 10
    outstring += '''
r1, s1 = mychain.basic_rot(x11, y11, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
    return outstring
@a40323105_1.route('/a')
def draw_a():
    """Serve the chain-drawn letter 'a' at the origin."""
    return head_str + chain_str + a(0, 0) + tail_str
@a40323105_1.route('/b')
def draw_b():
    """Serve the chain-drawn letter 'b', offset one character slot to the right."""
    # Horizontal spacing between characters is 65 pixels, vertical spacing 110 pixels.
    return head_str + chain_str + b(0+65, 0) + tail_str
@a40323105_1.route('/c')
def draw_c():
    """Serve the chain-drawn letter 'c', two character slots to the right."""
    # Horizontal spacing between characters is 65 pixels.
    return head_str + chain_str + c(0+65*2, 0) + tail_str
@a40323105_1.route('/d')
def draw_d():
    """Serve the chain-drawn letter 'd', three character slots (65 px each) to the right."""
    return head_str + chain_str + d(0+65*3, 0) + tail_str
@a40323105_1.route('/ab')
def draw_ab():
    """Serve 'a' and 'b' stacked vertically (110 px apart) rather than side by side."""
    #return head_str + chain_str + a(0, 0) + b(0+65, 0) + tail_str
    return head_str + chain_str + a(0, 0) + b(0, 0-110) + tail_str
@a40323105_1.route('/ac')
def draw_ac():
    """Serve 'a' and 'c' side by side, 65 px apart."""
    return head_str + chain_str + a(0, 0) + c(0+65, 0) + tail_str
@a40323105_1.route('/bc')
def draw_bc():
    """Serve 'b' and 'c' side by side, 65 px apart."""
    return head_str + chain_str + b(0, 0) + c(0+65, 0) + tail_str
@a40323105_1.route('/abc')
def draw_abc():
    """Serve 'a', 'b' and 'c' in a row, 65 px apart."""
    return head_str + chain_str + a(0, 0) + b(0+65, 0) + c(0+65*2, 0) + tail_str
@a40323105_1.route('/aaaa')
def draw_aaaa():
    """Serve 20 copies of 'a', each shifted 10 px right and shrunk by 10%."""
    outstring = head_str + chain_str
    scale = 2
    for i in range(20):
        # First drawn glyph is at scale 1.8 (2 * 0.9), then 0.9x each step.
        scale = scale*0.9
        outstring += a(0+10*i, 0, scale=scale)
    return outstring + tail_str
    #return head_str + chain_str + a(0, 0, scale=1) + a(0+65, 0, scale=0.8, color="red") + a(0+65*2, 0, scale=0.6) + a(0+65*3, 0, scale=0.4, color="red") + tail_str
@a40323105_1.route('/badc')
def draw_badc():
    """Serve the letters in b, a, d, c order, left to right, 65 px apart."""
    return head_str + chain_str + b(0, 0) + a(0+65, 0) + d(0+65*2, 0) + c(0+65*3, 0) + tail_str
@a40323105_1.route('/abcd')
def draw_abcd():
    """Serve 'a'-'d' stacked vertically, 110 px apart, topmost at y=110."""
    #return head_str + chain_str + a(0, 0) + b(0+65, 0) + c(0+65*2, 0) + d(0+65*3, 0) + tail_str
    return head_str + chain_str + a(0, 110) + b(0, 110-110) + c(0, 110-110*2) + d(0, 110-110*3) + tail_str
@a40323105_1.route('/circle')
def drawcircle():
    """Serve the default chain ring drawn at the origin."""
    return head_str + chain_str + circle(0, 0) + tail_str
@a40323105_1.route('/circle1/<degree>', defaults={'x': 0, 'y': 0})
@a40323105_1.route('/circle1/<x>/<degree>', defaults={'y': 0})
@a40323105_1.route('/circle1/<x>/<y>/<degree>')
#@a40323105_1.route('/circle1/<int:x>/<int:y>/<int:degree>')
def drawcircle1(x,y,degree):
    """Serve a chain ring at (x, y) with the given angular pitch.

    URL captures arrive as strings, hence the explicit int() conversions.
    """
    return head_str + chain_str + circle1(int(x), int(y), int(degree)) + tail_str
@a40323105_1.route('/circle2/<degree>', defaults={'x': 0, 'y': 0})
@a40323105_1.route('/circle2/<x>/<degree>', defaults={'y': 0})
@a40323105_1.route('/circle2/<x>/<y>/<degree>')
#@a40323105_1.route('/circle2/<int:x>/<int:y>/<int:degree>')
def drawcircle2(x,y,degree):
    """Serve a chain ring via circle2() at (x, y) with the given angular pitch.

    URL captures arrive as strings, hence the explicit int() conversions.
    """
    return head_str + chain_str + circle2(int(x), int(y), int(degree)) + tail_str
@a40323105_1.route('/twocircle/<x>/<y>')
@a40323105_1.route('/twocircle', defaults={'x':0, 'y':0})
def drawtwocircle(x,y):
    """Serve the two-wheel chain-drive drawing.

    NOTE(review): twocircle() hard-codes its own coordinates, so x/y have no
    visible effect here.
    """
    return head_str + chain_str + twocircle(int(x), int(y)) + tail_str
@a40323105_1.route('/eighteenthirty/<x>/<y>')
@a40323105_1.route('/eighteenthirty', defaults={'x':0, 'y':0})
def draweithteenthirdy(x,y):
    """Serve the 18/30-tooth chain-drive drawing.

    NOTE(review): eighteenthirty() overwrites its x/y arguments, so the URL
    coordinates have no visible effect.  (Function name typo is kept for URL
    and caller compatibility.)
    """
    return head_str + chain_str + eighteenthirty(int(x), int(y)) + tail_str
@a40323105_1.route('/snap')
# http://svg.dabbles.info/snaptut-base
def snap():
    """Serve a Brython/Snap.svg demo page: clickable shapes plus an animated,
    hoverable SVG loaded at runtime.  The page body is a fixed template string.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# 建立物件時, 同時設定 id 名稱
r = s.rect(10,10,100,100).attr({'id': 'rect'})
c = s.circle(100,100,50).attr({'id': 'circle'})
r.attr('fill', 'red')
c.attr({ 'fill': 'blue', 'stroke': 'black', 'strokeWidth': 10 })
r.attr({ 'stroke': '#123456', 'strokeWidth': 20 })
s.text(180,100, '點按一下圖形').attr({'fill' : 'blue', 'stroke': 'blue', 'stroke-width': 0.2 })
g = s.group().attr({'id': 'tux'})
def hoverover(ev):
    g.animate({'transform': 's1.5r45,t180,20'}, 1000, window.mina.bounce)
def hoverout(ev):
    g.animate({'transform': 's1r0,t180,20'}, 1000, window.mina.bounce)
# callback 函式
def onSVGLoaded(data):
    #s.append(data)
    g.append(data)
    #g.hover(hoverover, hoverout )
    g.text(300,100, '拿滑鼠指向我')
# 利用 window.Snap.load 載入 svg 檔案
tux = window.Snap.load("/static/Dreaming_tux.svg", onSVGLoaded)
g.transform('t180,20')
# 與視窗事件對應的函式
def rtoyellow(ev):
    r.attr('fill', 'yellow')
def ctogreen(ev):
    c.attr('fill', 'green')
# 根據物件 id 綁定滑鼠事件執行對應函式
document['rect'].bind('click', rtoyellow)
document['circle'].bind('click', ctogreen)
document['tux'].bind('mouseover', hoverover)
document['tux'].bind('mouseleave', hoverout)
</script>
</body>
</html>
'''
    return outstring
@a40323105_1.route('/snap_link')
# http://svg.dabbles.info/
def snap_link():
    """Serve a Brython/Snap.svg demo of five nested rotating squares; the
    outermost group is draggable.  The page body is a fixed template string.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
# 使用 id 為 "svgout" 的 svg 標註進行繪圖
s = snap("#svgout")
offsetY = 50
# 是否標訂出繪圖範圍
#borderRect = s.rect(0,0,800,640,10,10).attr({ 'stroke': "silver", 'fill': "silver", 'strokeWidth': "3" })
g = s.group().transform('t250,120')
r0 = s.rect(150,150,100,100,20,20).attr({ 'fill': "orange", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c0 = s.circle(225,225,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c0' })
g0 = s.group( r0,c0 ).attr({ 'id': 'g0' })
#g0.animate({ 'transform' : 't250,120r360,225,225' },4000)
g0.appendTo( g )
g0.animate({ 'transform' : 'r360,225,225' },4000)
# 讓 g0 可以拖動
g0.drag()
r1 = s.rect(100,100,100,100,20,20).attr({ 'fill': "red", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c1 = s.circle(175,175,10).attr({ 'fill': "silver", 'stroke': "black" , 'strokeWidth': "4"}).attr({ 'id': 'c1' })
g1 = s.group( r1,c1 ).attr({ 'id': 'g1' })
g1.appendTo( g0 ).attr({ 'id': 'g1' })
g1.animate({ 'transform' : 'r360,175,175' },4000)
r2 = s.rect(50,50,100,100,20,20).attr({ 'fill': "blue", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c2 = s.circle(125,125,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c2' })
g2 = s.group(r2,c2).attr({ 'id': 'g2' })
g2.appendTo( g1 );
g2.animate( { 'transform' : 'r360,125,125' },4000);
r3 = s.rect(0,0,100,100,20,20).attr({ 'fill': "yellow", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c3 = s.circle(75,75,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c3' })
g3 = s.group(r3,c3).attr({ 'id': 'g3' })
g3.appendTo( g2 )
g3.animate( { 'transform' : 'r360,75,75' },4000)
r4 = s.rect(-50,-50,100,100,20,20).attr({ 'fill': "green", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c4 = s.circle(25,25,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c4' })
g4 = s.group(r4,c4).attr({ 'id': 'g4' });
g4.appendTo( g3 )
g4.animate( { 'transform' : 'r360,25,25' },4000)
</script>
</body>
</html>
'''
    return outstring
@a40323105_1.route('/snap_gear')
def snap_gear():
    """Serve a minimal Brython/Snap.svg page that draws a single line
    (placeholder for a gear drawing).  The page body is a fixed template string.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# 畫直線
s.line(0, 0, 100, 100).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "1" }).attr({ 'id': 'line1' })
</script>
</body>
</html>
'''
    return outstring
@a40323105_1.route('/ag1_2D')
def ag1_2D():
    """Serve a Brython/Cango page that draws three meshing spur gears
    (17, 29 and 15 teeth) with their datum lines on an 800x750 canvas.
    The page body is a fixed template string.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango2D-7v01-min.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/gearUtils-05.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<canvas id='gear1' width='800' height='750'></canvas>
<script type="text/python">
# 將 導入的 document 設為 doc 主要原因在於與舊程式碼相容
from browser import document as doc
# 由於 Python3 與 Javascript 程式碼已經不再混用, 因此來自 Javascript 的變數, 必須居中透過 window 物件轉換
from browser import window
# 針對 Javascript 既有的物件, 則必須透過 JSConstructor 轉換
from javascript import JSConstructor
import math
# 主要用來取得畫布大小
canvas = doc["gear1"]
# 此程式採用 Cango Javascript 程式庫繪圖, 因此無需 ctx
#ctx = canvas.getContext("2d")
# 針對類別的轉換, 將 Cango.js 中的 Cango 物件轉為 Python cango 物件
cango = JSConstructor(window.Cango)
# 針對變數的轉換, shapeDefs 在 Cango 中資料型別為變數, 可以透過 window 轉換
shapedefs = window.shapeDefs
# 目前 Cango 結合 Animation 在 Brython 尚無法運作, 此刻只能繪製靜態圖形
# in CangoAnimation.js
#interpolate1 = window.interpolate
# Cobi 與 createGearTooth 都是 Cango Javascript 程式庫中的物件
cobj = JSConstructor(window.Cobj)
creategeartooth = JSConstructor(window.createGearTooth)
# 經由 Cango 轉換成 Brython 的 cango, 指定將圖畫在 id="plotarea" 的 canvas 上
cgo = cango("gear1")
######################################
# 畫正齒輪輪廓
#####################################
def spur(cx, cy, m, n, pa, theta):
    # n 為齒數
    #n = 17
    # pa 為壓力角
    #pa = 25
    # m 為模數, 根據畫布的寬度, 計算適合的模數大小
    # Module = mm of pitch diameter per tooth
    #m = 0.8*canvas.width/n
    # pr 為節圓半徑
    pr = n*m/2 # gear Pitch radius
    # generate gear
    data = creategeartooth(m, n, pa)
    # Brython 程式中的 print 會將資料印在 Browser 的 console 區
    #print(data)
    gearTooth = cobj(data, "SHAPE", {
        "fillColor":"#ddd0dd",
        "border": True,
        "strokeColor": "#606060" })
    #gearTooth.rotate(180/n) # rotate gear 1/2 tooth to mesh, 請注意 rotate 角度為 degree
    # theta 為角度
    gearTooth.rotate(theta)
    # 單齒的齒形資料經過旋轉後, 將資料複製到 gear 物件中
    gear = gearTooth.dup()
    # gear 為單一齒的輪廓資料
    #cgo.render(gearTooth)
    # 利用單齒輪廓旋轉, 產生整個正齒輪外形
    for i in range(1, n):
        # 將 gearTooth 中的資料複製到 newTooth
        newTooth = gearTooth.dup()
        # 配合迴圈, newTooth 的齒形資料進行旋轉, 然後利用 appendPath 方法, 將資料併入 gear
        newTooth.rotate(360*i/n)
        # appendPath 為 Cango 程式庫中的方法, 第二個變數為 True, 表示要刪除最前頭的 Move to SVG Path 標註符號
        gear.appendPath(newTooth, True) # trim move command = True
    # 建立軸孔
    # add axle hole, hr 為 hole radius
    hr = 0.6*pr # diameter of gear shaft
    shaft = cobj(shapedefs.circle(hr), "PATH")
    shaft.revWinding()
    gear.appendPath(shaft) # retain the 'moveTo' command for shaft sub path
    gear.translate(cx, cy)
    # render 繪出靜態正齒輪輪廓
    cgo.render(gear)
    # 接著繪製齒輪的基準線
    deg = math.pi/180
    Line = cobj(['M', cx, cy, 'L', cx+pr*math.cos(theta*deg), cy+pr*math.sin(theta*deg)], "PATH", {
        'strokeColor':'red', 'lineWidth': 5})
    cgo.render(Line)
# 3個齒輪的齒數
n1 = 17
n2 = 29
n3 = 15
# m 為模數, 根據畫布的寬度, 計算適合的模數大小
# Module = mm of pitch diameter per tooth
# 利用 80% 的畫布寬度進行繪圖
# 計算模數的對應尺寸
m = canvas.width*0.8/(n1+n2+n3)
# 根據齒數與模組計算各齒輪的節圓半徑
pr1 = n1*m/2
pr2 = n2*m/2
pr3 = n3*m/2
# 畫布左右兩側都保留畫布寬度的 10%
# 依此計算對應的最左邊齒輪的軸心座標
cx = canvas.width*0.1+pr1
cy = canvas.height/2
# pa 為壓力角
pa = 25
# 畫最左邊齒輪, 定位線旋轉角為 0, 軸心座標 (cx, cy)
spur(cx, cy, m, n1, pa, 0)
# 第2個齒輪將原始的定位線逆時鐘轉 180 度後, 與第1個齒輪正好齒頂與齒頂對齊
# 只要第2個齒輪再逆時鐘或順時鐘轉動半齒的角度, 即可完成囓合
# 每一個齒分別包括從齒根到齒頂的範圍, 涵蓋角度為 360/n, 因此所謂的半齒角度為 180/n
spur(cx+pr1+pr2, cy, m, n2, pa, 180-180/n2)
# 第2齒與第3齒的囓合, 首先假定第2齒的定位線在 theta 角為 0 的原始位置
# 如此, 第3齒只要逆時鐘旋轉 180 度後, 再逆時鐘或順時鐘轉動半齒的角度, 即可與第2齒囓合
# 但是第2齒為了與第一齒囓合時, 已經從原始定位線轉了 180-180/n2 度
# 而當第2齒從與第3齒囓合的定位線, 逆時鐘旋轉 180-180/n2 角度後, 原先囓合的第3齒必須要再配合旋轉 (180-180/n2 )*n2/n3
spur(cx+pr1+pr2+pr2+pr3, cy, m, n3, pa, 180-180/n3+(180-180/n2)*n2/n3)
</script>
</body>
</html>
'''
    return outstring
@a40323105_1.route('/ag1_2D1')
def ag1_2D1():
    """Serve a Brython/Cango page that renders the 18/30 chain drive as a
    static drawing (pre-computed link angles).  The page body is a fixed
    template string; the embedded script defines the `chain` link class.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
    "strokeColor":"#aaaaaa",
    "fillColor": "#aaaaaa",
    "xTickInterval": 20,
    "xLabelInterval": 20,
    "yTickInterval": 20,
    "yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
    # 輪廓的外型設為 class variable
    chamber = "M -6.8397, -1.4894 A 7, 7, 0, 1, 0, 6.8397, -1.4894 A 40, 40, 0, 0, 1, 6.8397, -18.511 A 7, 7, 0, 1, 0, -6.8397, -18.511 A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    #chamber = "M 0, 0 L 0, -20 z"
    cgoChamber = window.svgToCgoSVG(chamber)
    def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
        self.fillcolor = fillcolor
        self.border = border
        self.strokecolor = strokecolor
        self.linewidth = linewidth
        self.scale = scale
    # 利用鏈條起點與終點定義繪圖
    def basic(self, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        # 注意, cgo.Chamber 為成員變數
        cmbr = cobj(self.cgoChamber, "SHAPE", {
            "fillColor": self.fillcolor,
            "border": self.border,
            "strokeColor": self.strokecolor,
            "lineWidth": self.linewidth })
        # hole 為原點位置
        hole = cobj(shapedefs.circle(4*self.scale), "PATH")
        cmbr.appendPath(hole)
        # 複製 cmbr, 然後命名為 basic1
        basic1 = cmbr.dup()
        # 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
        basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
        # 放大 scale 倍
        cgo.render(basic1, x1, y1, self.scale, 0)
    # 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
    def basic_rot(self, x1, y1, rot, v=False):
        # 若 v 為 True 則為虛擬 chain, 不 render
        self.x1 = x1
        self.y1 = y1
        self.rot = rot
        self.v = v
        # 注意, cgoChamber 為成員變數
        cmbr = cobj(self.cgoChamber, "SHAPE", {
            "fillColor": self.fillcolor,
            "border": self.border,
            "strokeColor": self.strokecolor,
            "lineWidth": self.linewidth })
        # hole0 為原點位置
        hole = cobj(shapedefs.circle(4*self.scale), "PATH")
        cmbr.appendPath(hole)
        # 根據旋轉角度, 計算 x2 與 y2
        x2 = x1 + 20*math.cos(rot*deg)*self.scale
        y2 = y1 + 20*math.sin(rot*deg)*self.scale
        # 複製 cmbr, 然後命名為 basic1
        basic1 = cmbr.dup()
        # 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
        basic1.rotate(rot+90)
        # 放大 scale 倍
        if v == False:
            cgo.render(basic1, x1, y1, self.scale, 0)
        return x2, y2
mychain = chain()
x1, y1 = mychain.basic_rot(-133.06,49.48, 20.78)
x2, y2=mychain.basic_rot(x1, y1,0.7800000000000011, True)
x3, y3=mychain.basic_rot(x2, y2,-19.22, True)
x4, y4=mychain.basic_rot(x3, y3,-39.22, True)
x5, y5=mychain.basic_rot(x4, y4,-59.22, True)
x6, y6=mychain.basic_rot(x5, y5,-79.22, True)
x7, y7=mychain.basic_rot(x6, y6,-99.22, True)
x8, y8=mychain.basic_rot(x7, y7,-119.22, True)
x9, y9=mychain.basic_rot(x8, y8,-139.22, True)
x10, y10=mychain.basic_rot(x9, y9,-159.22, True)
x11, y11=mychain.basic_rot(x10, y10,-179.22, True)
x12, y12=mychain.basic_rot(x11, y11,-199.22)
x13, y13=mychain.basic_rot(x12, y12,-219.22)
x14, y14=mychain.basic_rot(x13, y13,-239.22)
x15, y15=mychain.basic_rot(x14, y14,-259.22)
x16, y16=mychain.basic_rot(x15, y15,-279.22)
x17, y17=mychain.basic_rot(x16, y16,-299.22)
x18, y18=mychain.basic_rot(x17, y17,-319.22)
#mychain = chain()
p1, k1 = mychain.basic_rot(82.11,93.98, 4.78)
p2, k2=mychain.basic_rot(p1, k1,-7.219999999999999)
p3, k3=mychain.basic_rot(p2, k2,-19.22)
p4, k4=mychain.basic_rot(p3, k3,-31.22)
p5, k5=mychain.basic_rot(p4, k4,-43.22)
p6, k6=mychain.basic_rot(p5, k5,-55.22)
p7, k7=mychain.basic_rot(p6, k6,-67.22)
p8, k8=mychain.basic_rot(p7, k7,-79.22)
p9, k9=mychain.basic_rot(p8, k8,-91.22)
p10, k10=mychain.basic_rot(p9, k9,-103.22)
p11, k11=mychain.basic_rot(p10, k10,-115.22)
p12, k12=mychain.basic_rot(p11, k11,-127.22)
p13, k13=mychain.basic_rot(p12, k12,-139.22)
p14, k14=mychain.basic_rot(p13, k13,-151.22)
p15, k15=mychain.basic_rot(p14, k14,-163.22)
p16, k16=mychain.basic_rot(p15, k15,-175.22)
p17, k17=mychain.basic_rot(p16, k16,-187.22)
p18, k18=mychain.basic_rot(p17, k17,-199.22, True)
p19, k19=mychain.basic_rot(p18, k18,-211.22, True)
p20, k20=mychain.basic_rot(p19, k19,-223.22, True)
p21, k21=mychain.basic_rot(p20, k20,-235.22, True)
p22, k22=mychain.basic_rot(p21, k21,-247.22, True)
p23, k23=mychain.basic_rot(p22, k22,-259.22, True)
p24, k24=mychain.basic_rot(p23, k23,-271.22, True)
p25, k25=mychain.basic_rot(p24, k24,-283.22, True)
p26, k26=mychain.basic_rot(p25, k25,-295.22, True)
p27, k27=mychain.basic_rot(p26, k26,-307.22, True)
p28, k28=mychain.basic_rot(p27, k27,-319.22, True)
p29, k29=mychain.basic_rot(p28, k28,-331.22, True)
p30, k30=mychain.basic_rot(p29, k29,-343.22, True)
m1, n1 = mychain.basic_rot(x1, y1, 10.78)
m2, n2=mychain.basic_rot(m1, n1, 10.78)
m3, n3=mychain.basic_rot(m2, n2, 10.78)
m4, n4=mychain.basic_rot(m3, n3, 10.78)
m5, n5=mychain.basic_rot(m4, n4, 10.78)
m6, n6=mychain.basic_rot(m5, n5, 10.78)
m7, n7=mychain.basic_rot(m6, n6, 10.78)
m8, n8=mychain.basic_rot(m7, n7, 10.78)
m9, n9=mychain.basic_rot(m8, n8, 10.78)
m10, n10=mychain.basic_rot(m9, n9, 10.78)
r1, s1 = mychain.basic_rot(x11, y11, -10.78)
r2, s2=mychain.basic_rot(r1, s1, -10.78)
r3, s3=mychain.basic_rot(r2, s2, -10.78)
r4, s4=mychain.basic_rot(r3, s3, -10.78)
r5, s5=mychain.basic_rot(r4, s4, -10.78)
r6, s6=mychain.basic_rot(r5, s5, -10.78)
r7, s7=mychain.basic_rot(r6, s6, -10.78)
r8, s8=mychain.basic_rot(r7, s7, -10.78)
r9, s9=mychain.basic_rot(r8, s8, -10.78)
r10, s10=mychain.basic_rot(r9, s9, -10.78)
</script>
</body>
</html>
'''
    return outstring
| agpl-3.0 |
heeraj123/oh-mainline | vendor/packages/Django/tests/regressiontests/comment_tests/tests/model_tests.py | 126 | 2200 | from __future__ import absolute_import
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Author, Article
class CommentModelTests(CommentTestCase):
    """Sanity checks for Comment.save() and the user-derived field properties."""

    def testSave(self):
        # Every freshly created comment must have submit_date stamped on save.
        for comment in self.createSomeComments():
            self.assertNotEqual(comment.submit_date, None)

    def testUserProperties(self):
        first, second, third, fourth = self.createSomeComments()
        self.assertEqual(first.name, "Joe Somebody")
        self.assertEqual(second.email, "jsomebody@example.com")
        self.assertEqual(third.name, "Frank Nobody")
        self.assertEqual(third.url, "http://example.com/~frank/")
        # An anonymous comment has no associated user.
        self.assertEqual(first.user, None)
        self.assertEqual(third.user, fourth.user)
class CommentManagerTests(CommentTestCase):
    """Tests for the custom manager methods on Comment.objects."""

    def testInModeration(self):
        """Comments that aren't public are considered in moderation"""
        c1, c2, c3, c4 = self.createSomeComments()
        c1.is_public = False
        c2.is_public = False
        c1.save()
        c2.save()
        moderated_comments = list(Comment.objects.in_moderation().order_by("id"))
        self.assertEqual(moderated_comments, [c1, c2])

    def testRemovedCommentsNotInModeration(self):
        """Removed comments are not considered in moderation"""
        c1, c2, c3, c4 = self.createSomeComments()
        c1.is_public = False
        c2.is_public = False
        # c2 is both non-public and removed: only c1 should be in moderation.
        c2.is_removed = True
        c1.save()
        c2.save()
        moderated_comments = list(Comment.objects.in_moderation())
        self.assertEqual(moderated_comments, [c1])

    def testForModel(self):
        """for_model() accepts either a model class or a model instance."""
        c1, c2, c3, c4 = self.createSomeComments()
        article_comments = list(Comment.objects.for_model(Article).order_by("id"))
        author_comments = list(Comment.objects.for_model(Author.objects.get(pk=1)))
        self.assertEqual(article_comments, [c1, c3])
        self.assertEqual(author_comments, [c2])

    def testPrefetchRelated(self):
        """prefetch_related('content_object') batches the generic FK lookups."""
        c1, c2, c3, c4 = self.createSomeComments()
        # one for comments, one for Articles, one for Author
        with self.assertNumQueries(3):
            qs = Comment.objects.prefetch_related('content_object')
            [c.content_object for c in qs]
| agpl-3.0 |
MadeiraCloud/opsagent | libs/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel node and key->link map.
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding each link's key.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward, yielding each link's key.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the reference cycles among the links so they can be
            # reclaimed promptly; then reset the sentinel and the map.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node at the tail of the list (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node at the head of the list (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:          for k in E: od[k] = E[k]
        If E has a .keys() method, does:        for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:  for k, v in E: od[k] = v
        In either case, this is followed by:    for k, v in F.items(): od[k] = v
        '''
        # NOTE: 'self' is taken positionally from *args (see below) so that a
        # keyword argument literally named 'self' can still be passed in **kwds.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    # Private sentinel so pop() can distinguish "no default given" from
    # an explicit default of None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when the dict
        # (directly or indirectly) contains itself; keyed by (id, thread id).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes (__root/__map) so the pickle
        # carries only user-visible state.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| apache-2.0 |
rhelmer/socorro-lib | socorro/unittest/external/postgresql/test_backfill.py | 1 | 12534 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .unittestbase import PostgreSQLTestCase
from nose.plugins.attrib import attr
from nose.tools import eq_, assert_raises
import datetime
from socorro.external.postgresql.backfill import Backfill
from socorro.external.postgresql import staticdata, fakedata
from socorro.external import MissingArgumentError
from socorro.lib import datetimeutil
#==============================================================================
@attr(integration='postgres')
class TestBackfill(PostgreSQLTestCase):
    """Integration tests that exercise every backfill stored procedure
    reachable through the Backfill service.
    """
    #--------------------------------------------------------------------------
    def setUp(self):
        """ Populate tables with fake data """
        super(TestBackfill, self).setUp()
        cursor = self.connection.cursor()
        self.tables = []
        for table in staticdata.tables + fakedata.tables:
            # staticdata has no concept of duration
            if table.__module__ == 'socorro.external.postgresql.staticdata':
                table = table()
            else:
                table = table(days=1)
            table.releases = {
                'WaterWolf': {
                    'channels': {
                        'Nightly': {
                            'versions': [{
                                'number': '18.0',
                                'probability': 0.5,
                                'buildid': '%s000020'
                            }],
                            'adu': '10',
                            'repository': 'nightly',
                            'throttle': '1',
                            'update_channel': 'nightly',
                        },
                    },
                    'crashes_per_hour': '5',
                    'guid': '{waterwolf@example.com}'
                },
                'B2G': {
                    'channels': {
                        'Nightly': {
                            'versions': [{
                                'number': '18.0',
                                'probability': 0.5,
                                'buildid': '%s000020'
                            }],
                            'adu': '10',
                            'repository': 'nightly',
                            'throttle': '1',
                            'update_channel': 'nightly',
                        },
                    },
                    'crashes_per_hour': '5',
                    'guid': '{waterwolf@example.com}'
                }
            }
            table_name = table.table
            table_columns = table.columns
            # Build "(%(col)s, ...)" placeholder and "(col, ...)" column lists;
            # the replace() calls strip quotes and fix 1-element tuple commas.
            values = str(tuple(["%(" + i + ")s" for i in table_columns]))
            columns = str(tuple(table_columns))
            self.tables.append(table_name)
            # TODO: backfill_reports_clean() sometimes tries to insert a
            # os_version_id that already exists
            # BUG FIX: compare strings with != instead of `is not` -- identity
            # comparison only happened to work through CPython string interning.
            if table_name != "os_versions":
                for rows in table.generate_rows():
                    data = dict(zip(table_columns, rows))
                    query = "INSERT INTO %(table)s " % {'table': table_name}
                    query = query + columns.replace("'", "").replace(",)", ")")
                    query = query + " VALUES "
                    query = query + values.replace(",)", ")").replace("'", "")
                    cursor.execute(query, data)
        self.connection.commit()
    #--------------------------------------------------------------------------
    def tearDown(self):
        """ Cleanup the database, delete tables and functions """
        cursor = self.connection.cursor()
        # Join the table names directly instead of round-tripping through
        # str(list) and stripping brackets/quotes.
        tables = ", ".join(self.tables)
        cursor.execute("TRUNCATE " + tables + " CASCADE;")
        self.connection.commit()
        self.connection.close()
        super(TestBackfill, self).tearDown()
    #--------------------------------------------------------------------------
    def setup_data(self):
        """Prepare self.test_source_data: one entry per backfill type with the
        parameters to pass and the expected (True,) result row.
        """
        self.now = datetimeutil.utc_now()
        now = self.now.date()
        yesterday = now - datetime.timedelta(days=1)
        lastweek = now - datetime.timedelta(days=7)
        now_str = datetimeutil.date_to_string(now)
        yesterday_str = datetimeutil.date_to_string(yesterday)
        lastweek_str = datetimeutil.date_to_string(lastweek)
        self.test_source_data = {
            # Test backfill_adu
            'adu': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_all_dups
            'all_dups': {
                'params': {
                    "start_date": yesterday_str,
                    "end_date": now_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_build_adu
            'build_adu': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_correlations
            'correlations': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_crashes_by_user_build
            'crashes_by_user_build': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_crashes_by_user
            'crashes_by_user': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # TODO: Test backfill_daily_crashes tries to insert into a table
            # that do not exists. It can be fixed by creating a temporary one.
            #'daily_crashes': {
            #    'params': {
            #        "update_day": now_str,
            #    },
            #    'res_expected': [(True,)],
            # },
            # Test backfill_exploitability
            'exploitability': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_explosiveness
            'explosiveness': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_home_page_graph_build
            'home_page_graph_build': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_home_page_graph
            'home_page_graph': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_matviews
            'matviews': {
                'params': {
                    "start_date": yesterday_str,
                    "reports_clean": 'false',
                },
                'res_expected': [(True,)],
            },
            # Test backfill_nightly_builds
            'nightly_builds': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_rank_compare
            'rank_compare': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_reports_clean
            'reports_clean': {
                'params': {
                    "start_date": yesterday_str,
                    "end_date": now_str,
                },
                'res_expected': [(True,)],
            },
            # TODO: Test backfill_reports_duplicates tries to insert into a
            # table that do not exists. It can be fixed by using the update
            # function inside of the backfill.
            #'reports_duplicates': {
            #    'params': {
            #        "start_date": yesterday_str,
            #        "end_date": now_str,
            #    },
            #    'res_expected': [(True,)],
            # },
            # TODO: Test backfill_signature_counts tries to insert into
            # tables and to update functions that does not exist.
            #'signature_counts': {
            #    'params': {
            #        "start_date": yesterday_str,
            #        "end_date": now_str,
            #    },
            #    'res_expected': [(True,)],
            # },
            # Test backfill_tcbs_build
            'tcbs_build': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_tcbs
            'tcbs': {
                'params': {
                    "update_day": yesterday_str,
                },
                'res_expected': [(True,)],
            },
            # Test backfill_weekly_report_partitions
            'weekly_report_partitions': {
                'params': {
                    "start_date": lastweek_str,
                    "end_date": now_str,
                    "table_name": 'raw_crashes',
                },
                'res_expected': [(True,)],
            },
            # TODO: Update Backfill to support signature_summary backfill
            # through the API
            #'signature_summary_products': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_installations': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_uptime': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_os': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_process_type': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_architecture': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_flash_version': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_device': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
            #'signature_summary_graphics': {
            #    'params': {
            #        "update_day": yesterday_str,
            #    },
            #    'res_expected': [(True,)],
            #},
        }
    #--------------------------------------------------------------------------
    def test_get(self):
        """Run every configured backfill type and check it reports success."""
        backfill = Backfill(config=self.config)
        #......................................................................
        # Test raise error if kind of backfill is not passed
        params = {"backfill_type": ''}
        assert_raises(MissingArgumentError, backfill.get, **params)
        #......................................................................
        # Test all the backfill functions
        self.setup_data()
        for test, data in self.test_source_data.items():
            data['params']['backfill_type'] = str(test)
            res = backfill.get(**data['params'])
            eq_(res[0], data['res_expected'][0])
| mpl-2.0 |
caphrim007/ansible | lib/ansible/plugins/filter/urlsplit.py | 146 | 1136 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
    """Split *value* into its URL components.

    With no *query*, return the whole component dictionary; with a *query*,
    return just that component, raising AnsibleFilterError for unknown names.
    """
    parts = helpers.object_to_dict(
        urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
    if not query:
        return parts
    if query not in parts:
        raise AnsibleFilterError('%s: unknown URL component: %s' % (alias, query))
    return parts[query]
# ---- Ansible filters ----
class FilterModule(object):
    ''' Jinja2 filters for URL handling '''
    def filters(self):
        # Map the filter name exposed to playbooks onto its implementation.
        filter_map = {
            'urlsplit': split_url,
        }
        return filter_map
| gpl-3.0 |
HeathKang/flasky | migrations/versions/38c4e85512a9_initial_migration.py | 182 | 1163 | """initial migration
Revision ID: 38c4e85512a9
Revises: None
Create Date: 2013-12-27 01:23:59.392801
"""
# revision identifiers, used by Alembic.
revision = '38c4e85512a9'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: create the roles and users tables and the
    unique index on users.username.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # 'roles' must be created before 'users' because users.role_id has a
    # foreign key pointing at roles.id.
    op.create_table('roles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_users_username', 'users', ['username'], unique=True)
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop everything upgrade() created, in reverse
    dependency order (index, then users, then roles).
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_users_username', 'users')
    op.drop_table('users')
    op.drop_table('roles')
    ### end Alembic commands ###
| mit |
caphrim007/ansible-modules-core | cloud/rackspace/rax_cdb_database.py | 41 | 4837 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
name:
description:
- Name to give to the database
default: null
character_set:
description:
- Set of symbols and encodings
default: 'utf8'
collate:
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_database(instance, name):
    """Look up database *name* on a Cloud Databases *instance*.

    Returns the database object, or False when the lookup raises
    (treated as "not found").
    """
    try:
        return instance.get_database(name)
    except Exception:
        return False
def save_database(module, cdb_id, name, character_set, collate):
    """Ensure database *name* exists on Cloud Databases instance *cdb_id*.

    Exits the Ansible module with changed=True when the database had to be
    created, changed=False when it already existed. Any pyrax error aborts
    the module via fail_json.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    # FIX: use the `except ... as e` form (valid on Python 2.6+) instead of
    # the Python-2-only `except Exception, e` comma syntax.
    except Exception as e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    database = find_database(instance, name)
    if not database:
        try:
            database = instance.create_database(name=name,
                                                character_set=character_set,
                                                collate=collate)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='create',
                     database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
    """Ensure database *name* is absent from Cloud Databases instance *cdb_id*.

    Exits the Ansible module with changed=True when a deletion happened,
    changed=False when the database was already gone. Any pyrax error aborts
    the module via fail_json.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    # FIX: use the `except ... as e` form (valid on Python 2.6+) instead of
    # the Python-2-only `except Exception, e` comma syntax.
    except Exception as e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    database = find_database(instance, name)
    if database:
        try:
            database.delete()
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='delete',
                     database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
    """Dispatch to the create or delete handler based on *state*."""
    if state == 'absent':
        delete_database(module, cdb_id, name)
    elif state == 'present':
        save_database(module, cdb_id, name, character_set, collate)
def main():
    """Ansible entry point: build the argument spec, validate the
    environment, and run the requested create/delete action.
    """
    arg_spec = rax_argument_spec()
    arg_spec.update(
        dict(
            cdb_id=dict(type='str', required=True),
            name=dict(type='str', required=True),
            character_set=dict(type='str', default='utf8'),
            collate=dict(type='str', default='utf8_general_ci'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        required_together=rax_required_together(),
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    params = module.params
    setup_rax_module(module, pyrax)
    rax_cdb_database(module, params.get('state'), params.get('cdb_id'),
                     params.get('name'), params.get('character_set'),
                     params.get('collate'))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| gpl-3.0 |
PatrickChrist/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # Decompose each character (NFKD), then drop the combining marks.
    decomposed = unicodedata.normalize('NFKD', s)
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    # NFKD-decompose, then let the ASCII codec silently drop what remains
    # non-ASCII (i.e. the combining marks and untransliterable symbols).
    decomposed = unicodedata.normalize('NFKD', s)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Each <...> tag becomes a single space so adjacent words stay separated.
    tag_pattern = re.compile(r"<([^>]+)>", flags=re.UNICODE)
    return tag_pattern.sub(" ", s)
def _check_stop_list(stop):
    """Map the ``stop_words`` parameter onto an actual stop-word collection."""
    if stop is None:
        return None
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        # Any other string is an unknown built-in list name.
        raise ValueError("not a built-in stop list: %s" % stop)
    # Otherwise assume an iterable of words and freeze it.
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Matches runs of two or more whitespace characters; used to collapse
    # whitespace before character n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters.
        """
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()
        elif self.input == 'file':
            doc = doc.read()
        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)
        # Identity check only catches the np.nan singleton itself; other NaN
        # float objects would pass through -- NOTE(review): confirm intended.
        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")
        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]
        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            # Rebuild the token list as space-joined n-grams for every n in
            # [min_n, max_n]; when min_n > 1 the original unigrams are dropped.
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))
        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        # Slide a window of every size in [min_n, max_n] over the whole text,
        # crossing word boundaries.
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # Pad each word with spaces so the n-grams mark word boundaries.
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        """Normalize the user-supplied vocabulary (if any) into a dict and set
        ``fixed_vocabulary_`` accordingly; raises ValueError on duplicates,
        repeated/missing indices, or an empty vocabulary.
        """
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                # Iterable of terms: assign indices in iteration order while
                # rejecting duplicate terms.
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # Mapping: indices must be exactly 0..len-1 with no repeats.
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # NOTE(review): the trailing comma makes this statement a 1-tuple
        # expression; harmless, but likely unintended.
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Backwards-compatibility alias for fixed_vocabulary_.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token occurrences

    It turns a collection of text documents into a scipy.sparse matrix holding
    token occurrence counts (or binary occurrence information), possibly
    normalized as token frequencies if norm='l1' or projected on the euclidean
    unit sphere if norm='l2'.

    This text vectorizer implementation uses the hashing trick to find the
    token string name to feature integer index mapping.

    This strategy has several advantages:

    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory

    - it is fast to pickle and un-pickle as it holds no state besides the
      constructor parameters

    - it can be used in a streaming (partial fit) or parallel pipeline as there
      is no state computed during fit.

    There are also a couple of cons (vs using a CountVectorizer with an
    in-memory vocabulary):

    - there is no way to compute the inverse transform (from feature indices to
      string feature names) which can be a problem when trying to introspect
      which features are most important to a model.

    - there can be collisions: distinct tokens can be mapped to the same
      feature index. However in practice this is rarely an issue if n_features
      is large enough (e.g. 2 ** 18 for text classification problems).

    - no IDF weighting as this would render the transformer stateful.

    The hash function employed is the signed 32-bit version of Murmurhash3.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, default='utf-8'
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

    lowercase : boolean, default=True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    n_features : integer, default=(2 ** 20)
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    binary: boolean, default=False.
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype: type, optional
        Type of the matrix returned by fit_transform() or transform().

    non_negative : boolean, default=False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    See also
    --------
    CountVectorizer, TfidfVectorizer

    """
    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
                 binary=False, norm='l2', non_negative=False,
                 dtype=np.float64):
        # All parameters are stored verbatim (scikit-learn convention:
        # no validation or transformation in __init__).
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.n_features = n_features
        self.ngram_range = ngram_range
        self.binary = binary
        self.norm = norm
        self.non_negative = non_negative
        self.dtype = dtype

    def partial_fit(self, X, y=None):
        """Does nothing: this transformer is stateless.

        This method is just there to mark the fact that this transformer
        can work in a streaming setup.
        """
        return self

    def fit(self, X, y=None):
        """Does nothing: this transformer is stateless."""
        # triggers a parameter validation
        self._get_hasher().fit(X, y=y)
        return self

    def transform(self, X, y=None):
        """Transform a sequence of documents to a document-term matrix.

        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.

        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Document-term matrix.
        """
        analyzer = self.build_analyzer()
        X = self._get_hasher().transform(analyzer(doc) for doc in X)
        if self.binary:
            # Binary mode: any non-zero count becomes 1.
            X.data.fill(1)
        if self.norm is not None:
            X = normalize(X, norm=self.norm, copy=False)
        return X

    # Alias transform to fit_transform for convenience
    fit_transform = transform

    def _get_hasher(self):
        # Build the stateless FeatureHasher that does the actual hashing.
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token counts

    This implementation produces a sparse representation of the counts using
    scipy.sparse.coo_matrix.

    If you do not provide an a-priori dictionary and you do not use an analyzer
    that does some kind of feature selection then the number of features will
    be equal to the vocabulary size found by analyzing the data.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be a sequence of string or
        bytes items that are analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents. Indices
        in the mapping should not be repeated and should not have any gap
        between 0 and the largest index.

    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    Attributes
    ----------
    vocabulary_ : dict
        A mapping of terms to feature indices.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    HashingVectorizer, TfidfVectorizer

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word',
                 max_df=1.0, min_df=1, max_features=None,
                 vocabulary=None, binary=False, dtype=np.int64):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        if max_df < 0 or min_df < 0:
            # BUGFIX: the message previously read "max_df of min_df".
            raise ValueError("negative value for max_df or min_df")
        self.max_features = max_features
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype
def _sort_features(self, X, vocabulary):
    """Sort features by name.

    Returns a reordered matrix; the vocabulary dict is updated in place
    so that each term maps to its new (alphabetically ordered) column.
    """
    terms_in_order = sorted(six.iteritems(vocabulary))
    column_map = np.empty(len(terms_in_order), dtype=np.int32)
    for new_index, (term, old_index) in enumerate(terms_in_order):
        column_map[new_index] = old_index
        vocabulary[term] = new_index
    return X[:, column_map]
def _limit_features(self, X, vocabulary, high=None, low=None,
                    limit=None):
    """Remove too rare or too common features.

    Prune features that are non zero in more samples than high or less
    documents than low, modifying the vocabulary, and restricting it to
    at most the limit most frequent.

    This does not prune samples with zero features.

    Parameters
    ----------
    X : sparse matrix, shape = (n_samples, n_features)
        Document-term matrix.
    vocabulary : dict
        Term -> column-index mapping; modified in place.
    high : int or None
        Maximum allowed document frequency (inclusive).
    low : int or None
        Minimum allowed document frequency (inclusive).
    limit : int or None
        Keep at most this many features, preferring the highest corpus
        term frequency.

    Returns
    -------
    (X, removed_terms) : pruned matrix and the set of dropped terms.
    """
    if high is None and low is None and limit is None:
        return X, set()

    # Calculate a mask based on document frequencies
    dfs = _document_frequency(X)
    # Total term frequency per feature, used only for the `limit` cut.
    tfs = np.asarray(X.sum(axis=0)).ravel()
    mask = np.ones(len(dfs), dtype=bool)
    if high is not None:
        mask &= dfs <= high
    if low is not None:
        mask &= dfs >= low
    if limit is not None and mask.sum() > limit:
        # Among features surviving the df filters, keep the `limit`
        # with the largest total term frequency.
        mask_inds = (-tfs[mask]).argsort()[:limit]
        new_mask = np.zeros(len(dfs), dtype=bool)
        new_mask[np.where(mask)[0][mask_inds]] = True
        mask = new_mask

    new_indices = np.cumsum(mask) - 1  # maps old indices to new
    removed_terms = set()
    for term, old_index in list(six.iteritems(vocabulary)):
        if mask[old_index]:
            vocabulary[term] = new_indices[old_index]
        else:
            del vocabulary[term]
            removed_terms.add(term)
    kept_indices = np.where(mask)[0]
    if len(kept_indices) == 0:
        raise ValueError("After pruning, no terms remain. Try a lower"
                         " min_df or a higher max_df.")
    return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
    """Create sparse feature matrix, and vocabulary where fixed_vocab=False
    """
    if fixed_vocab:
        vocabulary = self.vocabulary_
    else:
        # Add a new value when a new vocabulary item is seen:
        # the defaultdict's factory assigns the next integer index.
        vocabulary = defaultdict()
        vocabulary.default_factory = vocabulary.__len__

    analyze = self.build_analyzer()
    j_indices = _make_int_array()
    indptr = _make_int_array()
    indptr.append(0)
    for doc in raw_documents:
        for feature in analyze(doc):
            try:
                j_indices.append(vocabulary[feature])
            except KeyError:
                # Ignore out-of-vocabulary items for fixed_vocab=True
                continue
        # One row pointer per document (CSR layout).
        indptr.append(len(j_indices))

    if not fixed_vocab:
        # disable defaultdict behaviour
        vocabulary = dict(vocabulary)
        if not vocabulary:
            raise ValueError("empty vocabulary; perhaps the documents only"
                             " contain stop words")

    j_indices = frombuffer_empty(j_indices, dtype=np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)
    # Each stored entry starts as 1; duplicate (doc, term) pairs are
    # folded into counts by sum_duplicates below.
    values = np.ones(len(j_indices))

    X = sp.csr_matrix((values, j_indices, indptr),
                      shape=(len(indptr) - 1, len(vocabulary)),
                      dtype=self.dtype)
    X.sum_duplicates()
    return vocabulary, X
def fit(self, raw_documents, y=None):
    """Learn a vocabulary dictionary of all tokens in the raw documents.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    self
    """
    # fit_transform does all the work; the matrix it builds is discarded.
    self.fit_transform(raw_documents)
    return self
def fit_transform(self, raw_documents, y=None):
    """Learn the vocabulary dictionary and return term-document matrix.

    This is equivalent to fit followed by transform, but more efficiently
    implemented.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    X : array, [n_samples, n_features]
        Document-term matrix.
    """
    # We intentionally don't call the transform method to make
    # fit_transform overridable without unwanted side effects in
    # TfidfVectorizer.
    self._validate_vocabulary()
    max_df = self.max_df
    min_df = self.min_df
    max_features = self.max_features

    vocabulary, X = self._count_vocab(raw_documents,
                                      self.fixed_vocabulary_)

    if self.binary:
        X.data.fill(1)

    if not self.fixed_vocabulary_:
        X = self._sort_features(X, vocabulary)

        n_doc = X.shape[0]
        # Float thresholds are proportions of the corpus; integers are
        # absolute document counts.
        max_doc_count = (max_df
                         if isinstance(max_df, numbers.Integral)
                         else max_df * n_doc)
        min_doc_count = (min_df
                         if isinstance(min_df, numbers.Integral)
                         else min_df * n_doc)
        if max_doc_count < min_doc_count:
            raise ValueError(
                "max_df corresponds to < documents than min_df")
        X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                   max_doc_count,
                                                   min_doc_count,
                                                   max_features)

        self.vocabulary_ = vocabulary

    return X
def transform(self, raw_documents):
    """Transform documents to document-term matrix.

    Extract token counts out of raw text documents using the vocabulary
    fitted with fit or the one provided to the constructor.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    X : sparse matrix, [n_samples, n_features]
        Document-term matrix.
    """
    # A vocabulary handed to the constructor may not have been checked
    # yet if fit was never called.
    if not hasattr(self, 'vocabulary_'):
        self._validate_vocabulary()
    self._check_vocabulary()

    # use the same matrix-building strategy as fit_transform
    _, counts = self._count_vocab(raw_documents, fixed_vocab=True)
    if self.binary:
        counts.data.fill(1)
    return counts
def inverse_transform(self, X):
    """Return terms per document with nonzero entries in X.

    Parameters
    ----------
    X : {array, sparse matrix}, shape = [n_samples, n_features]

    Returns
    -------
    X_inv : list of arrays, len = n_samples
        List of arrays of terms.
    """
    self._check_vocabulary()

    if sp.issparse(X):
        # We need CSR format for fast row manipulations.
        X = X.tocsr()
    else:
        # We need to convert X to a matrix, so that the indexing
        # returns 2D objects
        X = np.asmatrix(X)
    n_samples = X.shape[0]

    # Invert the vocabulary: position i of inverse_vocabulary holds the
    # term whose feature index is i.
    terms = np.array(list(self.vocabulary_.keys()))
    indices = np.array(list(self.vocabulary_.values()))
    inverse_vocabulary = terms[np.argsort(indices)]

    # nonzero()[1] gives the column (feature) indices of each row.
    return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
            for i in range(n_samples)]
def get_feature_names(self):
    """Array mapping from feature integer indices to feature name"""
    self._check_vocabulary()

    # Order terms by their column index so position i names feature i.
    by_index = sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))
    return [term for term, _ in by_index]
def _make_int_array():
    """Construct an array.array of a type suitable for scipy.sparse indices."""
    # str("i") keeps the typecode a native str on both Python 2 and 3.
    return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation

    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.

    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.

    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:

    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
                   Information Retrieval. Addison Wesley, pp. 68-74.`

    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
                   Introduction to Information Retrieval. Cambridge University
                   Press, pp. 118-120.`
    """

    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        # Plain attribute assignment only; validation happens in fit().
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
    """Learn the idf vector (global term weights)

    Parameters
    ----------
    X : sparse matrix, [n_samples, n_features]
        a matrix of term/token counts
    """
    if not sp.issparse(X):
        X = sp.csc_matrix(X)
    if self.use_idf:
        n_samples, n_features = X.shape
        df = _document_frequency(X)

        # perform idf smoothing if required: pretend one extra document
        # containing every term was seen (avoids division by zero).
        df += int(self.smooth_idf)
        n_samples += int(self.smooth_idf)

        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        idf = np.log(float(n_samples) / df) + 1.0
        # Store idf as a sparse diagonal matrix so that transform() can
        # apply it with a single matrix product.
        self._idf_diag = sp.spdiags(idf,
                                    diags=0, m=n_features, n=n_features)

    return self
def transform(self, X, copy=True):
    """Transform a count matrix to a tf or tf-idf representation

    Parameters
    ----------
    X : sparse matrix, [n_samples, n_features]
        a matrix of term/token counts

    copy : boolean, default True
        Whether to copy X and operate on the copy or perform in-place
        operations.

    Returns
    -------
    vectors : sparse matrix, [n_samples, n_features]
    """
    if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
        # preserve float family dtype.  np.floating replaces the
        # deprecated alias np.float, which was removed in NumPy 1.24;
        # the check is otherwise equivalent.
        X = sp.csr_matrix(X, copy=copy)
    else:
        # convert counts or binary occurrences to floats
        X = sp.csr_matrix(X, dtype=np.float64, copy=copy)

    n_samples, n_features = X.shape

    if self.sublinear_tf:
        # tf -> 1 + log(tf), computed in place on the non-zero data only.
        np.log(X.data, X.data)
        X.data += 1

    if self.use_idf:
        check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')

        expected_n_features = self._idf_diag.shape[0]
        if n_features != expected_n_features:
            raise ValueError("Input has n_features=%d while the model"
                             " has been trained with n_features=%d" % (
                                 n_features, expected_n_features))
        # *= doesn't work: multiply by the sparse diagonal idf matrix.
        X = X * self._idf_diag

    if self.norm:
        X = normalize(X, norm=self.norm, copy=False)

    return X
@property
def idf_(self):
    """The learned idf vector, or None when fit has not stored one."""
    diag = getattr(self, "_idf_diag", None)
    if diag is None:
        return None
    # Summing the diagonal matrix over rows recovers the 1-D idf vector.
    return np.ravel(diag.sum(axis=0))
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to CountVectorizer followed by TfidfTransformer.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be a sequence of string or
        bytes items that are analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char'} or callable
        Whether the feature should be made of word or character n-grams.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If a string, it is passed to _check_stop_list and the appropriate stop
        list is returned. 'english' is currently the only supported string
        value.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, default True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents.

    binary : boolean, default=False
        If True, all non-zero term counts are set to 1. This does not mean
        outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set idf and normalization to False to get 0/1 outputs.)

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights)
        when ``use_idf`` is set to True, None otherwise.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and return
        them as a sparse matrix

    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """
def __init__(self, input='content', encoding='utf-8',
             decode_error='strict', strip_accents=None, lowercase=True,
             preprocessor=None, tokenizer=None, analyzer='word',
             stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
             ngram_range=(1, 1), max_df=1.0, min_df=1,
             max_features=None, vocabulary=None, binary=False,
             dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
             sublinear_tf=False):
    """Forward the counting parameters to CountVectorizer and keep the
    tf-idf parameters on an internal TfidfTransformer instance."""
    super(TfidfVectorizer, self).__init__(
        input=input, encoding=encoding, decode_error=decode_error,
        strip_accents=strip_accents, lowercase=lowercase,
        preprocessor=preprocessor, tokenizer=tokenizer,
        analyzer=analyzer, stop_words=stop_words,
        token_pattern=token_pattern, ngram_range=ngram_range,
        max_df=max_df, min_df=min_df, max_features=max_features,
        vocabulary=vocabulary, binary=binary, dtype=dtype)

    # The tf-idf weighting itself is delegated to this transformer; the
    # properties below expose its parameters on the vectorizer.
    self._tfidf = TfidfTransformer(norm=norm,
                                   use_idf=use_idf,
                                   smooth_idf=smooth_idf,
                                   sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr: reading or writing these attributes on
# the vectorizer reads/writes the wrapped TfidfTransformer.
@property
def norm(self):
    return self._tfidf.norm

@norm.setter
def norm(self, value):
    self._tfidf.norm = value

@property
def use_idf(self):
    return self._tfidf.use_idf

@use_idf.setter
def use_idf(self, value):
    self._tfidf.use_idf = value

@property
def smooth_idf(self):
    return self._tfidf.smooth_idf

@smooth_idf.setter
def smooth_idf(self, value):
    self._tfidf.smooth_idf = value

@property
def sublinear_tf(self):
    return self._tfidf.sublinear_tf

@sublinear_tf.setter
def sublinear_tf(self, value):
    self._tfidf.sublinear_tf = value

@property
def idf_(self):
    # Read-only: the learned idf vector lives on the transformer.
    return self._tfidf.idf_
def fit(self, raw_documents, y=None):
    """Learn vocabulary and idf from training set.

    Parameters
    ----------
    raw_documents : iterable
        an iterable which yields either str, unicode or file objects

    Returns
    -------
    self : TfidfVectorizer
    """
    # Build the count matrix first, then fit the idf weights on it.
    counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
    self._tfidf.fit(counts)
    return self
def fit_transform(self, raw_documents, y=None):
    """Learn vocabulary and idf, return term-document matrix.

    This is equivalent to fit followed by transform, but more efficiently
    implemented.

    Parameters
    ----------
    raw_documents : iterable
        an iterable which yields either str, unicode or file objects

    Returns
    -------
    X : sparse matrix, [n_samples, n_features]
        Tf-idf-weighted document-term matrix.
    """
    counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
    self._tfidf.fit(counts)
    # counts is already a fresh matrix derived from raw_documents, so
    # the tf-idf weighting may safely operate on it in place.
    return self._tfidf.transform(counts, copy=False)
def transform(self, raw_documents, copy=True):
    """Transform documents to document-term matrix.

    Uses the vocabulary and document frequencies (df) learned by fit (or
    fit_transform).

    Parameters
    ----------
    raw_documents : iterable
        an iterable which yields either str, unicode or file objects

    copy : boolean, default True
        Whether to copy X and operate on the copy or perform in-place
        operations.

    Returns
    -------
    X : sparse matrix, [n_samples, n_features]
        Tf-idf-weighted document-term matrix.
    """
    check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')

    counts = super(TfidfVectorizer, self).transform(raw_documents)
    # The count matrix is freshly built, so in-place weighting is safe.
    return self._tfidf.transform(counts, copy=False)
| bsd-3-clause |
Aaron0927/xen-4.2.1 | tools/python/xen/xend/XendProtocol.py | 49 | 7170 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd.
#============================================================================
import socket
import httplib
import time
import types
from encode import *
from xen.xend import sxp
from xen.xend import XendOptions
# Set to 1 to enable httplib connection debug output in xendRequest().
DEBUG = 0

# HTTP status codes treated as successful responses from xend.
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NO_CONTENT = 204

# Global Xend configuration; provides e.g. the unix socket path used by
# UnixXendClientProtocol.
xoptions = XendOptions.instance()
class XendError(RuntimeError):
    """Error class for 'expected errors' when talking to xend.

    Raised for protocol-level failures and for errors reported by the
    xend server itself.
    """
class XendRequest:
    """A request to xend.
    """
    def __init__(self, url, method, args):
        """Create a request. Sets up the headers, argument data, and the
        url.

        @param url: the url to request
        @param method: request method, GET or POST
        @param args: dict containing request args, if any
        """
        # Only plain http is supported (possibly carried over a unix socket).
        if url.proto != 'http':
            raise ValueError('Invalid protocol: ' + url.proto)
        # encode_data produces the content-type header dict and the
        # encoded request body.
        (hdr, data) = encode_data(args)
        # GET requests carry their arguments in the query string rather
        # than in a request body.
        if args and method == 'GET':
            url.query = data
            data = None
        # Strip a trailing slash from POST paths.  NOTE(review): presumably
        # required by xend's URL dispatch -- confirm against the server side.
        if method == "POST" and url.path.endswith('/'):
            url.path = url.path[:-1]

        self.headers = hdr
        self.data = data
        self.url = url
        self.method = method
class XendClientProtocol:
    """Abstract class for xend clients.
    """
    def xendRequest(self, url, method, args=None):
        """Make a request to xend.
        Implement in a subclass.

        @param url: xend request url
        @param method: http method: POST or GET
        @param args: request arguments (dict)
        """
        raise NotImplementedError()

    def xendGet(self, url, args=None):
        """Make a xend request using HTTP GET.
        Requests using GET are usually 'safe' and may be repeated without
        nasty side-effects.

        @param url: xend request url
        @param args: request arguments (dict)
        """
        return self.xendRequest(url, "GET", args)

    def xendPost(self, url, args):
        """Make a xend request using HTTP POST.
        Requests using POST potentially cause side-effects, and should
        not be repeated unless you really want to repeat the side
        effect.

        @param url: xend request url
        @param args: request arguments (dict)
        """
        return self.xendRequest(url, "POST", args)

    def handleStatus(self, _, status, message):
        """Handle the status returned from the request.

        Returns None for 204 (success with no body), 'ok' for the other
        success codes, and delegates everything else to handleException.
        """
        status = int(status)
        if status in [ HTTP_NO_CONTENT ]:
            return None
        if status not in [ HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED ]:
            return self.handleException(XendError(message))
        return 'ok'

    def handleResponse(self, data):
        """Handle the data returned in response to the request.
        """
        if data is None: return None
        typ = self.getHeader('Content-Type')
        # Responses that are not s-expressions are returned verbatim.
        if typ != sxp.mime_type:
            return data
        try:
            pin = sxp.Parser()
            pin.input(data);
            pin.input_eof()
            val = pin.get_val()
        except sxp.ParseError, err:
            return self.handleException(err)
        # An ('xend.err', message) sexp means the server reported an error.
        if isinstance(val, types.ListType) and sxp.name(val) == 'xend.err':
            err = XendError(val[1])
            return self.handleException(err)
        return val

    def handleException(self, err):
        """Handle an exception during the request.
        May be overridden in a subclass.
        """
        raise err

    def getHeader(self, key):
        """Get a header from the response.
        Case is ignored in the key.

        @param key: header key
        @return: header
        """
        raise NotImplementedError()
class HttpXendClientProtocol(XendClientProtocol):
    """A synchronous xend client. This will make a request, wait for
    the reply and return the result.
    """

    # Most recent response and request; resp backs getHeader().
    resp = None
    request = None

    def makeConnection(self, url):
        return httplib.HTTPConnection(url.location())

    def makeRequest(self, url, method, args):
        return XendRequest(url, method, args)

    def xendRequest(self, url, method, args=None):
        """Make a request to xend.

        @param url: xend request url
        @param method: http method: POST or GET
        @param args: request arguments (dict)
        """
        # A BadStatusLine usually means the server dropped the connection;
        # retry once after a short delay before giving up.
        retries = 0
        while retries < 2:
            self.request = self.makeRequest(url, method, args)
            conn = self.makeConnection(url)
            try:
                if DEBUG: conn.set_debuglevel(1)
                conn.request(method, url.fullpath(), self.request.data,
                             self.request.headers)
                try:
                    resp = conn.getresponse()
                    self.resp = resp
                    val = self.handleStatus(resp.version, resp.status,
                                            resp.reason)
                    # handleStatus returns None for 204: nothing to read.
                    if val is None:
                        data = None
                    else:
                        data = resp.read()
                    val = self.handleResponse(data)
                    return val
                except httplib.BadStatusLine:
                    retries += 1
                    time.sleep(5)
            finally:
                # The connection is closed on every path, including retry.
                conn.close()

        raise XendError("Received invalid response from Xend, twice.")

    def getHeader(self, key):
        return self.resp.getheader(key)
class UnixConnection(httplib.HTTPConnection):
    """Subclass of Python library HTTPConnection that uses a unix-domain socket.
    """
    def __init__(self, path):
        # The hostname is irrelevant for a unix socket, but the base class
        # requires one; 'localhost' is a harmless placeholder.
        httplib.HTTPConnection.__init__(self, 'localhost')
        # Filesystem path of the unix-domain socket to connect to.
        self.path = path

    def connect(self):
        # Replace the TCP connect with a unix-domain socket connect.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.path)
        self.sock = sock
class UnixXendClientProtocol(HttpXendClientProtocol):
    """A synchronous xend client using a unix-domain socket.
    """
    def __init__(self, path=None):
        # Default to the socket path from the global xend configuration.
        if path is None:
            path = xoptions.get_xend_unix_path()
        self.path = path

    def makeConnection(self, _):
        # The url argument is ignored: the socket path fixes the endpoint.
        return UnixConnection(self.path)
| gpl-2.0 |
kylewray/nova | python/nova/nova_pomdp.py | 1 | 6240 | """ The MIT License (MIT)
Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ctypes as ct
import platform
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))))
import nova_pomdp_alpha_vectors as npav
import pomdp_alpha_vectors as pav
# Load the native nova shared library.  The binary lives two directories up
# from this file in lib/; only the filename suffix differs by platform.
#try:
#    _nova
#except NameError:
_nova = None
if platform.system() == "Windows":
    _nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "..", "..", "lib", "libnova.dll"))
else:
    _nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "..", "..", "lib", "libnova.so"))
class NovaPOMDP(ct.Structure):
    """ The C struct POMDP object. """

    # NOTE(review): the field names mirror nova's C POMDP struct.  By standard
    # POMDP notation n/m/z would be the state/action/observation counts and
    # gamma the discount factor, but confirm against the nova C headers.
    # The d_* members hold the device (GPU) copies of the host arrays above.
    _fields_ = [("n", ct.c_uint),
                ("ns", ct.c_uint),
                ("m", ct.c_uint),
                ("z", ct.c_uint),
                ("r", ct.c_uint),
                ("rz", ct.c_uint),
                ("gamma", ct.c_float),
                ("horizon", ct.c_uint),
                ("S", ct.POINTER(ct.c_int)),
                ("T", ct.POINTER(ct.c_float)),
                ("O", ct.POINTER(ct.c_float)),
                ("R", ct.POINTER(ct.c_float)),
                ("Z", ct.POINTER(ct.c_int)),
                ("B", ct.POINTER(ct.c_float)),
                ("d_S", ct.POINTER(ct.c_int)),
                ("d_T", ct.POINTER(ct.c_float)),
                ("d_O", ct.POINTER(ct.c_float)),
                ("d_R", ct.POINTER(ct.c_float)),
                ("d_Z", ct.POINTER(ct.c_int)),
                ("d_B", ct.POINTER(ct.c_float)),
                ]
# Declare argument signatures for the C functions so ctypes can type-check
# and marshal calls into the native library.
# Functions from 'pomdp_model_cpu.h'.
_nova.pomdp_initialize_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                       ct.c_uint,    # n
                                       ct.c_uint,    # ns
                                       ct.c_uint,    # m
                                       ct.c_uint,    # z
                                       ct.c_uint,    # r
                                       ct.c_uint,    # rz
                                       ct.c_float,   # gamma
                                       ct.c_uint)    # horizon
_nova.pomdp_belief_update_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                          ct.POINTER(ct.c_float),               # b
                                          ct.c_uint,                            # a
                                          ct.c_uint,                            # o
                                          ct.POINTER(ct.POINTER(ct.c_float)))  # bp
_nova.pomdp_add_new_raw_beliefs_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                                ct.c_uint,               # numBeliefPointsToAdd
                                                ct.POINTER(ct.c_float))  # Bnew
_nova.pomdp_uninitialize_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])

# Functions from 'pomdp_expand_cpu.h'.
_nova.pomdp_expand_random_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                          ct.c_uint)  # numBeliefsToAdd
_nova.pomdp_expand_distinct_beliefs_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_expand_pema_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                        ct.POINTER(pav.POMDPAlphaVectors))  # policy

# Functions from 'pomdp_sigma_cpu.h'.
_nova.pomdp_sigma_cpu.argtypes = (ct.POINTER(NovaPOMDP),
                                  ct.c_uint,               # numDesiredNonZeroValues
                                  ct.POINTER(ct.c_float))  # sigma

# Functions from 'pomdp_model_gpu.h'.
_nova.pomdp_initialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])

# Functions from 'pomdp_expand_gpu.h'.
_nova.pomdp_expand_random_gpu.argtypes = (ct.POINTER(NovaPOMDP),
                                          ct.c_uint,  # numThreads
                                          ct.c_uint)  # numBeliefsToAdd
| mit |
RyanDJLee/pyta | tests/test_type_inference/test_listcomp.py | 1 | 2140 | import astroid
import nose
from hypothesis import settings, given, HealthCheck
from typing import List
import tests.custom_hypothesis_support as cs
settings.load_profile("pyta")
@given(cs.homogeneous_iterable)
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_homogeneous_iterable(iterable):
    """A single-target name comprehension over a homogeneous list should be
    inferred as List[T], where T is the iterable's element type."""
    src = f'[num for num in {repr(iterable)}]'
    module, _ = cs._parse_text(src)
    comp_node = next(module.nodes_of_class(astroid.ListComp))
    elem_type = comp_node.generators[0].iter.inf_type.getValue().__args__[0]
    assert comp_node.inf_type.getValue() == List[elem_type]
@given(cs.homogeneous_iterable)
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_heterogeneous_iterable(iterable):
    """Test Comprehension node visitor representing a comprehension expression with a single target and a
    name expression over a heterogeneous list."""
    # NOTE(review): the docstring says "heterogeneous list" but the @given
    # strategy above is cs.homogeneous_iterable, making this a duplicate of
    # the homogeneous test -- confirm whether cs.heterogeneous_iterable (or
    # similar) was intended.
    program = f'[num for num in {repr(iterable)}]'
    module, typeinferrer = cs._parse_text(program)
    listcomp_node = list(module.nodes_of_class(astroid.ListComp))[0]
    expected_type = List[listcomp_node.generators[0].iter.inf_type.getValue().__args__[0]]
    assert listcomp_node.inf_type.getValue() == expected_type
@given(cs.valid_identifier(min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_string(iterable):
    """A single-target name comprehension over a string should be inferred as
    List[T], where T is the inferred type of the string itself."""
    src = f'[num for num in {repr(iterable)}]'
    module, _ = cs._parse_text(src)
    comp_node = next(module.nodes_of_class(astroid.ListComp))
    iter_type = comp_node.generators[0].iter.inf_type.getValue()
    assert comp_node.inf_type.getValue() == List[iter_type]
if __name__ == '__main__':
nose.main()
| gpl-3.0 |
intelie/pycollector | src/helpers/yaml/error.py | 691 | 2559 |
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark(object):
    """A position (name, line, column, character offset) inside a YAML
    source buffer, used to point error messages at the offending spot."""

    def __init__(self, name, index, line, column, buffer, pointer):
        self.name = name
        self.index = index
        self.line = line
        self.column = column
        self.buffer = buffer
        self.pointer = pointer

    def get_snippet(self, indent=4, max_length=75):
        """Return an excerpt of the current line with a caret marking the
        position, or None when no buffer is available."""
        if self.buffer is None:
            return None
        breaks = u'\0\r\n\x85\u2028\u2029'
        # Scan left to the start of the line, clipping long lines with ' ... '.
        head = ''
        start = self.pointer
        while start > 0 and self.buffer[start-1] not in breaks:
            start -= 1
            if self.pointer-start > max_length/2-1:
                head = ' ... '
                start += 5
                break
        # Scan right to the end of the line, clipping symmetrically.
        tail = ''
        end = self.pointer
        while end < len(self.buffer) and self.buffer[end] not in breaks:
            end += 1
            if end-self.pointer > max_length/2-1:
                tail = ' ... '
                end -= 5
                break
        snippet = self.buffer[start:end].encode('utf-8')
        caret_pad = ' '*(indent+self.pointer-start+len(head))
        return ' '*indent + head + snippet + tail + '\n' + caret_pad + '^'

    def __str__(self):
        snippet = self.get_snippet()
        where = " in \"%s\", line %d, column %d" \
                % (self.name, self.line+1, self.column+1)
        if snippet is not None:
            where += ":\n"+snippet
        return where
class YAMLError(Exception):
    """Base class for all errors raised by the YAML machinery."""
    pass


class MarkedYAMLError(YAMLError):
    """A YAMLError that carries optional context/problem descriptions plus
    the Marks locating them in the source; __str__ renders a readable,
    multi-line report."""

    def __init__(self, context=None, context_mark=None,
                 problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note

    def __str__(self):
        parts = []
        if self.context is not None:
            parts.append(self.context)
        # Print the context mark only when it adds information beyond the
        # problem mark (different file or position), or when no problem mark
        # will be printed at all.
        show_context_mark = self.context_mark is not None and (
            self.problem is None or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column)
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note is not None:
            parts.append(self.note)
        return '\n'.join(parts)
| bsd-3-clause |
haoyunfeix/crosswalk-test-suite | apptools/apptools-android-tests/apptools/manifest_versionCode.py | 12 | 3242 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Verifies that `crosswalk-app build` honours the xwalk_app_version set
    in manifest.json and emits a matching android:versionCode."""

    def test_update_app_version(self):
        """Sets xwalk_app_version to "1", rebuilds, and checks that the
        versionCode reported by the build matches AndroidManifest.xml."""
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        manifest_path = comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json"
        # Rewrite the generated manifest with a known app version.  Context
        # managers guarantee the write is flushed and closed before the file
        # is read back below (the original passed an anonymous open(...) to
        # json.dump and never closed it, relying on refcounting to flush).
        with open(manifest_path, "r") as jsonfile:
            jsonDict = json.loads(jsonfile.read())
        jsonDict["xwalk_app_version"] = "1"
        with open(manifest_path, "w") as jsonfile:
            json.dump(jsonDict, jsonfile)
        with open(manifest_path) as json_file:
            data = json.load(json_file)
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        buildstatus = os.popen(buildcmd).readlines()
        # Scan the build output backwards for the last line reporting the
        # chosen android:versionCode.
        index = 0
        for x in range(len(buildstatus), 0, -1):
            index = x - 1
            if buildstatus[index].find("Using android:versionCode") != -1:
                break
        # NOTE(review): str.strip() takes a character *set*; it happens to
        # work for this message but a regex would be safer -- behavior kept.
        versionCode = buildstatus[index].strip(" *\nUsing android:versionCode").split(' ')[-1][1:-1]
        # android:versionCode is namespaced in the XML, so match on suffix.
        root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
        versionCode_xml = None
        for attr in root.attrib.keys():
            if attr.find("versionCode") != -1:
                versionCode_xml = root.attrib[attr]
                break
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(data['xwalk_app_version'].strip(os.linesep), "1")
        self.assertEquals(versionCode, versionCode_xml)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
xen0l/ansible | lib/ansible/modules/remote_management/oneview/oneview_fcoe_network_facts.py | 125 | 2632 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_fcoe_network_facts
short_description: Retrieve the facts about one or more of the OneView FCoE Networks
description:
- Retrieve the facts about one or more of the FCoE Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- FCoE Network name.
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all FCoE Networks
oneview_fcoe_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=fcoe_networks
- name: Gather paginated, filtered and sorted facts about FCoE Networks
oneview_fcoe_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 0
count: 3
sort: 'name:descending'
filter: 'vlanId=2'
delegate_to: localhost
- debug: var=fcoe_networks
- name: Gather facts about a FCoE Network by name
oneview_fcoe_network_facts:
config: /etc/oneview/oneview_config.json
name: Test FCoE Network Facts
delegate_to: localhost
- debug: var=fcoe_networks
'''
RETURN = '''
fcoe_networks:
description: Has all the OneView facts about the FCoE Networks.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class FcoeNetworkFactsModule(OneViewModuleBase):
    """Ansible facts module that fetches FCoE networks from OneView, either
    by name or with optional pagination/filter/sort parameters."""

    def __init__(self):
        super(FcoeNetworkFactsModule, self).__init__(additional_arg_spec=dict(
            name=dict(type='str'),
            params=dict(type='dict'),
        ))

    def execute_module(self):
        """Return the gathered facts; never reports a change."""
        name = self.module.params['name']
        client = self.oneview_client.fcoe_networks
        if name:
            networks = client.get_by('name', name)
        else:
            networks = client.get_all(**self.facts_params)
        return dict(changed=False,
                    ansible_facts=dict(fcoe_networks=networks))
def main():
    """Module entry point: instantiate the facts module and run it."""
    module = FcoeNetworkFactsModule()
    module.run()
| gpl-3.0 |
bkloppenborg/Celero | test/gtest-1.7.0/test/gtest_list_tests_unittest.py | 1898 | 6515 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
  """Runs gtest_list_tests_unittest_ with *args* and returns its output."""
  process = gtest_test_utils.Subprocess([EXE_PATH] + args, capture_stderr=False)
  return process.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value:         value of the --gtest_list_tests flag;
                          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
                          output after running command;
                          None means the output must NOT look like a
                          test listing.
      other_flag:         a different flag to be passed to command
                          along with gtest_list_tests;
                          None if the flag should not be present.
    """
    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'
    args = [flag]
    if other_flag is not None:
      args += [other_flag]
    output = Run(args)
    if expected_output_re:
      # Listing enabled: the output must match the expected listing regex.
      self.assert_(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      # Listing disabled: the output must NOT look like a test listing.
      self.assert_(
          not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"'%
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')
gtest_test_utils.Main()
| apache-2.0 |
gauribhoite/personfinder | env/site-packages/jinja2/loaders.py | 333 | 17380 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
    """Split a '/'-separated template name into path segments.

    Empty segments and '.' are dropped.  Any segment that contains the OS
    path separator (or altsep) or equals '..' raises `TemplateNotFound`,
    which keeps template lookups from escaping the search path.
    """
    pieces = []
    for segment in template.split('/'):
        unsafe = (path.sep in segment
                  or (path.altsep and path.altsep in segment)
                  or segment == path.pardir)
        if unsafe:
            raise TemplateNotFound(template)
        if segment and segment != '.':
            pieces.append(segment)
    return pieces
class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as unicode string or a ASCII bytestring.  The filename should
        be the name of the file on the filesystem if it was loaded from there,
        otherwise `None`.  The filename is used by python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        # Hand the compiled code to the environment's template class together
        # with the uptodate callable so auto-reload keeps working.
        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
class FileSystemLoader(BaseLoader):
    """Loads templates from directories on the file system.

    Accepts a single search path or a list of paths which are consulted in
    order::

        >>> loader = FileSystemLoader('/path/to/templates')
        >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    The template encoding defaults to ``'utf-8'`` and can be overridden via
    the `encoding` parameter.  Set *followlinks* to ``True`` to traverse
    symbolic links when listing templates.

    .. versionchanged:: 2.8+
       The *followlinks* parameter was added.
    """

    def __init__(self, searchpath, encoding='utf-8', followlinks=False):
        # Normalize a single path into a list so lookup code is uniform.
        if isinstance(searchpath, string_types):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for root in self.searchpath:
            filename = path.join(root, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()

            # Capture the mtime now; the closure compares against it so the
            # environment can detect on-disk changes and reload.
            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        seen = set()
        for root in self.searchpath:
            for dirpath, dirnames, filenames in os.walk(
                    root, followlinks=self.followlinks):
                for filename in filenames:
                    # Convert the absolute path into a '/'-separated name
                    # relative to the search path.
                    template = os.path.join(dirpath, filename) \
                        [len(root):].strip(os.path.sep) \
                        .replace(os.path.sep, '/')
                    if template[:2] == './':
                        template = template[2:]
                    seen.add(template)
        return sorted(seen)
class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.  It is constructed with
    the name of the python package and the path to the templates in that
    package::

        loader = PackageLoader('mypackage', 'views')

    If the package path is not given, ``'templates'`` is assumed.

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.  Due to the nature
    of eggs it's only possible to reload templates if the package was loaded
    from the file system and not a zip file.
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        # Only a plain filesystem provider (DefaultProvider) can expose real
        # filenames/mtimes; zipped eggs cannot, so reloads are disabled then.
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        p = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(p):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(self.manager, p)
            # mtime-based uptodate check, only possible for on-disk packages.
            mtime = path.getmtime(filename)
            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, p)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        path = self.package_path
        if path[:2] == './':
            path = path[2:]
        elif path == '.':
            path = ''
        offset = len(path)
        results = []
        # Recursively walk the package's resource tree, collecting template
        # names relative to the package path.
        def _walk(path):
            for filename in self.provider.resource_listdir(path):
                fullname = path + '/' + filename
                if self.provider.resource_isdir(fullname):
                    _walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))
        _walk(path)
        results.sort()
        return results
class DictLoader(BaseLoader):
    """Loads templates from a mapping of template name to source string.

    Mainly useful for unit tests::

        >>> loader = DictLoader({'index.html': 'source here'})

    Auto reloading is effectively disabled: a template only counts as
    outdated when its entry in the mapping changes.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        try:
            source = self.mapping[template]
        except KeyError:
            raise TemplateNotFound(template)
        # Still fresh as long as the mapping holds the identical source.
        return source, None, lambda: source == self.mapping.get(template)

    def list_templates(self):
        return sorted(self.mapping)
class FunctionLoader(BaseLoader):
    """Delegates loading to a user-supplied callable.

    The callable receives the template name and must return either the
    template source as a unicode string, a ``(source, filename,
    uptodatefunc)`` tuple, or `None` when the template does not exist::

        >>> def load_template(name):
        ...     if name == 'index.html':
        ...         return '...'
        ...
        >>> loader = FunctionLoader(load_template)

    `uptodatefunc` follows the :meth:`BaseLoader.get_source` contract: it is
    called when auto-reload is enabled and returns `True` while the template
    is still current.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        rv = self.load_func(template)
        if rv is None:
            raise TemplateNotFound(template)
        if isinstance(rv, string_types):
            # Bare source string: no filename, no reload support.
            return rv, None, None
        return rv
class PrefixLoader(BaseLoader):
    """Dispatches to one of several loaders based on a template-name prefix.

    The prefix is separated from the rest of the name by *delimiter*
    (``'/'`` by default)::

        loader = PrefixLoader({
            'app1':     PackageLoader('mypackage.app1'),
            'app2':     PackageLoader('mypackage.app2')
        })

    Loading ``'app1/index.html'`` consults the first loader,
    ``'app2/index.html'`` the second.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template):
        """Resolve *template* to ``(loader, remaining_name)`` or raise
        `TemplateNotFound` when the prefix is missing or unknown."""
        prefix, sep, rest = template.partition(self.delimiter)
        if not sep:
            # No delimiter at all -- there is no prefix to dispatch on.
            raise TemplateNotFound(template)
        try:
            return self.mapping[prefix], rest
        except KeyError:
            raise TemplateNotFound(template)

    def get_source(self, environment, template):
        loader, name = self.get_loader(template)
        try:
            return loader.get_source(environment, name)
        except TemplateNotFound:
            # Re-raise under the prefixed name the caller actually used.
            raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        loader, local_name = self.get_loader(name)
        try:
            return loader.load(environment, local_name, globals)
        except TemplateNotFound:
            # Re-raise under the prefixed name the caller actually used.
            raise TemplateNotFound(name)

    def list_templates(self):
        return [prefix + self.delimiter + template
                for prefix, loader in iteritems(self.mapping)
                for template in loader.list_templates()]
class ChoiceLoader(BaseLoader):
    """Tries a sequence of loaders in order and uses the first that succeeds.

    ::

        >>> loader = ChoiceLoader([
        ...     FileSystemLoader('/path/to/user/templates'),
        ...     FileSystemLoader('/path/to/system/templates')
        ... ])

    Useful to let user-provided templates override builtin ones.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        for loader in self.loaders:
            try:
                return loader.get_source(environment, template)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        for loader in self.loaders:
            try:
                return loader.load(environment, name, globals)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(name)

    def list_templates(self):
        names = set()
        for loader in self.loaders:
            names.update(loader.list_templates())
        return sorted(names)
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references"""
    # NOTE(review): presumably plain module objects don't support weak
    # references; ModuleLoader registers a weakref callback on instances of
    # this subclass to auto-clean its sys.modules entry -- confirm.
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    has_source_access = False

    def __init__(self, path):
        # Unique fake package name per loader instance so multiple loaders
        # never collide in sys.modules.
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, string_types):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        # Stable, filesystem-safe key derived from the template name.
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        # Filename a precompiled template is stored under on disk.
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)
        # Re-use the submodule if a previous load already attached it.
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals)
| apache-2.0 |
googleads/googleads-python-lib | examples/ad_manager/v202011/team_service/update_teams.py | 1 | 2271 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates teams by changing its description.
To determine which teams exist, run get_all_teams.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
TEAM_ID = 'INSERT_TEAM_ID_HERE'
def main(client, team_id):
    """Update the description of the Ad Manager team identified by *team_id*.

    Args:
      client: an initialized ad_manager.AdManagerClient.
      team_id: id of the team to update.
    """
    # Initialize the appropriate service.
    team_service = client.GetService('TeamService', version='v202011')
    # Build a statement that selects a single team by its ID.
    builder = ad_manager.StatementBuilder(version='v202011')
    builder = builder.Where('id = :teamId')
    statement = builder.WithBindVariable('teamId', int(team_id))
    # Fetch the matching teams.
    response = team_service.getTeamsByStatement(statement.ToStatement())
    if 'results' in response and len(response['results']):
        # Change the description on every local team object.
        updated_teams = response['results']
        for team in updated_teams:
            team['description'] = 'this team is great!'
        # Push the modified teams back to the server.
        teams = team_service.updateTeams(updated_teams)
        # Report what was changed.
        for team in teams:
            print('Team with id "%s" and name "%s" was updated.'
                  % (team['id'], team['name']))
    else:
        print('No teams found to update.')
if __name__ == '__main__':
    # Load credentials/properties from googleads.yaml and run the example.
    client = ad_manager.AdManagerClient.LoadFromStorage()
    main(client, TEAM_ID)
| apache-2.0 |
sve-odoo/odoo | addons/portal_sale/__openerp__.py | 380 | 2183 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Sale',
    'version': '0.1',
    'complexity': 'easy',
    'description': """
This module adds a Sales menu to your portal as soon as sale and portal are installed.
======================================================================================
After installing this module, portal users will be able to access their own documents
via the following menus:
  - Quotations
  - Sale Orders
  - Delivery Orders
  - Products (public ones)
  - Invoices
  - Payments/Refunds
If online payment acquirers are configured, portal users will also be given the opportunity to
pay online on their Sale Orders and Invoices that are not paid yet. Paypal is included
by default, you simply need to configure a Paypal account in the Accounting/Invoicing settings.
""",
    'author': 'OpenERP SA',
    'depends': ['sale', 'portal', 'payment'],
    'data': [
        'security/portal_security.xml',
        'portal_sale_view.xml',
        'portal_sale_data.xml',
        'res_config_view.xml',
        'security/ir.model.access.csv',
    ],
    'auto_install': True,
    # Bug fix: this dict previously contained a duplicate 'category' key
    # ('Tools' near the top and 'Hidden' here). Python keeps only the last
    # value, so 'Hidden' was always the effective category; the dead
    # 'Tools' entry has been removed. 'Hidden' keeps the module out of the
    # apps list; it is installed automatically via auto_install.
    'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
starius/wt-classes | examples/make-all.py | 1 | 1140 | #!/usr/bin/python
import sys
import re
from optparse import OptionParser
entrypoints = []
anchors = []
parser = OptionParser()
parser.add_option("--cpp", dest="cpp")
parser.add_option("--template", dest="template")
parser.add_option("--wrasterimage", dest="wrasterimage", action="store_true")
(options, args) = parser.parse_args()
remove_main = re.compile("int main.+\}", re.DOTALL)
for cpp in options.cpp.split():
if not cpp.endswith('all.cpp'):
sys.stdout.write(remove_main.sub("", open(cpp).read()))
low = re.split(r'[/\\]', cpp)[-1].split('.')[0]
if not options.wrasterimage and low == 'captcha':
continue
Cap = re.search(r"create([^\s]+)App", open(cpp).read()).groups()[0]
args = {'low': low, 'Cap': Cap}
entrypoints.append('''
addEntryPoint(Wt::Application, create%(Cap)sApp, "/%(low)s");
''' % args)
anchors.append('''
new WAnchor("%(low)s", "%(Cap)s", root());
new WBreak(root());
''' % args)
sys.stdout.write(open(options.template).read() %
{'entrypoints': ''.join(entrypoints), 'anchors': ''.join(anchors)})
| gpl-2.0 |
ToontownUprising/src | toontown/parties/ScrolledFriendList.py | 5 | 2401 | from direct.gui.DirectGui import DirectFrame, DirectButton, DirectLabel
from direct.gui.DirectGui import DirectScrolledList, DirectCheckButton
from direct.gui.DirectCheckBox import DirectCheckBox
from direct.gui import DirectGuiGlobals
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import Vec3, Vec4, PlaneNode, Plane, Point3, TextNode, VBase4, NodePath
class ScrolledFriendList(DirectScrolledList):
    """A scrolling list of friend names, optionally rendered as check boxes.

    Wraps DirectScrolledList with the party-invite art assets for the
    up/down scroll buttons. When ``makeItemsCheckBoxes`` is True each entry
    added by :meth:`addFriend` is a DirectCheckButton wired to
    ``clickCallback``; otherwise entries are plain DirectLabels.
    """
    def __init__(self, parent, gui, clickCallback = None, makeItemsCheckBoxes = False):
        # Whether addFriend() builds check buttons (True) or labels (False).
        self.makeItemsCheckBoxes = makeItemsCheckBoxes
        # Invoked when a check-box item is toggled (receives the widget via
        # extraArgs, see addFriend).
        self.clickCallback = clickCallback
        self.parent = parent
        # Model node holding the invite-button textures looked up below.
        self.gui = gui
        self.scrollSpeed = 1
        DirectScrolledList.__init__(self, parent=parent, relief=None, incButton_image=(self.gui.find('**/inviteButtonDown_up'), self.gui.find('**/inviteButtonDown_down'), self.gui.find('**/inviteButtonDown_rollover')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.03), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), decButton_image=(self.gui.find('**/inviteButtonUp_up'), self.gui.find('**/inviteButtonUp_down'), self.gui.find('**/inviteButtonUp_rollover')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.02), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_relief=None, forceHeight=0.084, numItemsVisible=8, items=[], incButtonCallback=self.scrollButtonPressed, decButtonCallback=self.scrollButtonPressed, itemFrame_pos=(0.0, 0.0, -0.01))
        # Clear the callbacks installed above; scrolling needs no extra
        # handling here.
        self.incButtonCallback = None
        self.decButtonCallback = None
        self.setForceHeight()
        return
    def scrollButtonPressed(self):
        # Intentional no-op: passed as inc/dec button callback in __init__.
        pass
    def addFriend(self, name, id):
        """Append one friend entry (check box or label) tagged with *id*."""
        if self.makeItemsCheckBoxes:
            checkedImage = self.gui.find('**/inviteButtonChecked')
            uncheckedImage = self.gui.find('**/inviteButtonUnchecked')
            widget = DirectCheckButton(relief=None, scale=0.1, boxBorder=0.08, boxImage=(uncheckedImage, checkedImage, None), boxImageScale=10.0, boxRelief=None, text=name, text_align=TextNode.ALeft, text_scale=0.7, text_pos=(-3.7, -0.25), command=self.clickCallback, indicator_pos=(-4.8, 0.0, 0.0))
            # Presumably passes the widget itself to clickCallback so the
            # handler can read its state — verify against DirectGui's
            # extraArgs semantics.
            widget['extraArgs'] = [widget]
        else:
            widget = DirectLabel(relief=None, text=name, text_align=TextNode.ALeft, text_pos=(-0.6, 0.0, 0.0), scale=0.055)
        # Stash the avatar id on the node for later retrieval.
        widget.setPythonTag('id', id)
        self.addItem(widget)
        return
| mit |
dcf21/meteor-pi | src/pythonModules/meteorpi_db/meteorpi_db/sql_builder.py | 2 | 15067 | # sql_builder.py
# -------------------------------------------------
# Copyright 2016 Cambridge Science Centre.
# This file is part of Meteor Pi.
# Meteor Pi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meteor Pi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meteor Pi. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------
# Helper functions to build SQL queries
def search_observations_sql_builder(search):
    """
    Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
    :class:`meteorpi_model.ObservationSearch`. This can then be used to retrieve the results of the search, materialise
    them into :class:`meteorpi_model.Observation` instances etc.

    :param ObservationSearch search:
        The search to realise
    :return:
        A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
    """
    b = SQLBuilder(tables="""archive_observations o
        INNER JOIN archive_semanticTypes s ON o.obsType=s.uid
        INNER JOIN archive_observatories l ON o.observatory=l.uid""", where_clauses=[])

    # Observatory / type / id constraints
    b.add_set_membership(search.obstory_ids, 'l.publicId')
    b.add_sql(search.observation_type, 's.name = %s')
    b.add_sql(search.observation_id, 'o.publicId = %s')
    # Time window
    b.add_sql(search.time_min, 'o.obsTime > %s')
    b.add_sql(search.time_max, 'o.obsTime < %s')
    # Geographic bounding box of the observatory
    b.add_sql(search.lat_min, 'l.latitude >= %s')
    b.add_sql(search.lat_max, 'l.latitude <= %s')
    b.add_sql(search.long_min, 'l.longitude >= %s')
    b.add_sql(search.long_max, 'l.longitude <= %s')
    b.add_metadata_query_properties(meta_constraints=search.meta_constraints, id_column="observationId", id_table="o")

    # Check for import / export filters
    if search.exclude_imported:
        # Bug fix: the closing parenthesis of this subquery was missing,
        # which produced invalid SQL whenever exclude_imported was set.
        b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_observationImport i WHERE i.observationId = o.uid)')
    if search.exclude_export_to is not None:
        b.where_clauses.append("""
        NOT EXISTS (SELECT * FROM archive_observationExport ex
        INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
        WHERE ex.observationId = o.uid AND c.exportConfigID = %s)
        """)
        b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
    return b
def search_obsgroups_sql_builder(search):
    """
    Build a :class:`meteorpi_db.SQLBuilder` realising a given
    :class:`meteorpi_model.ObservationGroupSearch`. The builder can then be
    used to retrieve the results of the search and materialise them into
    :class:`meteorpi_model.ObservationGroup` instances.

    :param ObservationGroupSearch search:
        The search to realise
    :return:
        A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
    """
    builder = SQLBuilder(tables="""archive_obs_groups g
        INNER JOIN archive_semanticTypes s ON g.semanticType=s.uid""", where_clauses=[])

    # Observatory constraint, expressed through group membership.
    builder.add_sql(search.obstory_name, """
        EXISTS (SELECT 1 FROM archive_obs_group_members x1
        INNER JOIN archive_observations x2 ON x2.uid=x1.observationId
        INNER JOIN archive_observatories x3 ON x3.uid=x2.observatory
        WHERE x1.groupId=g.uid AND x3.publicId=%s)""")
    builder.add_sql(search.semantic_type, 's.name = %s')
    # Membership of a specific observation.
    builder.add_sql(search.observation_id, """
        EXISTS (SELECT 1 FROM archive_obs_group_members y1
        INNER JOIN archive_observations y2 ON y2.uid=y1.observationId
        WHERE y1.groupId=g.uid AND y2.publicId=%s)""")
    builder.add_sql(search.group_id, 'g.publicId = %s')
    # Time window.
    builder.add_sql(search.time_min, 'g.time > %s')
    builder.add_sql(search.time_max, 'g.time < %s')
    builder.add_metadata_query_properties(
        meta_constraints=search.meta_constraints, id_column="groupId", id_table="g")
    return builder
def search_files_sql_builder(search):
    """
    Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
    :class:`meteorpi_model.FileRecordSearch`. This can then be used to retrieve the results of the search, materialise
    them into :class:`meteorpi_model.FileRecord` instances etc.

    :param FileRecordSearch search:
        The search to realise
    :return:
        A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
    """
    b = SQLBuilder(tables="""archive_files f
        INNER JOIN archive_semanticTypes s2 ON f.semanticType=s2.uid
        INNER JOIN archive_observations o ON f.observationId=o.uid
        INNER JOIN archive_semanticTypes s ON o.obsType=s.uid
        INNER JOIN archive_observatories l ON o.observatory=l.uid""", where_clauses=[])

    # Observatory / file / observation identity constraints
    b.add_set_membership(search.obstory_ids, 'l.publicId')
    b.add_sql(search.repository_fname, 'f.repositoryFname = %s')
    b.add_sql(search.observation_type, 's.name = %s')
    b.add_sql(search.observation_id, 'o.uid = %s')
    # Time window
    b.add_sql(search.time_min, 'f.fileTime > %s')
    b.add_sql(search.time_max, 'f.fileTime < %s')
    # Geographic bounding box of the observatory
    b.add_sql(search.lat_min, 'l.latitude >= %s')
    b.add_sql(search.lat_max, 'l.latitude <= %s')
    b.add_sql(search.long_min, 'l.longitude >= %s')
    b.add_sql(search.long_max, 'l.longitude <= %s')
    # File-specific constraints
    b.add_sql(search.mime_type, 'f.mimeType = %s')
    b.add_sql(search.semantic_type, 's2.name = %s')
    b.add_metadata_query_properties(meta_constraints=search.meta_constraints, id_column="fileId", id_table="f")

    # Check for import / export filters
    if search.exclude_imported:
        # Bug fix: the closing parenthesis of this subquery was missing,
        # which produced invalid SQL whenever exclude_imported was set.
        b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_observationImport i WHERE i.observationId = o.uid)')
    if search.exclude_export_to is not None:
        b.where_clauses.append("""
        NOT EXISTS (SELECT * FROM archive_fileExport ex
        INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
        WHERE ex.fileId = f.uid AND c.exportConfigID = %s)
        """)
        b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
    return b
def search_metadata_sql_builder(search):
    """
    Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
    :class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
    materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.

    :param ObservatoryMetadataSearch search:
        The search to realise
    :return:
        A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
    """
    b = SQLBuilder(tables="""archive_metadata m
        INNER JOIN archive_metadataFields f ON m.fieldId=f.uid
        INNER JOIN archive_observatories l ON m.observatory=l.uid""", where_clauses=["m.observatory IS NOT NULL"])

    # Observatory / field constraints
    b.add_set_membership(search.obstory_ids, 'l.publicId')
    b.add_sql(search.field_name, 'f.metaKey = %s')
    # Time window
    b.add_sql(search.time_min, 'm.time > %s')
    b.add_sql(search.time_max, 'm.time < %s')
    # Geographic bounding box of the observatory
    b.add_sql(search.lat_min, 'l.latitude >= %s')
    b.add_sql(search.lat_max, 'l.latitude <= %s')
    b.add_sql(search.long_min, 'l.longitude >= %s')
    b.add_sql(search.long_max, 'l.longitude <= %s')
    b.add_sql(search.item_id, 'm.publicId = %s')

    # Check for import / export filters
    if search.exclude_imported:
        # Bug fix: the closing parenthesis of this subquery was missing,
        # which produced invalid SQL whenever exclude_imported was set.
        b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_metadataImport i WHERE i.metadataId = m.uid)')
    if search.exclude_export_to is not None:
        b.where_clauses.append("""
        NOT EXISTS (SELECT * FROM archive_metadataExport ex
        INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
        WHERE ex.metadataId = m.uid AND c.exportConfigID = %s)
        """)
        b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
    return b
class SQLBuilder(object):
    """
    Helper class to make it easier to build large, potentially complex, SQL clauses.

    This class contains various methods to allow SQL queries to be built without having to manage enormous strings of
    SQL. It includes facilities to add metadata constraints. Also helps simplify the discovery and
    debugging of issues with generated queries as we can pull out the query strings directly from this object.
    """

    def __init__(self, tables, where_clauses=None):
        """
        Construct a new, empty, SQLBuilder.

        :param tables:
            A SQL fragment defining the tables used by this SQLBuilder, e.g. 't_file f'
        :param where_clauses:
            Optionally specify an initial list of WHERE clauses, defaults to an empty list. Clauses specified here
            must not include the string 'WHERE', but should be e.g. ['e.statusID = s.internalID']
        :ivar where_clauses:
            A list of strings of SQL, which will be prefixed by 'WHERE' to construct a constraint. As with the init
            parameter these will not include the 'WHERE' itself.
        :return:
            An unpopulated SQLBuilder, including any initial where clauses.
        """
        self.tables = tables
        self.sql_args = []
        # Bug fix: a missing 'else' here previously meant that the freshly
        # created empty list was immediately overwritten with None when no
        # where_clauses argument was supplied, breaking every later append.
        if where_clauses is None:
            where_clauses = []
        self.where_clauses = where_clauses

    @staticmethod
    def map_value(value):
        """
        Perform type translation of values to be inserted into SQL queries based on their types.

        Currently the identity mapping (None passes through unchanged); kept
        as the single hook point should driver-specific conversion become
        necessary.

        :param value:
            The value to map
        :return:
            The mapped value.
        """
        if value is None:
            return None
        else:
            return value

    def add_sql(self, value, clause):
        """
        Add a WHERE clause to the state, ignored entirely when *value* is None.

        :param value:
            The unknown to bind into the state. Uses SQLBuilder.map_value() to map this into an appropriate database
            compatible type.
        :param clause:
            A SQL fragment defining the restriction on the unknown value
        """
        if value is not None:
            self.sql_args.append(SQLBuilder.map_value(value))
            self.where_clauses.append(clause)

    def add_set_membership(self, values, column_name):
        """
        Append a set membership test, creating a query of the form 'WHERE name IN (?,?...?)'.

        :param values:
            A list of values, or a subclass of basestring. If this is non-None and non-empty this will add a set
            membership test to the state. If the supplied value is a basestring it will be wrapped in a single element
            list. Values are mapped by SQLBuilder.map_value before being added.
        :param column_name:
            The name of the column to use when checking the 'IN' condition.
        """
        if values is not None and len(values) > 0:
            # NOTE(review): 'basestring' implies Python 2; under Python 3
            # this check would need to use 'str' instead.
            if isinstance(values, basestring):
                values = [values]
            question_marks = ', '.join(["%s"] * len(values))
            self.where_clauses.append('{0} IN ({1})'.format(column_name, question_marks))
            for value in values:
                self.sql_args.append(SQLBuilder.map_value(value))

    def add_metadata_query_properties(self, meta_constraints, id_table, id_column):
        """
        Construct WHERE clauses from a list of MetaConstraint objects, adding them to the query state.

        :param meta_constraints:
            A list of MetaConstraint objects, each of which defines a condition over metadata which must be satisfied
            for results to be included in the overall query.
        :param id_table:
            Alias of the table whose uid is matched against the metadata rows.
        :param id_column:
            Column of archive_metadata that references the owning entity.
        :raises:
            ValueError if an unknown meta constraint type is encountered.
        """
        for mc in meta_constraints:
            meta_key = str(mc.key)
            ct = mc.constraint_type
            sql_template = """
                {0}.uid IN (
                    SELECT m.{1} FROM archive_metadata m
                    INNER JOIN archive_metadataFields k ON m.fieldId=k.uid
                    WHERE m.{2} {3} %s AND k.metaKey = %s
                )"""
            # Add metadata value to list of SQL arguments
            self.sql_args.append(SQLBuilder.map_value(mc.value))
            # Add metadata key to list of SQL arguments
            self.sql_args.append(meta_key)
            # Put an appropriate WHERE clause; numeric constraints use the
            # floatValue column, string equality uses stringValue.
            if ct == 'less':
                self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '<='))
            elif ct == 'greater':
                self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '>='))
            elif ct == 'number_equals':
                self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '='))
            elif ct == 'string_equals':
                self.where_clauses.append(sql_template.format(id_table, id_column, 'stringValue', '='))
            else:
                raise ValueError("Unknown meta constraint type!")

    def get_select_sql(self, columns, order=None, limit=0, skip=0):
        """
        Build a SELECT query based on the current state of the builder.

        :param columns:
            SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID'
        :param order:
            Optional ordering constraint, i.e. 'e.eventTime DESC'
        :param limit:
            Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed.
        :param skip:
            Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item
            available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed
            on the results and subsequent queries may return overlapping data randomly.
        :returns:
            A SQL SELECT query, which will make use of self.sql_args when executed.
        """
        sql = 'SELECT '
        sql += '{0} FROM {1} '.format(columns, self.tables)
        if len(self.where_clauses) > 0:
            sql += ' WHERE '
            sql += ' AND '.join(self.where_clauses)
        if order is not None:
            sql += ' ORDER BY {0}'.format(order)
        if limit > 0:
            sql += ' LIMIT {0} '.format(limit)
        if skip > 0:
            sql += ' OFFSET {0} '.format(skip)
        return sql

    def get_count_sql(self):
        """
        Build a SELECT query which returns the count of items for an unlimited SELECT.

        :return:
            A SQL SELECT query which returns the count of items for an unlimited query based on this SQLBuilder
        """
        sql = 'SELECT COUNT(*) FROM ' + self.tables
        if len(self.where_clauses) > 0:
            sql += ' WHERE '
            sql += ' AND '.join(self.where_clauses)
        return sql
| gpl-3.0 |
SMTorg/smt | smt/applications/mfk.py | 1 | 27540 | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <melimostafa@gmail.com>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of
order 1 (AR1)
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
from copy import deepcopy
import numpy as np
from scipy.linalg import solve_triangular
from scipy import linalg
from scipy.spatial.distance import cdist
from packaging import version
from sklearn import __version__ as sklversion
if version.parse(sklversion) < version.parse("0.22"):
from sklearn.cross_decomposition.pls_ import PLSRegression as pls
else:
from sklearn.cross_decomposition import PLSRegression as pls
from smt.surrogate_models.krg_based import KrgBased
from smt.sampling_methods import LHS
from smt.utils.kriging_utils import (
cross_distances,
componentwise_distance,
standardization,
differences,
)
class NestedLHS(object):
    def __init__(self, nlevel, xlimits, random_state=None):
        """
        Constructor where values of options can be passed in.

        Parameters
        ----------
        nlevel : integer.
            The number of design of experiments to be built
        xlimits : ndarray
            The interval of the domain in each dimension with shape (nx, 2)
        random_state : Numpy RandomState object or seed number which controls random draws
        """
        self.nlevel = nlevel
        self.xlimits = xlimits
        self.random_state = random_state

    def __call__(self, nb_samples_hifi):
        """
        Builds nlevel nested designs of experiments of dimension dim and size n_samples.

        Each doe is built with the optimized lhs procedure ("ese" criterion).
        Builds the highest level first; nested properties are ensured by
        deleting the nearest neighbours in lower levels of fidelity.

        Parameters
        ----------
        nb_samples_hifi: The number of samples of the highest fidelity model.
            nb_samples_fi(n-1) = 2 * nb_samples_fi(n)

        Returns
        ------
        list of length nlevel of designs of experiments from low to high fidelity level.
        """
        nt = []
        # Sizes double at each lower level: [2^(n-1)*m, ..., 2*m, m].
        for i in range(self.nlevel, 0, -1):
            nt.append(pow(2, i - 1) * nb_samples_hifi)
        # Sanity checks on the size list built above (the first one is
        # always satisfied by construction and acts as a safeguard).
        if len(nt) != self.nlevel:
            raise ValueError("nt must be a list of nlevel elements")
        if np.allclose(np.sort(nt)[::-1], nt) == False:
            raise ValueError("nt must be a list of decreasing integers")
        doe = []
        p0 = LHS(xlimits=self.xlimits, criterion="ese", random_state=self.random_state)
        doe.append(p0(nt[0]))
        # Draw an independent LHS for each remaining (higher-fidelity) level.
        for i in range(1, self.nlevel):
            p = LHS(
                xlimits=self.xlimits, criterion="ese", random_state=self.random_state
            )
            doe.append(p(nt[i]))
        # Enforce nesting from the highest level downwards: in level i-1,
        # replace the nearest neighbour of each point of level i by the
        # point itself.
        for i in range(1, self.nlevel)[::-1]:
            ind = []
            # Pairwise distances between level i points and level i-1 points.
            d = cdist(doe[i], doe[i - 1], "euclidean")
            for j in range(doe[i].shape[0]):
                dj = np.sort(d[j, :])
                k = dj[0]
                l = (np.where(d[j, :] == k))[0][0]
                m = 0
                # If the nearest neighbour was already claimed by a previous
                # point, walk to the next-nearest unclaimed one.
                while l in ind:
                    m = m + 1
                    k = dj[m]
                    l = (np.where(d[j, :] == k))[0][0]
                ind.append(l)
            # Remove the claimed neighbours and append the level-i points,
            # so level i-1 contains level i as a subset.
            doe[i - 1] = np.delete(doe[i - 1], ind, axis=0)
            doe[i - 1] = np.vstack((doe[i - 1], doe[i]))
        return doe
class MFK(KrgBased):
def _initialize(self):
super(MFK, self)._initialize()
declare = self.options.declare
declare(
"rho_regr",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type for rho",
)
declare(
"optim_var",
False,
types=bool,
values=(True, False),
desc="If True, the variance at HF samples is forced to zero",
)
declare(
"propagate_uncertainty",
True,
types=bool,
values=(True, False),
desc="If True, the variance cotribution of lower fidelity levels are considered",
)
self.name = "MFK"
def _differences(self, X, Y):
"""
Compute the distances
"""
return differences(X, Y)
def _check_list_structure(self, X, y):
"""
checks if the data structure is compatible with MFK.
sets class attributes such as (number of levels of Fidelity, training points in each level, ...)
Arguments :
X : list of arrays, each array corresponds to a fidelity level. starts from lowest to highest
y : same as X
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
if i > 1 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.nx = n_features[0]
self.nt_all = n_samples
self.nlvl = nlevel
self.ny = y[0].shape[1]
self.X = X[:]
self.y = y[:]
def _new_train(self):
"""
Overrides KrgBased implementation
Trains the Multi-Fidelity model
"""
self._new_train_init()
theta0 = self.options["theta0"].copy()
noise0 = self.options["noise0"].copy()
for lvl in range(self.nlvl):
self._new_train_iteration(lvl)
self.options["theta0"] = theta0
self.options["noise0"] = noise0
self._new_train_finalize(lvl)
def _new_train_init(self):
if self.name in ["MFKPLS", "MFKPLSK"]:
_pls = pls(self.options["n_comp"])
# As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while sklearn 0.23 returns zeroed x_rotations
# For now the try/except below is a workaround to restore the 0.23 behaviour
try:
# PLS is done on the highest fidelity identified by the key None
self.m_pls = _pls.fit(
self.training_points[None][0][0].copy(),
self.training_points[None][0][1].copy(),
)
self.coeff_pls = self.m_pls.x_rotations_
except StopIteration:
self.coeff_pls = np.zeros(
self.training_points[None][0][0].shape[1], self.options["n_comp"]
)
xt = []
yt = []
i = 0
while self.training_points.get(i, None) is not None:
xt.append(self.training_points[i][0][0])
yt.append(self.training_points[i][0][1])
i = i + 1
xt.append(self.training_points[None][0][0])
yt.append(self.training_points[None][0][1])
self._check_list_structure(xt, yt)
self._check_param()
X = self.X
y = self.y
_, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
)
nlevel = self.nlvl
# initialize lists
self.optimal_noise_all = nlevel * [0]
self.D_all = nlevel * [0]
self.F_all = nlevel * [0]
self.p_all = nlevel * [0]
self.q_all = nlevel * [0]
self.optimal_rlf_value = nlevel * [0]
self.optimal_par = nlevel * [{}]
self.optimal_theta = nlevel * [0]
self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
    def _new_train_iteration(self, lvl):
        """Fit the Gaussian process of fidelity level ``lvl``.

        Selects this level's normalised data and hyperparameter guesses,
        optionally estimates (heteroscedastic) noise, builds the regression
        matrix — augmented with the autoregressive rho term for levels > 0 —
        and optimises the hyperparameters, storing the results in the
        per-level containers allocated by ``_new_train_init``.
        """
        n_samples = self.nt_all
        # Restrict the hyperparameter guesses to this level's slice.
        self.options["noise0"] = np.array([self.options["noise0"][lvl]]).flatten()
        self.options["theta0"] = self.options["theta0"][lvl, :]
        self.X_norma = self.X_norma_all[lvl]
        self.y_norma = self.y_norma_all[lvl]
        if self.options["eval_noise"]:
            if self.options["use_het_noise"]:
                # hetGP works with unique design variables
                (
                    self.X_norma,
                    self.index_unique,  # mapping of each original row to its unique row
                    self.nt_reps,  # number of repetitions of each unique row
                ) = np.unique(
                    self.X_norma, return_inverse=True, return_counts=True, axis=0
                )
                self.nt_all[lvl] = self.X_norma.shape[0]
                # computing the mean of the output per unique design variable (see Binois et al., 2018)
                y_norma_unique = []
                for i in range(self.nt_all[lvl]):
                    y_norma_unique.append(np.mean(self.y_norma[self.index_unique == i]))
                y_norma_unique = np.array(y_norma_unique).reshape(-1, 1)
                # pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
                self.optimal_noise = self.options["noise0"] * np.ones(self.nt_all[lvl])
                for i in range(self.nt_all[lvl]):
                    diff = self.y_norma[self.index_unique == i] - y_norma_unique[i]
                    if np.sum(diff ** 2) != 0.0:
                        self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
                self.optimal_noise = self.optimal_noise / self.nt_reps
                self.optimal_noise_all[lvl] = self.optimal_noise
                self.y_norma = y_norma_unique
                # Store the deduplicated data back for later prediction use.
                self.X_norma_all[lvl] = self.X_norma
                self.y_norma_all[lvl] = self.y_norma
        else:
            # Fixed noise: rescale the user-supplied variance to the
            # normalised output space.
            self.optimal_noise = self.options["noise0"] / self.y_std ** 2
            self.optimal_noise_all[lvl] = self.optimal_noise

        # Calculate matrix of distances D between samples
        self.D_all[lvl] = cross_distances(self.X_norma)

        # Regression matrix and parameters
        self.F_all[lvl] = self._regression_types[self.options["poly"]](self.X_norma)
        self.p_all[lvl] = self.F_all[lvl].shape[1]

        # Concatenate the autoregressive part for levels > 0
        if lvl > 0:
            F_rho = self._regression_types[self.options["rho_regr"]](self.X_norma)
            self.q_all[lvl] = F_rho.shape[1]
            # AR1 model: the rho regression columns are scaled by the
            # previous level's prediction at this level's training points.
            self.F_all[lvl] = np.hstack(
                (
                    F_rho
                    * np.dot(
                        self._predict_intermediate_values(
                            self.X_norma, lvl, descale=False
                        ),
                        np.ones((1, self.q_all[lvl])),
                    ),
                    self.F_all[lvl],
                )
            )
        else:
            self.q_all[lvl] = 0

        n_samples_F_i = self.F_all[lvl].shape[0]

        if n_samples_F_i != n_samples[lvl]:
            raise Exception(
                "Number of rows in F and X do not match. Most "
                "likely something is going wrong with the "
                "regression model."
            )

        if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
            raise Exception(
                (
                    "Ordinary least squares problem is undetermined "
                    "n_samples=%d must be greater than the regression"
                    " model size p+q=%d."
                )
                % (n_samples_F_i, self.p_all[lvl] + self.q_all[lvl])
            )

        # Determine Gaussian Process model parameters
        self.F = self.F_all[lvl]
        D, self.ij = self.D_all[lvl]
        self._lvl = lvl
        self.nt = self.nt_all[lvl]
        self.q = self.q_all[lvl]
        self.p = self.p_all[lvl]
        (
            self.optimal_rlf_value[lvl],
            self.optimal_par[lvl],
            self.optimal_theta[lvl],
        ) = self._optimize_hyperparam(D)
        if self.options["eval_noise"] and not self.options["use_het_noise"]:
            # The estimated noise variance is appended to theta by the
            # optimiser; split it back off.
            tmp_list = self.optimal_theta[lvl]
            self.optimal_theta[lvl] = tmp_list[:-1]
            self.optimal_noise = tmp_list[-1]
            self.optimal_noise_all[lvl] = self.optimal_noise
        # Drop per-level scratch attributes before the next iteration.
        del self.y_norma, self.D, self.optimal_noise
def _new_train_finalize(self, lvl):
    """Optionally re-train the model with the estimated noise frozen.

    When both ``eval_noise`` and ``optim_var`` options are set, the
    training outputs of every level below the top one are replaced by
    the model's own predictions at the same inputs, and the model is
    trained once more with noise evaluation switched off.

    Parameters
    ----------
    lvl : int
        Fidelity level index.  NOTE(review): immediately shadowed by
        the loop variable below, so the argument value is never used —
        confirm whether the parameter can be dropped.
    """
    if self.options["eval_noise"] and self.options["optim_var"]:
        X = self.X
        # Overwrite each lower level's training outputs with the model
        # prediction one level up.
        for lvl in range(self.nlvl - 1):
            self.set_training_values(
                X[lvl], self._predict_intermediate_values(X[lvl], lvl + 1), name=lvl
            )
        # Highest-fidelity level (default training-value name).
        self.set_training_values(
            X[-1], self._predict_intermediate_values(X[-1], self.nlvl)
        )
        # Re-train with the noise term fixed instead of estimated.
        self.options["eval_noise"] = False
        self._new_train()
def _componentwise_distance(self, dx, opt=0):
    """Return componentwise distances for the configured correlation kernel."""
    return componentwise_distance(dx, self.options["corr"], self.nx)
def _predict_intermediate_values(self, X, lvl, descale=True):
    """
    Evaluates the model at a set of points.
    Used for training the model at level lvl.
    Allows to relax the order problem.

    Arguments
    ---------
    X : np.ndarray [n_evals, dim]
        Evaluation point input variable values.
    lvl : int
        Level at which the prediction is made (levels 0..lvl-1 are
        evaluated recursively).
    descale : bool
        If True, X is expressed in the original space and is normalized
        first, and the returned mean is de-normalized.

    Returns
    -------
    y : np.ndarray [n_evals, 1]
        Predicted mean at level lvl-1 (the last requested level).
    """
    n_eval, _ = X.shape
    # if n_features_X != self.n_features:
    # raise ValueError("Design must be an array of n_features columns.")
    # Calculate kriging mean and variance at level 0
    mu = np.zeros((n_eval, lvl))
    if descale:
        X = (X - self.X_offset) / self.X_scale
    f = self._regression_types[self.options["poly"]](X)
    f0 = self._regression_types[self.options["poly"]](X)
    dx = self._differences(X, Y=self.X_norma_all[0])
    d = self._componentwise_distance(dx)
    beta = self.optimal_par[0]["beta"]
    r_ = self._correlation_types[self.options["corr"]](
        self.optimal_theta[0], d
    ).reshape(n_eval, self.nt_all[0])
    gamma = self.optimal_par[0]["gamma"]
    # Scaled predictor
    mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
    # Calculate recursively kriging mean and variance at level i
    for i in range(1, lvl):
        g = self._regression_types[self.options["rho_regr"]](X)
        dx = self._differences(X, Y=self.X_norma_all[i])
        d = self._componentwise_distance(dx)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[i], d
        ).reshape(n_eval, self.nt_all[i])
        # Regression matrix: autoregressive term (rho * previous-level
        # mean) stacked on top of the polynomial trend.
        f = np.vstack((g.T * mu[:, i - 1], f0.T))
        beta = self.optimal_par[i]["beta"]
        gamma = self.optimal_par[i]["gamma"]
        # scaled predictor
        mu[:, i] = (np.dot(f.T, beta) + np.dot(r_, gamma)).ravel()
    # scaled predictor
    if descale:
        mu = mu * self.y_std + self.y_mean
    return mu[:, -1].reshape((n_eval, 1))
def _predict_values(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self._predict_intermediate_values(X, self.nlvl)
def _predict_variances(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self.predict_variances_all_levels(X)[0][:, -1]
def predict_variances_all_levels(self, X):
    """
    Evaluates the prediction variance at a set of points for every
    fidelity level.

    Arguments
    ---------
    X : np.ndarray [n_evals, dim]
        Evaluation point input variable values (original space).

    Returns
    -------
    MSE : np.ndarray [n_evals, nlevel]
        Prediction variance per point and per level, rescaled by y_std**2.
    sigma2_rhos : list of np.ndarray
        Autoregressive variance contributions for levels 1..nlevel-1.
    """
    # Initialization X = atleast_2d(X)
    nlevel = self.nlvl
    sigma2_rhos = []
    n_eval, n_features_X = X.shape
    # if n_features_X != self.n_features:
    # raise ValueError("Design must be an array of n_features columns.")
    X = (X - self.X_offset) / self.X_scale
    # Calculate kriging mean and variance at level 0
    mu = np.zeros((n_eval, nlevel))
    f = self._regression_types[self.options["poly"]](X)
    f0 = self._regression_types[self.options["poly"]](X)
    dx = self._differences(X, Y=self.X_norma_all[0])
    d = self._componentwise_distance(dx)
    # Get regression function and correlation
    F = self.F_all[0]
    C = self.optimal_par[0]["C"]
    beta = self.optimal_par[0]["beta"]
    Ft = solve_triangular(C, F, lower=True)
    # yt = solve_triangular(C, self.y_norma_all[0], lower=True)
    r_ = self._correlation_types[self.options["corr"]](
        self.optimal_theta[0], d
    ).reshape(n_eval, self.nt_all[0])
    gamma = self.optimal_par[0]["gamma"]
    # Scaled predictor
    mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
    self.sigma2_rho = nlevel * [None]
    MSE = np.zeros((n_eval, nlevel))
    r_t = solve_triangular(C, r_.T, lower=True)
    G = self.optimal_par[0]["G"]
    u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
    sigma2 = self.optimal_par[0]["sigma2"] / self.y_std ** 2
    # Standard kriging MSE at level 0 (noise term intentionally
    # commented out below).
    MSE[:, 0] = sigma2 * (
        # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
        1
        - (r_t ** 2).sum(axis=0)
        + (u_ ** 2).sum(axis=0)
    )
    # Calculate recursively kriging variance at level i
    for i in range(1, nlevel):
        F = self.F_all[i]
        C = self.optimal_par[i]["C"]
        g = self._regression_types[self.options["rho_regr"]](X)
        dx = self._differences(X, Y=self.X_norma_all[i])
        d = self._componentwise_distance(dx)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[i], d
        ).reshape(n_eval, self.nt_all[i])
        f = np.vstack((g.T * mu[:, i - 1], f0.T))
        Ft = solve_triangular(C, F, lower=True)
        yt = solve_triangular(C, self.y_norma_all[i], lower=True)
        r_t = solve_triangular(C, r_.T, lower=True)
        G = self.optimal_par[i]["G"]
        beta = self.optimal_par[i]["beta"]
        # scaled predictor
        sigma2 = self.optimal_par[i]["sigma2"] / self.y_std ** 2
        q = self.q_all[i]
        u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
        # Variance contribution of the autoregressive coefficient rho.
        sigma2_rho = np.dot(
            g,
            sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q]
            + np.dot(beta[:q], beta[:q].T),
        )
        sigma2_rho = (sigma2_rho * g).sum(axis=1)
        sigma2_rhos.append(sigma2_rho)
        if self.name in ["MFKPLS", "MFKPLSK"]:
            # PLS variants use a restricted-likelihood style estimate.
            p = self.p_all[i]
            Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
            MSE[:, i] = (
                # sigma2_rho * MSE[:, i - 1]
                +Q_ / (2 * (self.nt_all[i] - p - q))
                # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                * (1 - (r_t ** 2).sum(axis=0))
                + sigma2 * (u_ ** 2).sum(axis=0)
            )
        else:
            MSE[:, i] = sigma2 * (
                # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
                1
                - (r_t ** 2).sum(axis=0)
                + (u_ ** 2).sum(axis=0)
            )  # + sigma2_rho * MSE[:, i - 1]
        if self.options["propagate_uncertainty"]:
            # Carry the previous level's variance through rho.
            MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
    # scaled predictor
    MSE *= self.y_std ** 2
    return MSE, sigma2_rhos
def _predict_derivatives(self, x, kx):
    """
    Evaluates the derivatives at a set of points.

    Arguments
    ---------
    x : np.ndarray [n_evals, dim]
        Evaluation point input variable values
    kx : int
        The 0-based index of the input variable with respect to which derivatives are desired.

    Returns
    -------
    y : np.ndarray [n_evals, ]
        Derivative values, rescaled by self.y_std / self.X_scale[kx].

    Raises
    ------
    ValueError
        If the correlation kernel is not squar_exp, the trend is neither
        constant nor linear, or rho_regr is not constant.
    """
    lvl = self.nlvl
    # Initialization
    n_eval, n_features_x = x.shape
    x = (x - self.X_offset) / self.X_scale
    dy_dx = np.zeros((n_eval, lvl))
    if self.options["corr"] != "squar_exp":
        raise ValueError(
            "The derivative is only available for square exponential kernel"
        )
    if self.options["poly"] == "constant":
        df = np.zeros([n_eval, 1])
    elif self.options["poly"] == "linear":
        df = np.zeros((n_eval, self.nx + 1))
        df[:, 1:] = 1
    else:
        raise ValueError(
            "The derivative is only available for ordinary kriging or "
            + "universal kriging using a linear trend"
        )
    # Keep an unmodified copy of the trend derivative for the recursion.
    df0 = deepcopy(df)
    if self.options["rho_regr"] != "constant":
        raise ValueError(
            "The derivative is only available for regression rho constant"
        )
    # Get pairwise componentwise L1-distances to the input training set
    dx = self._differences(x, Y=self.X_norma_all[0])
    d = self._componentwise_distance(dx)
    # Compute the correlation function
    r_ = self._correlation_types[self.options["corr"]](
        self.optimal_theta[0], d
    ).reshape(n_eval, self.nt_all[0])
    # Beta and gamma = R^-1(y-FBeta)
    beta = self.optimal_par[0]["beta"]
    gamma = self.optimal_par[0]["gamma"]
    df_dx = np.dot(df, beta)
    # Signed distance along dimension kx to every level-0 training point.
    d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[0][:, kx].reshape(
        (1, self.nt_all[0])
    )
    theta = self._get_theta(0)
    # Analytic derivative of the squar_exp kernel.
    dy_dx[:, 0] = np.ravel((df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma)))
    # Calculate recursively derivative at level i
    for i in range(1, lvl):
        g = self._regression_types[self.options["rho_regr"]](x)
        dx = self._differences(x, Y=self.X_norma_all[i])
        d = self._componentwise_distance(dx)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[i], d
        ).reshape(n_eval, self.nt_all[i])
        df = np.vstack((g.T * dy_dx[:, i - 1], df0.T))
        beta = self.optimal_par[i]["beta"]
        gamma = self.optimal_par[i]["gamma"]
        df_dx = np.dot(df.T, beta)
        d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[i][:, kx].reshape(
            (1, self.nt_all[i])
        )
        theta = self._get_theta(i)
        # scaled predictor
        dy_dx[:, i] = np.ravel(df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma))
    return dy_dx[:, -1] * self.y_std / self.X_scale[kx]
def _get_theta(self, i):
return self.optimal_theta[i]
def _check_param(self):
    """
    Overrides KrgBased implementation
    This function checks some parameters of the model.

    Validates the kernel choice against the model variant and
    broadcasts the ``theta0`` and ``noise0`` options in place to one
    entry per fidelity level (and per dimension for theta0).

    Raises
    ------
    ValueError
        On unsupported kernels or mismatched theta0/noise0 lengths.
    """
    # Working dimension: number of PLS components for the PLS
    # variants, the raw input dimension otherwise.
    if self.name in ["MFKPLS", "MFKPLSK"]:
        d = self.options["n_comp"]
    else:
        d = self.nx

    if self.options["corr"] == "act_exp":
        raise ValueError("act_exp correlation function must be used with MGP")
    if self.name in ["MFKPLS"]:
        if self.options["corr"] not in ["squar_exp", "abs_exp"]:
            raise ValueError(
                "MFKPLS only works with a squared exponential or an absolute exponential kernel"
            )
    elif self.name in ["MFKPLSK"]:
        if self.options["corr"] not in ["squar_exp"]:
            raise ValueError(
                "MFKPLSK only works with a squared exponential kernel (until we prove the contrary)"
            )

    # Broadcast theta0 to shape (nlvl, d); an ndarray must already
    # have the right shape.
    if isinstance(self.options["theta0"], np.ndarray):
        if self.options["theta0"].shape != (self.nlvl, d):
            raise ValueError(
                "the dimensions of theta0 %s should coincide to the number of dim %s"
                % (self.options["theta0"].shape, (self.nlvl, d))
            )
    else:
        if len(self.options["theta0"]) != d:
            if len(self.options["theta0"]) == 1:
                # Single scalar: replicate over levels and dimensions.
                self.options["theta0"] *= np.ones((self.nlvl, d))
            elif len(self.options["theta0"]) == self.nlvl:
                # One value per level: replicate over dimensions.
                self.options["theta0"] = np.array(self.options["theta0"]).reshape(
                    -1, 1
                )
                self.options["theta0"] *= np.ones((1, d))
            else:
                raise ValueError(
                    "the length of theta0 (%s) should be equal to the number of dim (%s) or levels of fidelity (%s)."
                    % (len(self.options["theta0"]), d, self.nlvl)
                )
        else:
            # One value per dimension: replicate over levels.
            self.options["theta0"] *= np.ones((self.nlvl, 1))

    # Broadcast noise0 to one entry per fidelity level.
    if len(self.options["noise0"]) != self.nlvl:
        if len(self.options["noise0"]) == 1:
            self.options["noise0"] = self.nlvl * [self.options["noise0"]]
        else:
            raise ValueError(
                "the length of noise0 (%s) should be equal to the number of levels of fidelity (%s)."
                % (len(self.options["noise0"]), self.nlvl)
            )
    for i in range(self.nlvl):
        if self.options["use_het_noise"]:
            # Heteroscedastic noise: need one value per (unique)
            # training point of the level.
            if len(self.X[i]) == len(np.unique(self.X[i])):
                if len(self.options["noise0"][i]) != self.nt_all[i]:
                    if len(self.options["noise0"][i]) == 1:
                        self.options["noise0"][i] *= np.ones(self.nt_all[i])
                    else:
                        raise ValueError(
                            "for the level of fidelity %s, the length of noise0 (%s) should be equal to the number of observations (%s)."
                            % (i, len(self.options["noise0"][i]), self.nt_all[i])
                        )
        else:
            # Homoscedastic noise: exactly one value per level.
            if len(self.options["noise0"][i]) != 1:
                raise ValueError(
                    "for the level of fidelity %s, the length of noise0 (%s) should be equal to one."
                    % (i, len(self.options["noise0"][i]))
                )
| bsd-3-clause |
danalec/dotfiles | sublime/.config/sublime-text-3/Packages/anaconda_go/plugin/handlers_go/anagonda/context/motion.py | 2 | 3351 |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sys
import json
import shlex
from subprocess import PIPE
from process import spawn
from .error import AnaGondaError
from .base import AnaGondaContext
_go_get = 'github.com/fatih/motion'
class MotionError(AnaGondaError):
    """Raised when the Go `motion` tool fails or cannot be located."""
class Motion(AnaGondaContext):
    """Context to run motion tool into anaconda_go

    Builds the `motion` command line from the given file/directory,
    offset and mode, runs it, and returns its JSON output.
    """

    def __init__(self, fp, dp, offset, mode, include, pc, env_ctx):
        # fp: file path (or None), dp: directory path (or None),
        # offset: offset passed to -offset in -file scope,
        # mode: motion mode (e.g. 'decls' or 'prev'),
        # include: value for -include (or None),
        # pc: whether to pass -parse-comments.
        self.file_path = fp
        self.dir_path = dp
        self._offset = offset
        self.mode = mode
        self._include = include
        self._parse_comments = pc
        super(Motion, self).__init__(env_ctx, _go_get)

    def __enter__(self):
        """Check binary existence and perform command
        """
        super(Motion, self).__enter__()
        if not self._bin_found:
            raise MotionError('{0} not found...'.format(self.binary))
        return self.motion()

    @property
    def scope(self):
        """Determine the motion scope infering from arguments passed to
        """
        if self.file_path is None and self.dir_path is not None:
            return '-dir'
        if self.file_path is not None and self.dir_path is None:
            return '-file'
        if self.file_path is not None and self.dir_path is not None:
            # Both paths given: 'decls' mode operates on the directory,
            # every other mode on the single file.
            if self.mode == 'decls':
                return '-dir'
            return '-file'
        # NOTE(review): if both paths are None this implicitly returns
        # None, which then propagates into |path| and the command line —
        # confirm callers always pass at least one path.

    @property
    def path(self):
        """Return the right path based in the scope
        """
        return {
            '-dir': self.dir_path, '-file': self.file_path
        }.get(self.scope)

    @property
    def offset(self):
        """Return the offset always that -file scope is in use
        """
        # '-dir' scope maps to '' so no -offset flag is emitted.
        offset = {'-file': self._offset, '-dir': ''}.get(self.scope)
        if offset is not None and offset != '':
            offset = '-offset {0}'.format(offset)
        return offset if offset is not None else ''

    @property
    def parse_comments(self):
        """If parse comments is active add it to the command
        """
        return {True: '-parse-comments'}.get(self._parse_comments, '')

    @property
    def include(self):
        """If include is set return the whole syntax
        """
        return '-include {0}'.format(self._include) \
            if self._include is not None else ''

    def motion(self):
        """Run the motion command and return back json object with the results
        """
        # The binary is double-quoted and the path single-quoted so
        # shlex.split keeps each as one token; 'prev' mode adds -shift 1.
        args = shlex.split('"{0}" {1} \'{2}\' {3} -mode {4} {5}{6}{7}'.format(
            self.binary, self.scope, self.path,
            self.offset, self.mode, self.include, self.parse_comments,
            ' -shift 1' if self.mode == 'prev' else ''
        ))
        motion = spawn(args, stdout=PIPE, stderr=PIPE, env=self.env)
        out, err = motion.communicate()
        # Any stderr output is treated as a failure.
        if err is not None and len(err) > 0:
            if sys.version_info >= (3,):
                err = err.decode('utf8')
            raise MotionError(err)
        if sys.version_info >= (3,):
            out = out.decode('utf8')
        return json.loads(out)

    @property
    def binary(self):
        """Return back the binary path
        """
        return self.get_binary('motion')
| mit |
MSusik/invenio | invenio/modules/formatter/format_elements/bfe_meta_files.py | 3 | 2954 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - return the files of a record"""
from invenio.config import CFG_WEBSEARCH_ENABLE_GOOGLESCHOLAR
from invenio.modules.formatter.format_elements.bfe_fulltext import get_files
from invenio.legacy.bibdocfile.api import BibRecDocs, decompose_bibdocfile_url
def format_element(bfo, file_format='pdf'):
    """Return the files attached to this record, in order to be
    embedded as a Google Scholar tag.

    @param bfo: BibFormat object of the record being formatted
    @param file_format: the format to include in this output
    @return: one 'citation_pdf_url' meta tag per matching file URL
    """
    if not CFG_WEBSEARCH_ENABLE_GOOGLESCHOLAR:
        return ""
    bibarchive = BibRecDocs(bfo.recID)
    (files, old_versions_p, additionals_p) = get_files(bfo)
    filtered_files = []
    # First choice: non-hidden files of the 'Main' doctype.
    # (dict.has_key() is deprecated and removed in Python 3; use 'in'.)
    if 'main_urls' in files and 'Main' in files['main_urls']:
        filtered_files = [f[0] for f in files['main_urls']['Main']
                          if f[2] == file_format and
                          not url_is_hidden(f[0], bibarchive)]
    if not filtered_files:
        # Fall back to other doctypes
        if 'main_urls' in files:
            for doctype, list_of_files in files['main_urls'].items():
                filtered_files.extend([f[0] for f in list_of_files
                                       if f[2] == file_format and
                                       not url_is_hidden(f[0], bibarchive)])
    if not filtered_files:
        # Fall back to external urls
        if 'others_urls' in files:
            filtered_files.extend([file_url for file_url, file_name in files['others_urls']
                                   if file_url.endswith('.' + file_format)])
    tags = ['<meta name="citation_pdf_url" content="%s" />' % url for url in filtered_files]
    return "\n".join(tags)
def url_is_hidden(url, bibarchive):
    """
    Return True if the given URL should be hidden according to given
    BibRecDocs structure.

    @param url: bibdocfile URL to examine
    @param bibarchive: BibRecDocs instance for the record
    """
    try:
        (recid, docname, docformat) = decompose_bibdocfile_url(url)
        return bibarchive.get_bibdoc(docname).hidden_p(docformat)
    except Exception:
        # Not a decomposable bibdocfile URL or unknown document:
        # treat it as not hidden.  (Narrowed from a bare 'except:';
        # the unreachable trailing 'return False' was removed.)
        return False
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # The output is raw HTML meta tags, so BibFormat must not escape it.
    return 0
| gpl-2.0 |
EricRahm/log-spam-hell | logspam/bisect.py | 1 | 10157 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import Counter
from logspam import WARNING_RE
from logspam.cli import BaseCommandLineArgs
from logspam.logs import retrieve_test_logs
from mozregression.bisector import (
Bisector, Bisection, NightlyHandler, IntegrationHandler)
from mozregression.dates import parse_date
from mozregression.errors import DateFormatError
from mozregression.fetch_build_info import IntegrationInfoFetcher
from mozregression.fetch_configs import create_config
from mozregression.json_pushes import JsonPushes
from mozregression.log import init_logger
from mozregression.test_runner import TestRunner
import re
class WarningBisector(object):
    """Drives mozregression to find the changeset that introduced a
    given log-spam warning.

    ``good``/``bad`` may be dates (nightly bisection first, refined via
    taskcluster builds) or revisions (changeset bisection only).
    """

    def __init__(self, good, bad, platform, warning,
                 warning_limit, warning_re, ignore_lines,
                 required_test):
        init_logger()
        self.use_nightly = True
        try:
            self.good = parse_date(good)
            self.bad = parse_date(bad)
        except DateFormatError:
            # This hopefully a revision range. We can bypass nightly and
            # go directly to InboundHandler. That itself is a bit of a misnomer,
            # it will still bisect m-c builds, but by changeset range, not date
            # range.
            self.use_nightly = False
            self.good = good
            self.bad = bad

        self.ignore_lines = ignore_lines
        self.test_runner = WarningTestRunner(
            warning, platform,
            ignore_lines=ignore_lines,
            warning_re=warning_re,
            warning_limit=warning_limit,
            required_test=required_test)

        # Convert the platform to a mozregression friendly version.
        # Also avoid overwriting the os module by *not* using |os| for a
        # variable name.
        (_os, bits) = re.match(r'([a-zA-Z]+)-?([0-9]+)?', platform).groups()
        # BUG FIX: |bits| is captured as a *string* (or None); the old
        # check |bits not in (32, 64)| compared str to int and was always
        # true, silently forcing 32. Normalize to an int explicitly.
        bits = int(bits) if bits in ('32', '64') else 32

        # windows7-32
        # windows7-32-vm
        # win32
        # win64
        if '64' in platform:
            bits = 64

        if _os.startswith('win'):
            _os = 'win'

        print("_os = %s bits = %s" % (_os, bits))

        # TODO(ER): We might be able to ditch this.
        self.fetch_config = create_config('firefox', _os, int(bits))
        # Hardcode to m-c for now.
        self.fetch_config.set_repo('mozilla-central')
        self.fetch_config.set_build_type('debug')

        class FakeDownloadManager:
            # Bisector expects a download manager; builds are not
            # actually downloaded, so this is a no-op stub.
            def focus_download(self, foo):
                pass

        dm = FakeDownloadManager()
        self.bisector = Bisector(self.fetch_config, self.test_runner, dm, False, None)

    def bisect(self):
        """Run the bisection and return the final (good, bad) pair."""
        if self.use_nightly:
            result = self.bisect_nightly()
        else:
            result = self.bisect_inbound(self.good, self.bad)

        (good, bad) = result
        # Warn if the warning merely moved lines rather than appearing.
        if self.test_runner.check_for_move(self.fetch_config.repo, good):
            print("You should probably try bisecting again from the good revision")

        print("Done bisecting I guess")
        return result

    def bisect_nightly(self):
        """Bisect by nightly date range, then refine by changeset."""
        handler = NightlyHandler(ensure_good_and_bad=True)
        result = self.bisector.bisect(handler, self.good, self.bad)
        if result == Bisection.FINISHED:
            print("Got as far as we can go bisecting nightlies...")
            handler.print_range()
            print("Switching bisection method to taskcluster")
            result = self.bisect_inbound(handler.good_revision, handler.bad_revision)
        else:
            # TODO(ER): maybe this should be an exception...
            result = (None, None)

        return result

    def bisect_inbound(self, good_rev, bad_rev):
        """Bisect a changeset range, following merges into the
        integration branch once the m-c range is exhausted."""
        # Remember, InboundHandler is just a changeset based bisector. It will
        # still potentially bisect m-c first.
        handler = InboundHandler()
        result = self.bisector.bisect(handler, good_rev, bad_rev, expand=0)
        if result == Bisection.FINISHED:
            print("No more m-c revisions :(")
            handler.print_range()
            # Try switching over to the integration branch.
            if len(handler.build_range) == 2:
                result = handler.handle_merge()
                if result:
                    branch, good_rev, bad_rev = result
                    self.fetch_config.set_repo(branch)
                    return self.bisect_inbound(good_rev, bad_rev)

        return (handler.good_revision, handler.bad_revision)
class BisectCommandLineArgs(BaseCommandLineArgs):
    """Registers and handles the 'bisect' sub-command."""

    @staticmethod
    def do_bisect(args):
        """argparse callback: run a warning bisection with *args*."""
        print("do_bisect called")
        print(args)
        bisector = WarningBisector(args.good, args.bad, args.platform,
                                   args.warning, args.warning_limit,
                                   args.warning_re, args.ignore_lines,
                                   args.required_test)
        # TODO(ER): Get the pushlog for bad, check for the file the warning is
        # in in the changeset.
        (good, bad) = bisector.bisect()

    def add_command(self, p):
        """Add the 'bisect' sub-parser to the subparsers object *p*."""
        parser = p.add_parser('bisect',
            help='Attempts to find the changeset that introduced a given '
                 'warning through bisection.')
        self.add_arguments(parser)
        parser.set_defaults(func=BisectCommandLineArgs.do_bisect)

    def add_arguments(self, p):
        """Add positional and optional bisect arguments to parser *p*."""
        # TODO(ER): add a date/revision parser
        p.add_argument('good', action='store', default=None,
                       help='Last known good date. Will be validated.')
        p.add_argument('bad', action='store', default=None,
                       help='Last known bad date.')
        p.add_argument('warning', nargs='?',
                       help='The text of a warning you want the full details of.')
        # Shared options (e.g. platform) come from the base class.
        super(BisectCommandLineArgs, self).add_arguments(p)
        p.add_argument('--ignore-lines', action='store_true', default=False,
                       help='Ignore line numbers when bisecting warnings. Useful if' \
                            ' the line number of the warning has changed. Not so ' \
                            'useful if there are a lot of similar warnings in the ' \
                            'file.')
        p.add_argument('--warning-limit', action='store', type=int, default=1000,
                       help='The threshold of warnings for going from good to ' \
                            'bad. Default: 1000.')
        p.add_argument('--required-test', action='store', default=None,
                       help='Test that must be present to compare revisions')
class WarningTestRunner(TestRunner):
    """
    TestRunner to use in conjunction with bisection.

    Classifies each candidate build as good/bad/skip by counting how
    often the tracked warning appears in that build's test logs.
    """
    def __init__(self, warning, platform='linux64', ignore_lines=False,
                 warning_re=WARNING_RE, warning_limit=1000,
                 required_test=None):
        # warning: full warning text (normally ending in ', line N').
        # ignore_lines: count warnings regardless of line number.
        # warning_limit: count above which a build is considered bad.
        # required_test: substring a job name must contain for a build
        # to be evaluated at all ('' matches everything).
        TestRunner.__init__(self)
        self.warning = warning
        self.warning_re = warning_re
        self.platform = platform
        self.ignore_lines = ignore_lines
        self.warning_limit = warning_limit
        self.required_test = required_test or ""

    def check_for_move(self, repo, changeset):
        """
        Checks if the warning has moved lines but still exists.
        """
        if self.ignore_lines:
            return False

        files = retrieve_test_logs(
            repo, changeset[:12],
            self.platform, warning_re=self.warning_re)

        combined_warnings = Counter()
        for log in files:
            if log:
                combined_warnings.update(log.warnings)

        possible_move_found = False
        # NOTE(review): assumes self.warning ends in ', line N'; if it
        # does not, .group(1) raises AttributeError — confirm inputs.
        normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
        for (k, v) in combined_warnings.items():
            if k.startswith(normalized) and v > self.warning_limit:
                print("Possible line move:\n %d - %s" % (v, k))
                possible_move_found = True

        if possible_move_found:
            jp = JsonPushes(repo)
            push = jp.push(changeset)
            print("Try this date: %s" % push.utc_date)

        return possible_move_found

    def evaluate(self, build_info, allow_back=False):
        """Return 'g' (good), 'b' (bad) or 's' (skip) for a build."""
        files = retrieve_test_logs(
            build_info.repo_name, build_info.changeset[:12],
            self.platform, warning_re=self.warning_re)

        # Somewhat arbitrary, but we need to make sure there are enough tests
        # run in order to make a reasonable evaluation of the amount of
        # warnings present.
        if not files or len(files) < 20:
            # Tell the bisector to skip this build.
            print("Skipping build %s, not enough tests run" % build_info.changeset[:12])
            return 's'

        combined_warnings = Counter()
        found_test = False
        for log in files:
            if log:
                combined_warnings.update(log.warnings)
                if not found_test:
                    found_test = self.required_test in log.job_name

        if self.ignore_lines:
            # Sum counts of all warnings that share the text before the
            # line number.
            normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
            total = 0
            for (k, v) in combined_warnings.items():
                if k.startswith(normalized):
                    total += v
            print("%d - %s" % (total, normalized))
        else:
            total = combined_warnings[self.warning]
            print("%d - %s" % (total, self.warning))

        if not found_test:
            print("Skipping build %s, required test %s was not run" % (
                build_info.changeset[:12], self.required_test))
            return 's'

        if total > self.warning_limit:
            print("%d > %d" % (total, self.warning_limit))
            return 'b'
        else:
            print("%d <= %d" % (total, self.warning_limit))
            return 'g'

    def run_once(self, build_info):
        # TestRunner API: 0 means success (good build), non-zero bad.
        return 0 if self.evaluate(build_info) == 'g' else 1
| mpl-2.0 |
sudosurootdev/external_chromium_org | third_party/markdown/extensions/wikilinks.py | 109 | 7050 | # markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> print html
<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>
Whitespace behavior:
>>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>
>>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
<p>foo bar</p>
To define custom settings the simple way:
>>> print markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
MetaData should not carry over to next document:
>>> print md.convert("No [[MetaData]] here.")
<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> print md.convert('[[foo]]')
<p><a class="wikilink" href="/bar/">foo</a></p>
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import Pattern
from ..util import etree
import re
def build_url(label, base, end):
    """ Build a url from the label, a base, and an end. """
    # Collapse runs of spaces (and spaces adjacent to underscores)
    # into a single underscore.
    slug = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return base + slug + end
class WikiLinkExtension(Extension):
    """Markdown extension that converts [[WikiLinks]] into <a> elements."""

    def __init__(self, configs):
        # set extension defaults
        self.config = {
            'base_url' : ['/', 'String to append to beginning or URL.'],
            'end_url' : ['/', 'String to append to end of URL.'],
            'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url' : [build_url, 'Callable formats URL from label.'],
        }
        # Override defaults with user settings
        for key, value in configs :
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """Register the wikilink inline pattern on the Markdown instance."""
        self.md = md

        # append to end of inline patterns
        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
        wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
        wikilinkPattern.md = md
        # Insert before the 'not_strong' pattern in the pipeline.
        md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(Pattern):
    """Inline pattern turning a [[label]] match into an etree <a> element."""

    def __init__(self, pattern, config):
        super(WikiLinks, self).__init__(pattern)
        self.config = config

    def handleMatch(self, m):
        """Return an <a> element for the matched label, or '' if the
        label is empty/whitespace-only."""
        if m.group(2).strip():
            base_url, end_url, html_class = self._getMeta()
            label = m.group(2).strip()
            url = self.config['build_url'](label, base_url, end_url)
            a = etree.Element('a')
            a.text = label
            a.set('href', url)
            if html_class:
                a.set('class', html_class)
        else:
            # Empty [[ ]] produces no output.
            a = ''
        return a

    def _getMeta(self):
        """ Return meta data or config data. """
        # Document-level Meta values (set by the 'meta' extension)
        # override the extension configuration when present.
        base_url = self.config['base_url']
        end_url = self.config['end_url']
        html_class = self.config['html_class']
        if hasattr(self.md, 'Meta'):
            if 'wiki_base_url' in self.md.Meta:
                base_url = self.md.Meta['wiki_base_url'][0]
            if 'wiki_end_url' in self.md.Meta:
                end_url = self.md.Meta['wiki_end_url'][0]
            if 'wiki_html_class' in self.md.Meta:
                html_class = self.md.Meta['wiki_html_class'][0]
        return base_url, end_url, html_class
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the extension."""
    return WikiLinkExtension(configs=configs)
| bsd-3-clause |
petteyg/intellij-community | python/lib/Lib/compileall.py | 251 | 5283 | """Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, if compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
__all__ = ["compile_dir","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
                force=0, rx=None, quiet=0):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      if given, purported directory name (this is the
               directory name that will show up in error messages)
    force:     if 1, force compilation, even if timestamps are up-to-date
    rx:        if given, files whose full path matches this compiled
               regex are skipped
    quiet:     if 1, be quiet during compilation

    Returns 1 if every module compiled cleanly, 0 otherwise.
    """
    if not quiet:
        print 'Listing', dir, '...'
    try:
        names = os.listdir(dir)
    except os.error:
        # Unreadable directory: report it but keep going (success stays 1).
        print "Can't list", dir
        names = []
    names.sort()
    success = 1
    for name in names:
        fullname = os.path.join(dir, name)
        if ddir is not None:
            dfile = os.path.join(ddir, name)
        else:
            dfile = None
        if rx is not None:
            mo = rx.search(fullname)
            if mo:
                continue
        if os.path.isfile(fullname):
            head, tail = name[:-3], name[-3:]
            if tail == '.py':
                # .pyc when running unoptimized, .pyo under -O.
                cfile = fullname + (__debug__ and 'c' or 'o')
                ftime = os.stat(fullname).st_mtime
                try: ctime = os.stat(cfile).st_mtime
                except os.error: ctime = 0
                # Skip when the compiled file is already newer, unless forced.
                if (ctime > ftime) and not force: continue
                if not quiet:
                    print 'Compiling', fullname, '...'
                try:
                    ok = py_compile.compile(fullname, None, dfile, True)
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except py_compile.PyCompileError,err:
                    if quiet:
                        # Quiet mode did not announce the file yet; do so now
                        # so the error message has context.
                        print 'Compiling', fullname, '...'
                    print err.msg
                    success = 0
                except IOError, e:
                    print "Sorry", e
                    success = 0
                else:
                    if ok == 0:
                        success = 0
        elif maxlevels > 0 and \
             name != os.curdir and name != os.pardir and \
             os.path.isdir(fullname) and \
             not os.path.islink(fullname):
            # Recurse into subdirectories, decrementing the level budget.
            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, quiet):
                success = 0
    return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
    """Byte-compile all module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default true)
    maxlevels:   max recursion level (default 0)
    force: as for compile_dir() (default 0)
    quiet: as for compile_dir() (default 0)

    Returns 1 if every directory compiled cleanly, 0 otherwise.
    """
    success = 1
    for dir in sys.path:
        if (not dir or dir == os.curdir) and skip_curdir:
            print 'Skipping current directory'
        else:
            # Note: `and` short-circuits, so once a failure occurs the
            # remaining directories are still compiled via compile_dir's
            # own recursion but success stays 0.
            success = success and compile_dir(dir, maxlevels, None,
                                              force, quiet=quiet)
    return success
def main():
    """Script main program.

    Parses -l/-f/-q/-d/-x options, then compiles the given directories
    (or all of sys.path when none are given). Returns 1 on full success,
    0 on any failure or interrupt.
    """
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:')
    except getopt.error, msg:
        # Bad option: print usage and exit with status 2.
        print msg
        print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
              "[-x regexp] [directory ...]"
        print "-l: don't recurse down"
        print "-f: force rebuild even if timestamps are up-to-date"
        print "-q: quiet operation"
        print "-d destdir: purported directory name for error messages"
        print "   if no directory arguments, -l sys.path is assumed"
        print "-x regexp: skip files matching the regular expression regexp"
        print "   the regexp is search for in the full path of the file"
        sys.exit(2)
    maxlevels = 10
    ddir = None
    force = 0
    quiet = 0
    rx = None
    for o, a in opts:
        if o == '-l': maxlevels = 0
        if o == '-d': ddir = a
        if o == '-f': force = 1
        if o == '-q': quiet = 1
        if o == '-x':
            import re
            rx = re.compile(a)
    if ddir:
        # -d only makes sense for a single tree, since it renames the root.
        if len(args) != 1:
            print "-d destdir require exactly one directory argument"
            sys.exit(2)
    success = 1
    try:
        if args:
            for dir in args:
                if not compile_dir(dir, maxlevels, ddir,
                                   force, rx, quiet):
                    success = 0
        else:
            success = compile_path()
    except KeyboardInterrupt:
        print "\n[interrupt]"
        success = 0
    return success
if __name__ == '__main__':
    # Process exit status: 0 when everything compiled, 1 otherwise.
    exit_status = int(not main())
    sys.exit(exit_status)
| apache-2.0 |
arjoly/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
y = np.array(iris.target, dtype=int)

h = .02  # step size in the mesh

# Fit one classifier with a shared length-scale, one with a
# per-feature (anisotropic) length-scale.
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
    # Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, m_max]x[y_min, y_max].
    plt.subplot(1, 2, i + 1)

    Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    # (3 class probabilities are mapped directly onto RGB channels).
    Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
    plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title("%s, LML: %.3f" %
              (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))

plt.tight_layout()
plt.show()
| bsd-3-clause |
Kefkius/pycoin | pycoin/block.py | 13 | 6490 | # -*- coding: utf-8 -*-
"""
Parse and stream Bitcoin blocks as either Block or BlockHeader structures.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import struct
import io
from .encoding import double_sha256
from .merkle import merkle
from .serialize.bitcoin_streamer import parse_struct, stream_struct
from .serialize import b2h, b2h_rev
from .tx import Tx
class BadMerkleRootError(Exception):
    # Raised by Block.check_merkle_hash() when the Merkle root recomputed
    # from the transactions does not match the one stored in the header.
    pass
def difficulty_max_mask_for_bits(bits):
    """Expand a compact-format difficulty ("nBits") value into the full target.

    The compact format stores a base-256 exponent in the top byte and the
    mantissa in the low three bytes: target = mantissa * 256 ** (exponent - 3).
    """
    prefix = bits >> 24
    # BUG FIX: the mantissa occupies the low 23 bits (0x7fffff; bit 0x800000
    # is the sign flag).  The previous mask 0x7ffff (19 bits) silently
    # dropped the top four mantissa bits for any nBits with mantissa
    # >= 0x80000.
    mask = (bits & 0x7fffff) << (8 * (prefix - 3))
    return mask
class BlockHeader(object):
    """A BlockHeader is a block with the transaction data removed. With a
    complete Merkle tree database, it can be reconstructed from the
    merkle_root."""

    @classmethod
    def parse(self, f):
        """Parse the BlockHeader from the file-like object in the standard way
        that blocks are sent in the network (well, except we ignore the
        transaction information)."""
        # 80-byte header: version, prev hash, merkle root, time, bits, nonce.
        (version, previous_block_hash, merkle_root,
         timestamp, difficulty, nonce) = struct.unpack("<L32s32sLLL", f.read(4+32+32+4*3))
        return self(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)

    def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce):
        self.version = version
        self.previous_block_hash = previous_block_hash
        self.merkle_root = merkle_root
        self.timestamp = timestamp
        self.difficulty = difficulty
        self.nonce = nonce

    def hash(self):
        """Calculate the hash for the block header. Note that this has the bytes
        in the opposite order from how the header is usually displayed (so the
        long string of 00 bytes is at the end, not the beginning)."""
        # BUG FIX: the original tested hasattr(self, "__hash"), but inside a
        # class body the assignment self.__hash is name-mangled to
        # _BlockHeader__hash, so the literal "__hash" attribute was never
        # found and the digest was recomputed on every call.  Cache under an
        # unmangled attribute instead so the memoization actually works.
        if not hasattr(self, "_hash"):
            s = io.BytesIO()
            self.stream_header(s)
            self._hash = double_sha256(s.getvalue())
        return self._hash

    def stream_header(self, f):
        """Stream the block header in the standard way to the file-like object f."""
        stream_struct("L##LLL", f, self.version, self.previous_block_hash,
                      self.merkle_root, self.timestamp, self.difficulty, self.nonce)

    def stream(self, f):
        """Stream the block header in the standard way to the file-like object f.
        The Block subclass also includes the transactions."""
        self.stream_header(f)

    def id(self):
        """Returns the hash of the block displayed with the bytes in the order
        they are usually displayed in."""
        return b2h_rev(self.hash())

    def previous_block_id(self):
        """Returns the hash of the previous block, with the bytes in the order
        they are usually displayed in."""
        return b2h_rev(self.previous_block_hash)

    def __str__(self):
        return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())

    def __repr__(self):
        return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())
class Block(BlockHeader):
    """A Block is an element of the Bitcoin chain. Generating a block
    yields a reward!"""

    @classmethod
    def parse(self, f):
        """Parse the Block from the file-like object in the standard way
        that blocks are sent in the network."""
        # Header fields followed by a varint transaction count ("I").
        (version, previous_block_hash, merkle_root, timestamp,
            difficulty, nonce, count) = parse_struct("L##LLLI", f)
        txs = []
        for i in range(count):
            # Remember where each transaction starts within the block stream.
            offset_in_block = f.tell()
            tx = Tx.parse(f)
            txs.append(tx)
            tx.offset_in_block = offset_in_block
        block = self(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs)
        # Back-link every transaction to its containing block.
        for tx in txs:
            tx.block = block
        return block

    def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs):
        self.version = version
        self.previous_block_hash = previous_block_hash
        self.merkle_root = merkle_root
        self.timestamp = timestamp
        self.difficulty = difficulty
        self.nonce = nonce
        self.txs = txs

    def as_blockheader(self):
        # Return a header-only view (drops the transaction list).
        return BlockHeader(self.version, self.previous_block_hash, self.merkle_root,
                           self.timestamp, self.difficulty, self.nonce)

    def stream(self, f):
        """Stream the block in the standard way to the file-like object f."""
        stream_struct("L##LLLI", f, self.version, self.previous_block_hash,
                      self.merkle_root, self.timestamp, self.difficulty, self.nonce, len(self.txs))
        for t in self.txs:
            t.stream(f)

    def check_merkle_hash(self):
        """Raise a BadMerkleRootError if the Merkle hash of the
        transactions does not match the Merkle hash included in the block."""
        calculated_hash = merkle([tx.hash() for tx in self.txs], double_sha256)
        if calculated_hash != self.merkle_root:
            raise BadMerkleRootError(
                "calculated %s but block contains %s" % (b2h(calculated_hash), b2h(self.merkle_root)))

    def __str__(self):
        return "Block [%s] (previous %s) [tx count: %d]" % (
            self.id(), self.previous_block_id(), len(self.txs))

    def __repr__(self):
        return "Block [%s] (previous %s) [tx count: %d] %s" % (
            self.id(), self.previous_block_id(), len(self.txs), self.txs)
| mit |
damycra/django-rest-framework | rest_framework/utils/breadcrumbs.py | 15 | 2005 | from __future__ import unicode_literals
from django.core.urlresolvers import get_script_prefix, resolve
def get_breadcrumbs(url):
    """
    Given a url returns a list of breadcrumbs, which are each a
    tuple of (name, url).
    """
    # Imported here (not module level) to avoid import cycles with
    # rest_framework.settings/views.
    from rest_framework.settings import api_settings
    from rest_framework.views import APIView

    view_name_func = api_settings.VIEW_NAME_FUNCTION

    def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen):
        """
        Add tuples of (name, url) to the breadcrumbs list,
        progressively chomping off parts of the url.
        """
        try:
            (view, unused_args, unused_kwargs) = resolve(url)
        except Exception:
            # URL fragment does not resolve to a view; just keep chomping.
            pass
        else:
            # Check if this is a REST framework view,
            # and if so add it to the breadcrumbs
            cls = getattr(view, 'cls', None)
            if cls is not None and issubclass(cls, APIView):
                # Don't list the same view twice in a row.
                # Probably an optional trailing slash.
                if not seen or seen[-1] != view:
                    suffix = getattr(view, 'suffix', None)
                    name = view_name_func(cls, suffix)
                    breadcrumbs_list.insert(0, (name, prefix + url))
                    seen.append(view)

        if url == '':
            # All done
            return breadcrumbs_list

        elif url.endswith('/'):
            # Drop trailing slash off the end and continue to try to
            # resolve more breadcrumbs
            url = url.rstrip('/')
            return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)

        # Drop trailing non-slash off the end and continue to try to
        # resolve more breadcrumbs
        url = url[:url.rfind('/') + 1]
        return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)

    # Strip the deployment prefix (e.g. WSGI script prefix) before resolving.
    prefix = get_script_prefix().rstrip('/')
    url = url[len(prefix):]
    return breadcrumbs_recursive(url, [], prefix, [])
| bsd-2-clause |
onestarshang/flask_super_config | venv/lib/python2.7/site-packages/pip/utils/__init__.py | 82 | 26144 | from __future__ import absolute_import
import contextlib
import errno
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
# Public API of this utility module.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'ask', 'Inf', 'normalize_name', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'make_path_relative', 'normalize_path',
           'renames', 'get_terminal_size', 'get_prog',
           'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
           'captured_stdout', 'remove_tracebacks', 'ensure_dir',
           'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS']


logger = logging.getLogger(__name__)

# Archive extensions grouped by the tool needed to unpack them.
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS

try:
    import bz2  # noqa
    SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
except ImportError:
    # Python can be built without bz2; only advertise what we can unpack.
    logger.debug('bz2 module is not available')
    SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
    """Import and return the named package/module.

    On ImportError, raise *ExceptionType* constructed with *args/**kwargs
    instead, so callers can surface a domain-specific error.
    """
    try:
        module = __import__(pkg_or_module_string)
    except ImportError:
        raise ExceptionType(*args, **kwargs)
    return module
def ensure_dir(path):
    """os.makedirs that tolerates an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # An already-existing path is fine; anything else is a real error.
        if exc.errno == errno.EEXIST:
            return
        raise
def get_prog():
    """Return how pip was invoked, for use in usage/help messages."""
    try:
        invoked_as = os.path.basename(sys.argv[0])
    except (AttributeError, TypeError, IndexError):
        # sys.argv may be missing/odd under embedded interpreters.
        return 'pip'
    if invoked_as in ('__main__.py', '-c'):
        return "%s -m pip" % sys.executable
    return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
    # shutil.rmtree wrapped with retries (transient Windows file locking)
    # and an error handler that clears read-only bits before re-deleting.
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown. We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    # if file type currently read only
    if os.stat(path).st_mode & stat.S_IREAD:
        # convert to read/write
        os.chmod(path, stat.S_IWRITE)
        # use the original function to repeat the operation
        func(path)
        return
    else:
        # Not a read-only problem: re-raise the active exception (we are
        # called from inside rmtree's except block, so bare raise is valid).
        raise
def display_path(path):
    """Return *path* for display, shortened to './...' when under the cwd."""
    path = os.path.normcase(os.path.abspath(path))
    if sys.version_info[0] == 2:
        # Python 2: round-trip through the filesystem/default encodings so
        # undecodable bytes don't blow up when the result is printed.
        path = path.decode(sys.getfilesystemencoding(), 'replace')
        path = path.encode(sys.getdefaultencoding(), 'replace')
    cwd_prefix = os.getcwd() + os.path.sep
    if path.startswith(cwd_prefix):
        path = '.' + path[len(os.getcwd()):]
    return path
def backup_dir(dir, ext='.bak'):
    """Return a not-yet-existing name to back *dir* up to
    (dir.bak, dir.bak2, dir.bak3, ...)."""
    n = 1
    candidate = dir + ext
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def ask_path_exists(message, options):
    """Like ask(), but PIP_EXISTS_ACTION can pre-answer without prompting."""
    preset = os.environ.get('PIP_EXISTS_ACTION', '').split()
    applicable = [action for action in preset if action in options]
    if applicable:
        return applicable[0]
    return ask(message, options)
def ask(message, options):
    """Prompt interactively until the user answers with one of *options*."""
    while True:
        # Refuse to prompt in environments that declared themselves
        # non-interactive.
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception(
                'No input was expected ($PIP_NO_INPUT set); question: %s' %
                message
            )
        answer = input(message).strip().lower()
        if answer in options:
            return answer
        print(
            'Your response (%r) was not one of the expected responses: '
            '%s' % (answer, ', '.join(options))
        )
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf()  # this object is not currently used as a sortable in our code
del _Inf  # expose only the singleton; the class is an implementation detail
# Any character that is not a (case-insensitive) ASCII letter.
_normalize_re = re.compile(r'[^a-z]', re.I)


def normalize_name(name):
    """Lower-case *name* and replace every non-letter character with '-'."""
    return _normalize_re.sub('-', name.lower())
def format_size(bytes):
    """Render a byte count as a human-readable string (MB / kB / bytes)."""
    if bytes > 1000 * 1000:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        # Tens of kB and above: whole kilobytes are precise enough.
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path) and
            os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """
    Returns true if the page appears to be the index page of an svn repository
    """
    # Both markers must be present: the "Revision N:" title and the
    # "Powered by Subversion" footer (optionally inside a link).
    has_revision_title = re.search(r'<title>[^<]*Revision \d+:', html)
    has_svn_footer = re.search(
        r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    return has_revision_title and has_svn_footer
def file_contents(filename):
    """Read *filename* as raw bytes and decode it as UTF-8."""
    with open(filename, 'rb') as fp:
        raw = fp.read()
    return raw.decode('utf-8')
def split_leading_dir(path):
    """Split off the first path component; returns (first, remainder)."""
    path = str(path).lstrip('/').lstrip('\\')
    slash = path.find('/')
    backslash = path.find('\\')
    # Split on whichever separator appears first.
    if slash != -1 and (backslash == -1 or slash < backslash):
        return path.split('/', 1)
    if backslash != -1:
        return path.split('\\', 1)
    return path, ''


def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    first_prefix = None
    for path in paths:
        prefix, _rest = split_leading_dir(path)
        if not prefix:
            # A bare top-level entry means there is no single root dir.
            return False
        if first_prefix is None:
            first_prefix = prefix
        elif prefix != first_prefix:
            return False
    return True
def make_path_relative(path, rel_to):
"""
Make a filename relative, where the filename path, and it is
relative to rel_to
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path, resolve_symlinks=True):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    expanded = os.path.expanduser(path)
    if resolve_symlinks:
        expanded = os.path.realpath(expanded)
    else:
        expanded = os.path.abspath(expanded)
    return os.path.normcase(expanded)
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        # Fold ".tar" back into the extension: foo.tar.gz -> ('foo', '.tar.gz')
        base, ext = base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    new_dir, new_name = os.path.split(new)
    if new_dir and new_name and not os.path.exists(new_dir):
        os.makedirs(new_dir)

    # shutil.move copes with cross-device moves, unlike os.rename.
    shutil.move(old, new)

    # Prune now-empty parent directories of the old location.
    old_dir, old_name = os.path.split(old)
    if old_dir and old_name:
        try:
            os.removedirs(old_dir)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.

    If we're not in a virtualenv, all paths are considered "local."
    """
    if not running_under_virtualenv():
        return True
    return normalize_path(path).startswith(normalize_path(sys.prefix))


def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.
    """
    return is_local(dist_location(dist))


def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    norm_path = normalize_path(dist_location(dist))
    return norm_path.startswith(normalize_path(user_site))


def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in
    distutils.sysconfig.get_python_lib().
    """
    return normalize_path(
        dist_location(dist)
    ).startswith(normalize_path(site_packages))


def dist_is_editable(dist):
    """Is distribution an editable install?"""
    # TODO: factor out determining editableness out of FrozenRequirement
    # Imported here to avoid a circular import with the pip package root.
    from pip import FrozenRequirement
    req = FrozenRequirement.from_dist(dist, [])
    return req.editable
def get_installed_distributions(local_only=True,
                                skip=stdlib_pkgs,
                                include_editables=True,
                                editables_only=False,
                                user_only=False):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs

    If ``editables`` is False, don't report editables.

    If ``editables_only`` is True , only report editables.

    If ``user_only`` is True , only report installations in the user
    site directory.
    """
    # Each filter is expressed as a predicate so the final list
    # comprehension stays a simple conjunction of tests.
    if local_only:
        local_test = dist_is_local
    else:
        def local_test(d):
            return True

    if include_editables:
        def editable_test(d):
            return True
    else:
        def editable_test(d):
            return not dist_is_editable(d)

    if editables_only:
        def editables_only_test(d):
            return dist_is_editable(d)
    else:
        def editables_only_test(d):
            return True

    if user_only:
        user_test = dist_in_usersite
    else:
        def user_test(d):
            return True

    return [d for d in pkg_resources.working_set
            if local_test(d) and
            d.key not in skip and
            editable_test(d) and
            editables_only_test(d) and
            user_test(d)
            ]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)

    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations.

    This method will just return the first one found.
    """
    # Build the ordered list of candidate directories per the scenarios above.
    sites = []
    if running_under_virtualenv():
        if virtualenv_no_global():
            sites.append(site_packages)
        else:
            sites.append(site_packages)
            if user_site:
                sites.append(user_site)
    else:
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)

    for site in sites:
        egglink = os.path.join(site, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
    # Implicitly returns None when no .egg-link file was found.
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    egg_link = egg_link_path(dist)
    if egg_link:
        return egg_link
    return dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window."""
    def ioctl_GWINSZ(fd):
        # Ask the tty for its window size; returns (rows, cols) or None.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack(
                'hh',
                fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
            )
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Non-tty fds and platforms
            # without termios still fall through to the next candidate.
            return None
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdin/stdout/stderr are all redirected; try the controlling tty.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` for the same reason.
            pass
    if not cr:
        # Fall back to LINES/COLUMNS env vars, else a conservative 80x25.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
def current_umask():
    """Get the current umask which involves having to set it temporarily."""
    # os.umask both sets and returns; set to 0, then immediately restore.
    previous = os.umask(0)
    os.umask(previous)
    return previous
def unzip_file(filename, location, flatten=True):
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    with open(filename, 'rb') as zipfp:
        zf = zipfile.ZipFile(zipfp, allowZip64=True)
        # When the whole archive lives in one top directory, strip it off.
        strip_leading = has_leading_dir(zf.namelist()) and flatten
        for info in zf.infolist():
            name = info.filename
            data = zf.read(name)
            fn = name
            if strip_leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory entry.
                ensure_dir(fn)
                continue
            ensure_dir(dir)
            with open(fn, 'wb') as fp:
                fp.write(data)
            # The high 16 bits of external_attr hold the Unix mode.
            mode = info.external_attr >> 16
            # Regular file with any execute bit set for user/group/world?
            if mode and stat.S_ISREG(mode) and mode & 0o111:
                # make dest file have execute for user/group/world
                # (chmod +x); no-op on windows per python docs
                os.chmod(fn, (0o777 - current_umask() | 0o111))
def untar_file(filename, location):
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    # Pick the tarfile mode from the filename suffix.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        # Unknown suffix: warn and let tarfile auto-detect the compression.
        logger.warning(
            'Cannot determine compression type for file %s', filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                try:
                    # Private tarfile API; extracts the symlink member itself.
                    tar._extract_member(member, path)
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
                ensure_dir(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
def unpack_file(filename, location, content_type, link):
    # Dispatch on content type / extension / file magic to the matching
    # unpacker (zip, tar, or an svn checkout page).
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip' or
            filename.lower().endswith(ZIP_EXTENSIONS) or
            zipfile.is_zipfile(filename)):
        unzip_file(
            filename,
            location,
            # Wheels must keep their top-level directory layout intact.
            flatten=not filename.endswith('.whl')
        )
    elif (content_type == 'application/x-gzip' or
            tarfile.is_tarfile(filename) or
            filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS)):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html') and
            is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        # FIXME: handle?
        # FIXME: magic signatures?
        logger.critical(
            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
            'cannot detect archive format',
            filename, location, content_type,
        )
        raise InstallationError(
            'Cannot determine archive format of %s' % location
        )
def remove_tracebacks(output):
    """Strip SyntaxError/SyntaxWarning tracebacks that compileall prints."""
    syntax_pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
                      r'Syntax(?:Error|Warning): (?:.*)')
    cleaned = re.sub(syntax_pattern, '', output)
    if PY2:
        return cleaned
    # compileall.compile_dir() prints different messages to stdout
    # in Python 3
    return re.sub(r"\*\*\* Error compiling (?:.*)", '', cleaned)
def call_subprocess(cmd, show_stdout=True, cwd=None,
                    raise_on_returncode=True,
                    command_level=logging.DEBUG, command_desc=None,
                    extra_environ=None):
    """Run *cmd* (a list), optionally capturing stdout.

    When show_stdout is False, output is captured line-by-line, logged at
    debug level, and returned (with tracebacks stripped). On a nonzero exit
    code either raises InstallationError or logs a warning, depending on
    raise_on_returncode.
    """
    if command_desc is None:
        # Build a shell-ish description of the command for log messages.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    if show_stdout:
        # Inherit the parent's stdout; nothing is captured.
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.log(command_level, "Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception as exc:
        logger.critical(
            "Error %s while executing command %s", exc, command_desc,
        )
        raise
    all_output = []
    if stdout is not None:
        # Stream captured output line-by-line so long-running commands
        # still produce debug logging as they go.
        while True:
            line = console_to_str(proc.stdout.readline())
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + '\n')
            logger.debug(line)
    if not all_output:
        returned_stdout, returned_stderr = proc.communicate()
        all_output = [returned_stdout or '']
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.info(
                    'Complete output from command %s:', command_desc,
                )
                logger.info(
                    ''.join(all_output) +
                    '\n----------------------------------------'
                )
            raise InstallationError(
                'Command "%s" failed with error code %s in %s'
                % (command_desc, proc.returncode, cwd))
        else:
            logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc, proc.returncode, cwd,
            )
    if stdout is not None:
        # Only return output when it was captured (show_stdout=False).
        return remove_tracebacks(''.join(all_output))
def read_text_file(filename):
    """Return the contents of *filename*.

    Try to decode the file contents with utf-8, the preferred system encoding
    (e.g., cp1252 on some Windows machines), and latin1, in that order.
    Decoding a byte string with latin1 will never raise an error. In the worst
    case, the returned string will contain some garbage characters.
    """
    with open(filename, 'rb') as fp:
        data = fp.read()
    for enc in ('utf-8', locale.getpreferredencoding(False), 'latin1'):
        try:
            data = data.decode(enc)
        except UnicodeDecodeError:
            continue
        break
    assert type(data) != bytes  # Latin1 should have worked.
    return data
def _make_build_dir(build_dir):
    # Create the build directory (raises if it already exists) and drop the
    # marker file that flags the directory as safe to delete later.
    # NOTE(review): write_delete_marker_file is defined elsewhere in this file.
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
class FakeFile(object):
    """Present an iterable of lines as a minimal file-like object.

    Only readline() and iteration are supported -- just enough to keep
    ConfigParser happy when it is fed pre-split text.
    """
    def __init__(self, lines):
        self._line_iter = (line for line in lines)
    def readline(self):
        try:
            try:
                return next(self._line_iter)
            except NameError:
                # Ancient interpreters without the next() builtin.
                return self._line_iter.next()
        except StopIteration:
            # File-like objects signal EOF with an empty string.
            return ''
    def __iter__(self):
        return self._line_iter
class StreamWrapper(StringIO):
    """StringIO that remembers the stream it temporarily replaces.

    NOTE(review): from_stream() stores ``orig_stream`` on the *class*, not
    the instance, so nested or concurrent captures share (and clobber) a
    single saved stream -- confirm whether that is intentional before
    relying on nested captures.
    """
    @classmethod
    def from_stream(cls, orig_stream):
        """Build a wrapper that proxies ``encoding`` from *orig_stream*."""
        cls.orig_stream = orig_stream
        return cls()
    # compileall.compile_dir() needs stdout.encoding to print to stdout
    @property
    def encoding(self):
        return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
    """Temporarily replace ``sys.<stream_name>`` with a StreamWrapper.

    Used by captured_stdout/stdin/stderr; yields the replacement StringIO
    so callers can inspect what was written.  Taken from
    Lib/support/__init__.py in the CPython repo.
    """
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, StreamWrapper.from_stream(original))
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, original)
def captured_stdout():
    """Capture the output of sys.stdout:
    with captured_stdout() as stdout:
        print('hello')
    self.assertEqual(stdout.getvalue(), 'hello\n')
    Taken from Lib/support/__init__.py in the CPython repo.
    """
    # Thin convenience wrapper around captured_output() for stdout only.
    return captured_output('stdout')
class cached_property(object):
    """Descriptor that computes a value once, then caches it on the instance.

    On first access the wrapped function runs and its result is stored in the
    instance ``__dict__`` under the same name, shadowing this descriptor for
    all later reads.  Deleting the attribute resets the cache.
    Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
    """
    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func
    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: expose the descriptor object.
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
| gpl-2.0 |
sunu/oppia | extensions/rich_text_components/Image/Image.py | 15 | 2465 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
class Image(base.BaseRichTextComponent):
    """A rich-text component representing an inline image."""
    # Human-readable name and editor palette category for this component.
    name = 'Image'
    category = 'Basic Input'
    description = 'An image.'
    # Identifier the frontend uses to refer to this component.
    frontend_name = 'image'
    # Tooltip shown on the editor toolbar button.
    tooltip = 'Insert image'
    # Editor-facing customization arguments: the image file to display and
    # the alt text read out by screen readers.
    _customization_arg_specs = [{
        'name': 'filepath',
        'description': (
            'The name of the image file. (Allowed extensions: gif, jpeg, jpg, '
            'png.)'),
        'schema': {
            'type': 'custom',
            'obj_type': 'Filepath',
        },
        'default_value': '',
    }, {
        'name': 'alt',
        'description': 'Alt text (for screen readers)',
        'schema': {
            'type': 'unicode',
        },
        'default_value': '',
    }]
    # 16x16 PNG toolbar icon, inlined as a base64 data URL.
    icon_data_url = (
        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
        'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
        'TwAAAHwSURBVDjLpZM9a1RBFIafM/fevfcmC7uQ%0AjWEjUZKAYBHEVEb/gIWFjVVSWEj'
        '6gI0/wt8gprPQykIsTP5BQLAIhBVBzRf52Gw22bk7c8YiZslu%0AgggZppuZ55z3nfdIC'
        'IHrrBhg%2BePaa1WZPyk0s%2B6KWwM1khiyhDcvns4uxQAaZOHJo4nRLMtEJPpn%0AxY6'
        'Cd10%2BfNl4DpwBTqymaZrJ8uoBHfZoyTqTYzvkSRMXlP2jnG8bFYbCXWJGePlsEq8iPQ'
        'mFA2Mi%0AjEBhtpis7ZCWftC0LZx3xGnK1ESd741hqqUaqgMeAChgjGDDLqXkgMPTJtZ3'
        'KJzDhTZpmtK2OSO5%0AIRB6xvQDRAhOsb5Lx1lOu5ZCHV4B6RLUExvh4s%2BZntHhDJAx'
        'Sqs9TCDBqsc6j0iJdqtMuTROFBkI%0AcllCCGcSytFNfm1tU8k2GRo2pOI43h9ie6tOvT'
        'JFbORyDsJFQHKD8fw%2BP9dWqJZ/I96TdEa5Nb1A%0AOavjVfti0dfB%2Bt4iXhWvyh27'
        'y9zEbRRobG7t6fgVeqSoKvB5oIMQEODx7FLvIJo55KS9R7b5ldrD%0AReajpC%2BZ5z7G'
        'AHJFXn1exedVbG36ijwOmJgl0kS7lXtjD0DkLyqc70uPnSuIIwk9QCmWd%2B9XGnOF%0A'
        'DzP/M5xxBInhLYBcd5z/AAZv2pOvFcS/AAAAAElFTkSuQmCC%0A'
    )
| apache-2.0 |
morph027/ansible-modules-extras | monitoring/zabbix_screen.py | 18 | 17179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module documentation consumed by ansible-doc; keep it valid YAML.
# (Typo fixes: "concurent" -> "concurrent", "already been" -> "have already been".)
DOCUMENTATION = '''
---
module: zabbix_screen
short_description: Zabbix screen creates/updates/deletes
description:
    - This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
    - "(@cove)"
    - "Tony Minfei Ding"
    - "Harrison Gu (@harrisongu)"
requirements:
    - "python >= 2.6"
    - zabbix-api
options:
    server_url:
        description:
            - Url of Zabbix server, with protocol (http or https).
        required: true
        aliases: [ "url" ]
    login_user:
        description:
            - Zabbix user name.
        required: true
    login_password:
        description:
            - Zabbix user password.
        required: true
    http_login_user:
        description:
            - Basic Auth login
        required: false
        default: None
        version_added: "2.1"
    http_login_password:
        description:
            - Basic Auth password
        required: false
        default: None
        version_added: "2.1"
    timeout:
        description:
            - The timeout of API request (seconds).
        default: 10
    screens:
        description:
            - List of screens to be created/updated/deleted(see example).
            - If the screen(s) have already been added, the screen(s) name won't be updated.
            - When creating or updating screen(s), C(screen_name), C(host_group) are required.
            - When deleting screen(s), the C(screen_name) is required.
            - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
        required: true
notes:
    - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
'''

# Example playbooks shown by ansible-doc.
EXAMPLES = '''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    screens:
      - screen_name: ExampleScreen1
        host_group: Example group1
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100

# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    screens:
      - screen_name: ExampleScreen1
        host_group: Example group1
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100
      - screen_name: ExampleScreen2
        host_group: Example group2
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100

# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    state: present
    screens:
      - screen_name: ExampleScreen
        host_group: Example group
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100
  when: inventory_hostname==groups['group_name'][0]
'''
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import ZabbixAPIException
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# The zabbix-api python module is too old (version 1.0, and there's no higher
# version so far) to support the 'screenitem' api call, so we subclass
# ZabbixAPI to add 'screenitem' support.
class ZabbixAPIExtends(ZabbixAPI):
    """ZabbixAPI with an extra ``screenitem`` sub-client."""
    screenitem = None

    def __init__(self, server, timeout, user, passwd, **kwargs):
        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
        # Route 'screenitem.*' calls through a dedicated sub-client; caller
        # kwargs may override the prefix entry.
        config = {"prefix": "screenitem"}
        config.update(kwargs)
        self.screenitem = ZabbixAPISubClass(self, config)
class Screen(object):
    """Thin wrapper around the Zabbix API for screen management.

    Holds a reference to the AnsibleModule (for fail_json/exit_json and
    check_mode) and a logged-in Zabbix API client, and exposes helpers to
    look up host groups/graphs and to create, update and delete screens and
    their screen items.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # get group id by group name
    def get_host_group_id(self, group_name):
        """Return the groupid for *group_name*, failing the module if absent."""
        if group_name == "":
            self._module.fail_json(msg="group_name is required")
        hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
        if len(hostGroup_list) < 1:
            self._module.fail_json(msg="Host group not found: %s" % group_name)
        else:
            hostGroup_id = hostGroup_list[0]['groupid']
            return hostGroup_id
    # get monitored host_id by host_group_id
    def get_host_ids_by_group_id(self, group_id):
        """Return ids of all monitored hosts in the group, failing if none."""
        host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
        if len(host_list) < 1:
            self._module.fail_json(msg="No host in the group.")
        else:
            host_ids = []
            for i in host_list:
                host_id = i['hostid']
                host_ids.append(host_id)
            return host_ids
    # get screen
    def get_screen_id(self, screen_name):
        """Return the screenid for *screen_name*, or None if no match."""
        if screen_name == "":
            self._module.fail_json(msg="screen_name is required")
        try:
            screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
            if len(screen_id_list) >= 1:
                screen_id = screen_id_list[0]['screenid']
                return screen_id
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
    # create screen
    def create_screen(self, screen_name, h_size, v_size):
        """Create a screen and return its id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
            return screen['screenids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
    # update screen
    def update_screen(self, screen_id, screen_name, h_size, v_size):
        """Resize an existing screen (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
        except Exception as e:
            self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
    # delete screen
    def delete_screen(self, screen_id, screen_name):
        """Delete a screen by id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.delete([screen_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
    # get graph ids
    def get_graph_ids(self, hosts, graph_name_list):
        """Collect graph ids across *hosts*; also return the max per-host count.

        The second return value becomes the screen's vertical size and is at
        least 1 even when no graphs are found.
        """
        graph_id_lists = []
        vsize = 1
        for host in hosts:
            graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
            size = len(graph_id_list)
            if size > 0:
                graph_id_lists.extend(graph_id_list)
                if vsize < size:
                    vsize = size
        return graph_id_lists, vsize
    # getGraphs
    def get_graphs_by_host_id(self, graph_name_list, host_id):
        """Return ids of all graphs on *host_id* matching the given names."""
        graph_ids = []
        for graph_name in graph_name_list:
            graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
            graph_id_list = []
            if len(graphs_list) > 0:
                for graph in graphs_list:
                    graph_id = graph['graphid']
                    graph_id_list.append(graph_id)
            if len(graph_id_list) > 0:
                graph_ids.extend(graph_id_list)
        return graph_ids
    # get screen items
    def get_screen_items(self, screen_id):
        """Return all screen items currently placed on *screen_id*."""
        screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
        return screen_item_list
    # delete screen items
    def delete_screen_items(self, screen_id, screen_item_id_list):
        """Delete the given screen items; True if deleted, False if nothing to do.

        NOTE(review): a ZabbixAPIException is swallowed and the method then
        implicitly returns None (falsy); callers treat that like "nothing
        deleted" -- confirm this best-effort behavior is intentional.
        """
        try:
            if len(screen_item_id_list) == 0:
                return True
            screen_item_list = self.get_screen_items(screen_id)
            if len(screen_item_list) > 0:
                if self._module.check_mode:
                    self._module.exit_json(changed=True)
                self._zapi.screenitem.delete(screen_item_id_list)
                return True
            return False
        except ZabbixAPIException:
            pass
    # get screen's hsize and vsize
    def get_hsize_vsize(self, hosts, v_size):
        """Compute the screen grid size from host count and max graph count.

        NOTE(review): `(v_size - 1) / h_size + 1` relies on Python 2 integer
        division; under Python 3 this would produce a float -- verify before
        porting.
        """
        h_size = len(hosts)
        if h_size == 1:
            if v_size == 1:
                h_size = 1
            elif v_size in range(2, 9):
                h_size = 2
            else:
                h_size = 3
            v_size = (v_size - 1) / h_size + 1
        return h_size, v_size
    # create screen_items
    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
        """Place one screen item per (host, graph) pair on the screen.

        Width defaults to 500 for fewer than four hosts and 200 otherwise;
        height defaults to 100.  Already_Exists errors from the API are
        ignored so repeated runs are effectively idempotent.
        """
        if len(hosts) < 4:
            if width is None or width < 0:
                width = 500
        else:
            if width is None or width < 0:
                width = 200
        if height is None or height < 0:
            height = 100
        try:
            # when there're only one host, only one row is not good.
            if len(hosts) == 1:
                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
                for i, graph_id in enumerate(graph_id_list):
                    if graph_id is not None:
                        # NOTE(review): 'y': i / h_size assumes Python 2
                        # integer division.
                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                      'width': width, 'height': height,
                                                      'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
                                                      'elements': 0, 'valign': 0, 'halign': 0,
                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
            else:
                for i, host in enumerate(hosts):
                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
                    for j, graph_id in enumerate(graph_id_list):
                        if graph_id is not None:
                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                          'width': width, 'height': height,
                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
                                                          'elements': 0, 'valign': 0, 'halign': 0,
                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
        except Already_Exists:
            pass
def main():
    """Entry point for the zabbix_screen Ansible module.

    Parses the module arguments, logs in to the Zabbix API, then walks the
    requested screens and creates, updates or deletes each one, reporting
    the aggregate result through exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            timeout=dict(type='int', default=10),
            screens=dict(type='list', required=True)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        # Typo fix in the user-facing message: "requried" -> "required".
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    timeout = module.params['timeout']
    screens = module.params['screens']
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    screen = Screen(module, zbx)
    created_screens = []
    changed_screens = []
    deleted_screens = []
    for zabbix_screen in screens:
        screen_name = zabbix_screen['screen_name']
        screen_id = screen.get_screen_id(screen_name)
        # Per-screen state; anything other than an explicit "absent" means "present".
        state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
        if state == "absent":
            if screen_id:
                # Remove the screen's items first, then the screen itself.
                screen_item_list = screen.get_screen_items(screen_id)
                screen_item_id_list = []
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    screen_item_id_list.append(screen_item_id)
                screen.delete_screen_items(screen_id, screen_item_id_list)
                screen.delete_screen(screen_id, screen_name)
                deleted_screens.append(screen_name)
        else:
            host_group = zabbix_screen['host_group']
            graph_names = zabbix_screen['graph_names']
            graph_width = None
            if 'graph_width' in zabbix_screen:
                graph_width = zabbix_screen['graph_width']
            graph_height = None
            if 'graph_height' in zabbix_screen:
                graph_height = zabbix_screen['graph_height']
            host_group_id = screen.get_host_group_id(host_group)
            hosts = screen.get_host_ids_by_group_id(host_group_id)
            screen_item_id_list = []
            resource_id_list = []
            graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
            h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
            if not screen_id:
                # create screen
                screen_id = screen.create_screen(screen_name, h_size, v_size)
                screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                created_screens.append(screen_name)
            else:
                screen_item_list = screen.get_screen_items(screen_id)
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    resource_id = screen_item['resourceid']
                    screen_item_id_list.append(screen_item_id)
                    resource_id_list.append(resource_id)
                # when the screen items changed, then update
                if graph_ids != resource_id_list:
                    deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
                    if deleted:
                        screen.update_screen(screen_id, screen_name, h_size, v_size)
                        screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                        changed_screens.append(screen_name)
    # Report a summary covering every action taken during this run.
    if created_screens and changed_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
    elif created_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
    elif changed_screens:
        module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
    elif deleted_screens:
        module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
    else:
        module.exit_json(changed=False)
from ansible.module_utils.basic import *
# Standard Ansible boilerplate: run the module entry point when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ghchinoy/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py | 4 | 4273 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndFilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _map_and_filter_fusion_test_cases():
  """Generates test cases for the MapAndFilterFusion optimization.

  Returns:
    A tuple of (name, map_function, filter_predicate) triples covering every
    combination of the sample map functions and filter predicates, plus two
    multi-output cases where the map function returns a tuple.
  """
  identity = lambda x: x
  increment = lambda x: x + 1
  minus_five = lambda x: x - 5

  def increment_and_square(x):
    y = x + 1
    return y * y

  take_all = lambda x: constant_op.constant(True)
  is_zero = lambda x: math_ops.equal(x, 0)
  # Renamed from `is_odd`: the predicate keeps elements where x % 2 == 0,
  # i.e. the *even* ones.
  is_even = lambda x: math_ops.equal(x % 2, 0)
  greater = lambda x: math_ops.greater(x + 5, 0)

  functions = [identity, increment, minus_five, increment_and_square]
  filters = [take_all, is_zero, is_even, greater]
  tests = []
  for x, fun in enumerate(functions):
    for y, predicate in enumerate(filters):
      tests.append(("Mixed{}{}".format(x, y), fun, predicate))

  # Multi output: the map function yields a tuple, so the predicate receives
  # multiple arguments.
  tests.append(("Multi1", lambda x: (x, x),
                lambda x, y: constant_op.constant(True)))
  tests.append(
      ("Multi2", lambda x: (x, 2),
       lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
  return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Verifies that fused map+filter pipelines produce the unfused results."""
  def _testMapAndFilter(self, dataset, function, predicate):
    # Compute the expected output in plain Python over range(10) and compare
    # it with what the (optimized) dataset produces.
    expected_output = []
    for x in range(10):
      r = function(x)
      if isinstance(r, tuple):
        b = predicate(*r)  # Pass tuple as multiple arguments.
      else:
        b = predicate(r)
      if self.evaluate(b):
        expected_output.append(r)
    self.assertDatasetProduces(dataset, expected_output=expected_output)
  @parameterized.named_parameters(*_map_and_filter_fusion_test_cases())
  def testMapFilterFusion(self, function, predicate):
    # assert_next checks that the optimizer rewrote Map+Filter into the
    # expected fused op sequence.
    dataset = dataset_ops.Dataset.range(10).apply(
        optimization.assert_next(["Map", "Filter",
                                  "Map"])).map(function).filter(predicate)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.map_and_filter_fusion = True
    dataset = dataset.with_options(options)
    self._testMapAndFilter(dataset, function, predicate)
  def testCapturedInputs(self):
    a = constant_op.constant(3, dtype=dtypes.int64)
    b = constant_op.constant(4, dtype=dtypes.int64)
    some_tensor = math_ops.mul(a, b)
    function = lambda x: x * x
    def predicate(y):
      return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
    # We are currently not supporting functions with captured inputs.
    dataset = dataset_ops.Dataset.range(10).apply(
        optimization.assert_next(["Map",
                                  "Filter"])).map(function).filter(predicate)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.map_and_filter_fusion = True
    dataset = dataset.with_options(options)
    self._testMapAndFilter(dataset, function, predicate)
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
allenp/odoo | addons/mail/models/res_partner.py | 6 | 12495 | # -*- coding: utf-8 -*-
import logging
import threading
from openerp import _, api, fields, models, tools
from openerp.osv import expression
_logger = logging.getLogger(__name__)
class Partner(models.Model):
    """ Update partner to add a field about notification preferences. Add a generic opt-out field that can be used
       to restrict usage of automatic email templates. """
    _name = "res.partner"
    _inherit = ['res.partner', 'mail.thread']
    _mail_flat_thread = False
    _mail_mass_mailing = _('Customers')
    # Per-partner email notification policy for messages pushed to the Inbox.
    notify_email = fields.Selection([
        ('none', 'Never'),
        ('always', 'All Messages')],
        'Email Messages and Notifications', required=True,
        oldname='notification_email_send', default='always',
        help="Policy to receive emails for new messages pushed to your personal Inbox:\n"
             "- Never: no emails are sent\n"
             "- All Messages: for every notification you receive in your Inbox")
    opt_out = fields.Boolean(
        'Opt-Out', help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. "
                        "Filter 'Available for Mass Mailing' allows users to filter the partners when performing mass mailing.")
    channel_ids = fields.Many2many('mail.channel', 'mail_channel_partner', 'partner_id', 'channel_id', string='Channels')
    @api.multi
    def message_get_suggested_recipients(self):
        """Suggest each partner record itself as a recipient of its thread."""
        recipients = super(Partner, self).message_get_suggested_recipients()
        for partner in self:
            partner._message_add_suggested_recipient(recipients, partner=partner, reason=_('Partner Profile'))
        return recipients
    @api.multi
    def message_get_default_recipients(self):
        """Map each partner id to itself as the sole default recipient."""
        return dict((res_id, {'partner_ids': [res_id], 'email_to': False, 'email_cc': False}) for res_id in self.ids)
    @api.model
    def _notify_prepare_template_context(self, message):
        """Build the rendering context (signature, company, model/record names,
        tracking changes) for the notification email template."""
        # compute signature
        signature = ""
        if message.author_id and message.author_id.user_ids and message.author_id.user_ids[0].signature:
            signature = message.author_id.user_ids[0].signature
        elif message.author_id:
            signature = "<p>--<br />%s</p>" % message.author_id.name
        # compute Sent by
        if message.author_id and message.author_id.user_ids:
            user = message.author_id.user_ids[0]
        else:
            user = self.env.user
        if user.company_id.website:
            website_url = 'http://%s' % user.company_id.website if not user.company_id.website.lower().startswith(('http:', 'https:')) else user.company_id.website
        else:
            website_url = False
        company_name = user.company_id.name
        model_name = False
        if message.model:
            model_name = self.env['ir.model'].sudo().search([('model', '=', self.env[message.model]._name)]).name_get()[0][1]
        record_name = message.record_name
        # (field label, old value, new value) triples for tracked field changes.
        tracking = []
        for tracking_value in message.tracking_value_ids:
            tracking.append((tracking_value.field_desc,
                             tracking_value.get_old_display_value()[0],
                             tracking_value.get_new_display_value()[0]))
        return {
            'signature': signature,
            'website_url': website_url,
            'company_name': company_name,
            'model_name': model_name,
            'record_name': record_name,
            'tracking': tracking,
        }
    @api.model
    def _notify_prepare_email_values(self, message):
        """Build the mail.mail creation values for *message*, letting the
        target model inject custom headers via message_get_email_values()."""
        # compute email references
        references = message.parent_id.message_id if message.parent_id else False
        # custom values
        custom_values = dict()
        if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'):
            custom_values = self.env[message.model].browse(message.res_id).message_get_email_values(message)
        mail_values = {
            'mail_message_id': message.id,
            'auto_delete': self._context.get('mail_auto_delete', True),
            'references': references,
        }
        mail_values.update(custom_values)
        return mail_values
    @api.model
    def _notify_send(self, body, subject, recipients, **mail_values):
        """Create mail.mail records for *recipients* in chunks of 50.

        NOTE(review): recipients_nbr is initialized to 0 and returned but
        never incremented, so the second return value is always 0 -- it
        looks like it should accumulate len(email_chunk); confirm against
        the recipients_max throttling in _notify_by_email.
        NOTE(review): xrange is Python 2 only.
        """
        emails = self.env['mail.mail']
        recipients_nbr, recipients_max = 0, 50
        email_chunks = [recipients[x:x + recipients_max] for x in xrange(0, len(recipients), recipients_max)]
        for email_chunk in email_chunks:
            create_values = {
                'body_html': body,
                'subject': subject,
                'recipient_ids': [(4, recipient.id) for recipient in email_chunk],
            }
            create_values.update(mail_values)
            emails |= self.env['mail.mail'].create(create_values)
        return emails, recipients_nbr
    @api.multi
    def _notify(self, message, force_send=False, user_signature=True):
        """Notify the selected partners of *message* by email and by chat."""
        # TDE TODO: model-dependant ? (like customer -> always email ?)
        message_sudo = message.sudo()
        email_channels = message.channel_ids.filtered(lambda channel: channel.email_send)
        # Email everyone in scope except the author, honoring notify_email.
        self.sudo().search([
            '|',
            ('id', 'in', self.ids),
            ('channel_ids', 'in', email_channels.ids),
            ('email', '!=', message_sudo.author_id and message_sudo.author_id.email or message.email_from),
            ('notify_email', '!=', 'none')])._notify_by_email(message, force_send=force_send, user_signature=user_signature)
        self._notify_by_chat(message)
        return True
    @api.multi
    def _notify_by_email(self, message, force_send=False, user_signature=True):
        """ Method to send email linked to notified messages. The recipients are
        the recordset on which this method is called. """
        if not self.ids:
            return True
        # existing custom notification email
        base_template = None
        if message.model:
            base_template = self.env.ref('mail.mail_template_data_notification_email_%s' % message.model.replace('.', '_'), raise_if_not_found=False)
        if not base_template:
            base_template = self.env.ref('mail.mail_template_data_notification_email_default')
        base_template_ctx = self._notify_prepare_template_context(message)
        if not user_signature:
            base_template_ctx['signature'] = False
        base_mail_values = self._notify_prepare_email_values(message)
        # classify recipients: actions / no action
        if message.model and message.res_id and hasattr(self.env[message.model], '_message_notification_recipients'):
            recipients = self.env[message.model].browse(message.res_id)._message_notification_recipients(message, self)
        else:
            recipients = self.env['mail.thread']._message_notification_recipients(message, self)
        emails = self.env['mail.mail']
        # NOTE(review): recipients_nbr only grows via _notify_send's second
        # return value, which is always 0 (see note there); the throttling
        # check below therefore always passes.  iteritems() is Python 2 only.
        recipients_nbr, recipients_max = 0, 50
        for email_type, recipient_template_values in recipients.iteritems():
            if recipient_template_values['followers']:
                # generate notification email content
                template_fol_values = dict(base_template_ctx, **recipient_template_values)  # fixme: set button_unfollow to none
                template_fol_values['button_follow'] = False
                template_fol = base_template.with_context(**template_fol_values)
                # generate templates for followers and not followers
                fol_values = template_fol.generate_email(message.id, fields=['body_html', 'subject'])
                # send email
                new_emails, new_recipients_nbr = self._notify_send(fol_values['body'], fol_values['subject'], recipient_template_values['followers'], **base_mail_values)
                emails |= new_emails
                recipients_nbr += new_recipients_nbr
            if recipient_template_values['not_followers']:
                # generate notification email content
                template_not_values = dict(base_template_ctx, **recipient_template_values)  # fixme: set button_follow to none
                template_not_values['button_unfollow'] = False
                template_not = base_template.with_context(**template_not_values)
                # generate templates for followers and not followers
                not_values = template_not.generate_email(message.id, fields=['body_html', 'subject'])
                # send email
                new_emails, new_recipients_nbr = self._notify_send(not_values['body'], not_values['subject'], recipient_template_values['not_followers'], **base_mail_values)
                emails |= new_emails
                recipients_nbr += new_recipients_nbr
        # NOTE:
        # 1. for more than 50 followers, use the queue system
        # 2. do not send emails immediately if the registry is not loaded,
        #    to prevent sending email during a simple update of the database
        #    using the command-line.
        if force_send and recipients_nbr < recipients_max and \
                (not self.pool._init or getattr(threading.currentThread(), 'testing', False)):
            emails.send()
        return True
    @api.multi
    def _notify_by_chat(self, message):
        """ Broadcast the message to all the partner since """
        message_values = message.message_format()[0]
        notifications = []
        for partner in self:
            notifications.append([(self._cr.dbname, 'ir.needaction', partner.id), dict(message_values)])
        self.env['bus.bus'].sendmany(notifications)
    @api.model
    def get_needaction_count(self):
        """ compute the number of needaction of the current user """
        if self.env.user.partner_id:
            self.env.cr.execute("""
                SELECT count(*) as needaction_count
                FROM mail_message_res_partner_needaction_rel R
                WHERE R.res_partner_id = %s """, (self.env.user.partner_id.id,))
            return self.env.cr.dictfetchall()[0].get('needaction_count')
        _logger.error('Call to needaction_count without partner_id')
        return 0
    @api.model
    def get_mention_suggestions(self, search, channel, exclude=None, limit=8):
        """ Return 'limit'-first partners' id, name and email such that the name or email matches a
            'search' string. Prioritize partners registered to channel 'channel[channel_id]' if
            given, or partners that are followers of a document identified by 'channel[res_model]'
            and 'channel[res_id]' otherwise, then users, and finally extend the research to all
            partners. Exclude partners whose id is in 'exclude'. """
        if exclude is None:
            exclude = []
        members = []
        users = []
        partners = []
        search_dom = expression.AND([
            expression.OR([[('name', 'ilike', search)], [('email', 'ilike', search)]]),
            [('id', 'not in', exclude)]
        ])
        fields = ['id', 'name', 'email']
        # Helper: run one search, shrink the remaining limit and extend the
        # exclusion list so later passes never return duplicates.
        def search_partners(domain, fields, limit, exclude):
            partners = self.search_read(domain, fields, limit=limit)
            limit -= len(partners)
            exclude += [partner['id'] for partner in partners]
            return partners, limit, exclude
        # Search users registered to the channel
        if 'channel_id' in channel:
            domain = expression.AND([[('channel_ids', 'in', [channel['channel_id']])], search_dom])
            members, limit, exclude = search_partners(domain, fields, limit, exclude)
        else:
            domain = expression.AND([
                [('res_model', '=', channel['res_model'])],
                [('res_id', '=', channel['res_id'])]
            ])
            followers = self.env['mail.followers'].search(domain)
            domain = expression.AND([[('id', 'in', followers.mapped('partner_id').ids)], search_dom])
            members, limit, exclude = search_partners(domain, fields, limit, exclude)
        if limit > 0:
            # Search users
            domain = expression.AND([[('user_ids.id', '!=', False)], search_dom])
            users, limit, exclude = search_partners(domain, fields, limit, exclude)
        if limit > 0:
            # Search partners
            partners = self.search_read(search_dom, fields, limit=limit)
        return [members, users, partners]
| gpl-3.0 |
chepazzo/ansible-eos | library/eos_config.py | 5 | 14405 | #!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Ansible-doc metadata; typos fixed ("evalues" -> "evaluates",
# "evalute" -> "evaluate") and the unbalanced parenthesis in the
# description closed.
DOCUMENTATION = """
---
module: eos_config
short_description: Sends configuration commands to the node
description:
  - The eos_config module evaluates the current configuration for specific
    commands.  If the commands are either present or absent (depending on
    the function argument), the eos_config module will configure the node
    using the command argument.
version_added: 1.0.0
category: System
author: Arista EOS+
requirements:
  - Arista EOS 4.13.7M or later with command API enabled
  - Python Client for eAPI 0.3.0 or later
notes:
  - This module does not support idempotent operations.
  - Supports eos metaparameters for using the eAPI transport
  - This module does not support stateful configuration
options:
  command:
    description:
      - Specifies the configuration command to send to the node if the
        expression does not evaluate to true.
    required: true
    default: null
    choices: []
    aliases: []
    version_added: 1.0.0
  section:
    description:
      - Restricts the configuration evaluation to a single configuration
        section.  If the configuration section argument is not provided,
        then the global configuration is used.
    required: false
    default: null
    choices: []
    aliases: []
    version_added: 1.0.0
  regexp:
    description:
      - Specifies the expression to evaluate the current node's running
        configuration.  The value can be any valid regular expression.
        This optional argument will default to use the command
        argument if none is provided.
    required: false
    default: null
    choices: []
    aliases: ['expression']
    version_added: 1.1.0
"""
EXAMPLES = """
- name: idempotent operation for removing a SVI
eos_config:
command='no interface Vlan100'
regexp='interface Vlan100'
state=absent
- name: non-idempotent operation for removing a SVI
eos_config:
command='no interface Vlan100'
- name: ensure default route is present
eos_config:
command='ip route 0.0.0.0/0 192.168.1.254'
- name: configure interface range to be shutdown if it isn't already
eos_config:
command='shutdown'
regexp='(?<=[^no ] )shutdown'
section='interface {{ item }}'
with_items:
- Ethernet1
- Ethernet2
- Ethernet3
"""
import re
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
# pyeapi is an optional dependency; record availability so the module can
# fail later with a helpful message instead of an ImportError at load time.
try:
    import pyeapi
    PYEAPI_AVAILABLE = True
except ImportError:
    PYEAPI_AVAILABLE = False

# Default syslog priority used by EosAnsibleModule.log().
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
# Default eAPI connection profile name looked up in eapi.conf.
DEFAULT_CONNECTION = 'localhost'
# Transports accepted by the eAPI client.
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
    """AnsibleModule specialization shared by the eos_* modules.

    Adds the common eAPI connection arguments (``meta_args``), optional
    ``state`` handling for stateful modules, and a create/update/remove
    workflow driven by module-level callback functions looked up by name
    via :meth:`func` (``instance``, ``create``, ``remove``, ``set_<attr>``,
    ``flush``, ...).

    NOTE(review): this class uses Python 2 dict APIs (``iteritems``,
    ``viewitems``) and therefore only runs under Python 2.
    """

    # Connection/transport arguments merged into every module's argument_spec.
    meta_args = {
        'config': dict(),
        'username': dict(),
        'password': dict(),
        'host': dict(),
        'connection': dict(default=DEFAULT_CONNECTION),
        'transport': dict(choices=TRANSPORTS),
        'port': dict(),
        'debug': dict(type='bool', default='false'),
        'logging': dict(type='bool', default='true')
    }

    # Extra argument only merged in for stateful modules.
    stateful_args = {
        'state': dict(default='present', choices=['present', 'absent']),
    }

    def __init__(self, stateful=True, *args, **kwargs):
        # Inject the shared meta (and optionally stateful) arguments before
        # AnsibleModule parses the argument spec.
        kwargs['argument_spec'].update(self.meta_args)
        self._stateful = stateful
        if stateful:
            kwargs['argument_spec'].update(self.stateful_args)
        super(EosAnsibleModule, self).__init__(*args, **kwargs)
        self.result = dict(changed=False, changes=dict())
        # Debug/logging flags may come from kwargs or from the parsed params.
        self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
        self._logging = kwargs.get('logging') or self.params['logging']
        self.log('DEBUG flag is %s' % self._debug)
        self.debug('pyeapi_version', self.check_pyeapi())
        self.debug('stateful', self._stateful)
        self.debug('params', self.params)
        self._attributes = self.map_argument_spec()
        self.validate()
        self._node = self.connect()
        self._instance = None
        self.desired_state = self.params['state'] if self._stateful else None
        self.exit_after_flush = kwargs.get('exit_after_flush')

    @property
    def instance(self):
        """Lazily fetch the current resource state via the module-level
        ``instance`` callback (cached until :meth:`refresh`)."""
        if self._instance:
            return self._instance
        func = self.func('instance')
        if not func:
            self.fail('Module does not support "instance"')
        try:
            self._instance = func(self)
        except Exception as exc:
            self.fail('instance[error]: %s' % exc.message)
        self.log("called instance: %s" % self._instance)
        return self._instance

    @property
    def attributes(self):
        # Desired state as derived from the module arguments.
        return self._attributes

    @property
    def node(self):
        # Lazily (re)establish the pyeapi node connection.
        if self._node:
            return self._node
        self._node = self.connect()
        return self._node

    def check_pyeapi(self):
        """Fail the module if pyeapi is unavailable; return its version."""
        if not PYEAPI_AVAILABLE:
            self.fail('Unable to import pyeapi, is it installed?')
        return pyeapi.__version__

    def map_argument_spec(self):
        """map_argument_spec maps only the module argument spec to attrs

        This method will map the argumentspec minus the meta_args to attrs
        and return the attrs.  This returns a dict object that includes only
        the original argspec plus the stateful_args (if self._stateful=True)

        Returns:
            dict: Returns a dict object that includes the original
                argument_spec plus stateful_args with values minus meta_args
        """
        keys = set(self.params).difference(self.meta_args)
        attrs = dict()
        attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
        if 'CHECKMODE' in attrs:
            del attrs['CHECKMODE']
        return attrs

    def validate(self):
        # Run any module-level validate_<key> callback over each attribute.
        for key, value in self.attributes.iteritems():
            func = self.func('validate_%s' % key)
            if func:
                self.attributes[key] = func(value)

    def create(self):
        """Invoke the module-level ``create`` callback (no-op in check mode)."""
        if not self.check_mode:
            func = self.func('create')
            if not func:
                self.fail('Module must define "create" function')
            return self.invoke(func, self)

    def remove(self):
        """Invoke the module-level ``remove`` callback (no-op in check mode)."""
        if not self.check_mode:
            func = self.func('remove')
            if not func:
                self.fail('Module most define "remove" function')
            return self.invoke(func, self)

    def flush(self, exit_after_flush=False):
        """Reconcile desired state against current state, applying changes."""
        self.exit_after_flush = exit_after_flush
        if self.desired_state == 'present' or not self._stateful:
            if self.instance.get('state') == 'absent':
                changed = self.create()
                self.result['changed'] = changed or True
                self.refresh()
            # (key, value) pairs present in the desired attributes but not in
            # the current instance (Python 2 dict-view set difference).
            changeset = self.attributes.viewitems() - self.instance.viewitems()
            if self._debug:
                self.debug('desired_state', self.attributes)
                self.debug('current_state', self.instance)
            changes = self.update(changeset)
            if changes:
                self.result['changes'] = changes
                self.result['changed'] = True
            self._attributes.update(changes)
            flush = self.func('flush')
            if flush:
                self.invoke(flush, self)
        elif self.desired_state == 'absent' and self._stateful:
            if self.instance.get('state') == 'present':
                changed = self.remove()
                self.result['changed'] = changed or True
        elif self._stateful:
            if self.desired_state != self.instance.get('state'):
                changed = self.invoke(self.instance.get('state'))
                self.result['changed'] = changed or True
        self.refresh()
        self.result['instance'] = self.instance
        if self.exit_after_flush:
            self.exit()

    def update(self, changeset):
        """Apply each (key, value) change via its ``set_<key>`` callback.

        Returns the dict of changes that had non-None values.
        """
        changes = dict()
        for key, value in changeset:
            if value is not None:
                changes[key] = value
                func = self.func('set_%s' % key)
                if func and not self.check_mode:
                    try:
                        self.invoke(func, self)
                    except Exception as exc:
                        self.fail(exc.message)
        return changes

    def connect(self):
        """Build a pyeapi Node from the module's connection parameters.

        Explicit module parameters override values read from the named
        connection profile; fails the module when the node is unreachable.
        """
        if self.params['config']:
            pyeapi.load_config(self.params['config'])
        config = dict()
        if self.params['connection']:
            config = pyeapi.config_for(self.params['connection'])
            if not config:
                msg = 'Connection name "%s" not found' % self.params['connection']
                self.fail(msg)
        if self.params['username']:
            config['username'] = self.params['username']
        if self.params['password']:
            config['password'] = self.params['password']
        if self.params['transport']:
            config['transport'] = self.params['transport']
        if self.params['port']:
            config['port'] = self.params['port']
        if self.params['host']:
            config['host'] = self.params['host']
        if 'transport' not in config:
            self.fail('Connection must define a transport')
        connection = pyeapi.client.make_connection(**config)
        node = pyeapi.client.Node(connection, **config)
        # Probe the connection early so a bad transport fails fast.
        try:
            resp = node.enable('show version')
            self.debug('eos_version', resp[0]['result']['version'])
            self.debug('eos_model', resp[0]['result']['modelName'])
        except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
            self.fail('unable to connect to %s' % node)
        else:
            self.log('Connected to node %s' % node)
        self.debug('node', str(node))
        return node

    def config(self, commands):
        """Send configuration commands to the node (skipped in check mode)."""
        self.result['changed'] = True
        if not self.check_mode:
            self.node.config(commands)

    def api(self, module):
        # Expose the pyeapi API for a named subsystem (e.g. 'vlans').
        return self.node.api(module)

    def func(self, name):
        # Look up a module-level callback by name, or None when undefined.
        return globals().get(name)

    def invoke(self, func, *args, **kwargs):
        # Run a callback, converting any exception into a module failure.
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            self.fail(exc.message)

    def invoke_function(self, name, *args, **kwargs):
        # Invoke a named callback only if it exists.
        func = self.func(name)
        if func:
            return self.invoke(func, *args, **kwargs)

    def fail(self, msg):
        """Log and abort the module run with *msg* (runs ``on_fail`` hook)."""
        self.invoke_function('on_fail', self)
        self.log('ERROR: %s' % msg, syslog.LOG_ERR)
        self.fail_json(msg=msg)

    def exit(self):
        """Finish the module run successfully (runs ``on_exit`` hook)."""
        self.invoke_function('on_exit', self)
        self.log('Module completed successfully')
        self.exit_json(**self.result)

    def refresh(self):
        # Drop the cached instance so the next access re-reads the device.
        self._instance = None

    def debug(self, key, value):
        # Collect debug key/values into the result when --debug is enabled.
        if self._debug:
            if 'debug' not in self.result:
                self.result['debug'] = dict()
            self.result['debug'][key] = value

    def log(self, message, priority=None):
        # Emit to syslog under the 'ansible-eos' ident when logging is on.
        if self._logging:
            syslog.openlog('ansible-eos')
            priority = priority or DEFAULT_SYSLOG_PRIORITY
            syslog.syslog(priority, str(message))

    @classmethod
    def add_state(cls, name):
        # Allow modules to register extra valid values for 'state'.
        cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def section(module):
    """Return the configuration text to evaluate.

    When the module's 'section' argument is set, return only that section
    of the running config; otherwise return the whole running config.
    Returns an empty string when the section cannot be resolved.
    """
    try:
        name = module.attributes['section']
        if not name:
            # No section restriction: evaluate against the global config.
            return module.node.running_config
        return module.node.section(r'^%s$' % name)
    except TypeError:
        # Raised when the section lookup fails; treat as "no config found".
        return str()
def config(module):
    """Push the configured command to the node, nested under its section
    (if one was given) so it applies in the right configuration context."""
    cmds = []
    section_name = module.attributes['section']
    if section_name:
        cmds.append(section_name)
    cmds.append(module.attributes['command'])
    module.debug('commands', cmds)
    module.config(cmds)
def main():
    """ The main module routine called when the module is run by Ansible
    """
    argument_spec = dict(
        command=dict(required=True),
        section=dict(),
        regexp=dict(aliases=['expression']),
        state=dict(default='present', choices=['present', 'absent'])
    )
    module = EosAnsibleModule(argument_spec=argument_spec)
    command = module.attributes['command'].strip()
    regexp = module.attributes['regexp']
    state = module.attributes['state']
    if regexp:
        # Compile with MULTILINE once; the flag must NOT be passed to
        # search() below -- on a compiled pattern the second positional
        # argument is the start *position*, so the previous code
        # (regexp.search(cfg, re.M)) silently skipped the first 8
        # characters of the configuration.
        regexp = re.compile(r'{0}'.format(regexp), re.M)
    cfg = section(module)
    module.debug('running_config', cfg)
    if state == 'absent':
        # Command/expression found: send the command to remove it.
        if regexp:
            if regexp.search(cfg):
                config(module)
        elif command in cfg:
            config(module)
    elif state == 'present':
        # Command/expression missing: send the command to add it.
        if regexp:
            if not regexp.search(cfg):
                config(module)
        elif command not in cfg:
            config(module)
    module.exit()
main()
| bsd-3-clause |
luthfii/xsched | tools/ocaml/libs/xentoollog/genlevels.py | 9 | 2642 | #!/usr/bin/python
import sys
def read_levels():
    """Parse xentoollog.h and return (C level names, OCaml constructor names).

    Levels are the enum entries strictly between XTL_NONE and
    XTL_NUM_LEVELS; the OCaml names drop the 'XTL_' prefix and are
    capitalized (e.g. 'XTL_DEBUG' -> 'Debug').
    """
    levels = []
    with open('../../../libxc/xentoollog.h', 'r') as header:
        recording = False
        for line in header:
            if 'XTL_NUM_LEVELS' in line:
                break
            if recording:
                levels.append(line.split(',')[0].strip())
            if 'XTL_NONE' in line:
                recording = True
    olevels = [lvl[4:].capitalize() for lvl in levels]
    return levels, olevels
# .ml
# .ml
def gen_ml(olevels):
    """Emit the OCaml implementation: the level variant type plus the
    level_to_string and level_to_prio conversion functions."""
    parts = ["type level = \n"]
    parts.extend('\t| %s\n' % lvl for lvl in olevels)
    parts.append("\nlet level_to_string level =\n")
    parts.append("\tmatch level with\n")
    parts.extend('\t| %s -> "%s"\n' % (lvl, lvl) for lvl in olevels)
    parts.append("\nlet level_to_prio level =\n")
    parts.append("\tmatch level with\n")
    parts.extend('\t| %s -> %d\n' % (lvl, prio) for prio, lvl in enumerate(olevels))
    return "".join(parts)
# .mli
def gen_mli(olevels):
s = ""
s += "type level = \n"
for level in olevels:
s += '\t| %s\n' % level
return s
# .c
def gen_c(level):
s = ""
s += "static value Val_level(xentoollog_level c_level)\n"
s += "{\n"
s += "\tswitch (c_level) {\n"
s += "\tcase XTL_NONE: /* Not a real value */\n"
s += '\t\tcaml_raise_sys_error(caml_copy_string("Val_level XTL_NONE"));\n'
s += "\t\tbreak;\n"
for index,level in enumerate(levels):
s += "\tcase %s:\n\t\treturn Val_int(%d);\n" % (level, index)
s += """\tcase XTL_NUM_LEVELS: /* Not a real value! */
\t\tcaml_raise_sys_error(
\t\t\tcaml_copy_string("Val_level XTL_NUM_LEVELS"));
#if 0 /* Let the compiler catch this */
\tdefault:
\t\tcaml_raise_sys_error(caml_copy_string("Val_level Unknown"));
\t\tbreak;
#endif
\t}
\tabort();
}
"""
return s
def autogen_header(open_comment, close_comment):
    """Return the do-not-edit banner wrapped in the given comment markers.

    The continuation line is indented by len(open_comment) + 1 spaces so the
    command line aligns under "autogenerated by".  (Previously built with a
    Python 2-only bare ``reduce`` over a lambda; plain string repetition is
    equivalent and portable.)
    """
    s = open_comment + " AUTO-GENERATED FILE DO NOT EDIT " + close_comment + "\n"
    s += open_comment + " autogenerated by \n"
    s += " " * len(open_comment + " ")
    s += "%s" % " ".join(sys.argv)
    s += "\n " + close_comment + "\n\n"
    return s
if __name__ == '__main__':
    # BUG FIX: the script reads sys.argv[1..3] (three file names), so it
    # must require at least 4 argv entries, not 3.  Also use
    # sys.stderr.write instead of the Python 2-only "print >>" statement.
    if len(sys.argv) < 4:
        sys.stderr.write("Usage: genlevels.py <mli> <ml> <c-inc>\n")
        sys.exit(1)
    levels, olevels = read_levels()
    # Open each output file and write the autogenerated banner first.
    _mli = sys.argv[1]
    mli = open(_mli, 'w')
    mli.write(autogen_header("(*", "*)"))
    _ml = sys.argv[2]
    ml = open(_ml, 'w')
    ml.write(autogen_header("(*", "*)"))
    _cinc = sys.argv[3]
    cinc = open(_cinc, 'w')
    cinc.write(autogen_header("/*", "*/"))
    mli.write(gen_mli(olevels))
    mli.write("\n")
    ml.write(gen_ml(olevels))
    ml.write("\n")
    cinc.write(gen_c(levels))
    cinc.write("\n")
    ml.write("(* END OF AUTO-GENERATED CODE *)\n")
    ml.close()
    mli.write("(* END OF AUTO-GENERATED CODE *)\n")
    mli.close()
    cinc.close()
| gpl-2.0 |
felixbuenemann/sentry | src/sentry/web/forms/fields.py | 22 | 4794 | """
sentry.web.forms.fields
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.core.validators import URLValidator
from django.forms.widgets import RadioFieldRenderer, TextInput, Widget
from django.forms.util import flatatt
from django.forms import (
Field, CharField, IntegerField, Textarea, TypedChoiceField, ValidationError
)
from django.utils.encoding import force_unicode
from django.utils.html import format_html
from sentry.utils.http import parse_uri_match
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry.models import User
class CustomTypedChoiceField(TypedChoiceField):
    # A patched version of TypedChoiceField which correctly validates a 0
    # as a real input that may be invalid
    # See https://github.com/django/django/pull/3774
    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(CustomTypedChoiceField, self).validate(value)
        # this will validate itself twice due to the internal ChoiceField
        # validation
        # Unlike the parent, only None is treated as "empty" here, so a
        # falsy-but-present value such as 0 is still checked against choices.
        if value is not None and not self.valid_value(value):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
class RangeInput(TextInput):
    # TextInput variant that renders as an HTML5 <input type="range"> slider.
    input_type = 'range'
# NOTE(review): intentionally shadows the django RadioFieldRenderer imported
# above -- this subclass replaces it for the rest of the module.
class RadioFieldRenderer(RadioFieldRenderer):
    """
    This is identical to Django's builtin widget, except that
    it renders as a Bootstrap2 compatible widget. Would be great if
    we didn't have to create this stupid code, but Django widgets are not
    flexible.
    """
    def render(self):
        # Wrap the radio inputs in Bootstrap's "inputs-list" container.
        return mark_safe(u'\n<div class="inputs-list">%s</div>\n' % u'\n'.join([force_unicode(w) for w in self]))
class UserField(CharField):
    """Form field that accepts a username and cleans it to a User instance."""

    class widget(TextInput):
        def render(self, name, value, attrs=None):
            if not attrs:
                attrs = {}
            if 'placeholder' not in attrs:
                attrs['placeholder'] = 'username'
            # A stored user id (integer) is resolved back to its username
            # for display.
            if isinstance(value, six.integer_types):
                value = User.objects.get(id=value).username
            return super(UserField.widget, self).render(name, value, attrs)

    def clean(self, value):
        """Return the User matching the entered username, or None when blank.

        Raises ValidationError for an unknown username.
        """
        value = super(UserField, self).clean(value)
        if not value:
            return None
        try:
            return User.objects.get(username=value)
        except User.DoesNotExist:
            raise ValidationError(_('Invalid username'))
class RangeField(IntegerField):
    """IntegerField rendered as an HTML5 range slider with optional step."""
    widget = RangeInput

    def __init__(self, *args, **kwargs):
        # Pop step_value before IntegerField sees the kwargs; it only
        # understands min_value/max_value.
        self.step_value = kwargs.pop('step_value', None)
        super(RangeField, self).__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        # Forward min/max/step to the <input type="range"> element.
        attrs = super(RangeField, self).widget_attrs(widget)
        attrs.setdefault('min', self.min_value)
        attrs.setdefault('max', self.max_value)
        attrs.setdefault('step', self.step_value)
        return attrs
class ReadOnlyTextWidget(Widget):
    """Widget rendering its value as static text ("Not set" when empty)."""
    def render(self, name, value, attrs):
        final_attrs = self.build_attrs(attrs)
        if not value:
            value = mark_safe("<em>%s</em>" % _("Not set"))
        # format_html escapes `value` unless it is already marked safe.
        return format_html("<div{0}>{1}</div>", flatatt(final_attrs), value)
class ReadOnlyTextField(Field):
    """Display-only form field: rendered but never submitted or changed."""
    widget = ReadOnlyTextWidget

    def __init__(self, *args, **kwargs):
        # A read-only field can never be "required" in a meaningful way.
        kwargs.setdefault("required", False)
        super(ReadOnlyTextField, self).__init__(*args, **kwargs)

    def bound_data(self, data, initial):
        # Always return initial because the widget doesn't
        # render an input field.
        return initial
class OriginsField(CharField):
    """Form field accepting a newline-separated list of allowed origins."""

    # Special case origins that don't fit the normal regex pattern, but are valid.
    # BUG FIX: this was previously written as ('*') which is just the *string*
    # '*' (missing trailing comma), so `value in WHITELIST_ORIGINS` did
    # substring matching rather than tuple membership.
    WHITELIST_ORIGINS = ('*',)

    _url_validator = URLValidator()

    widget = Textarea(
        attrs={
            'placeholder': mark_safe(_('e.g. example.com or https://example.com')),
            'class': 'span8',
        },
    )

    def clean(self, value):
        """Split the raw value on newlines and validate each origin.

        Returns a list of origin strings (empty list for blank input);
        raises ValidationError on the first unacceptable entry.
        """
        if not value:
            return []
        # list() keeps the return type a list on Python 3, where filter()
        # yields a lazy iterator.
        values = list(filter(bool, (v.strip() for v in value.split('\n'))))
        for value in values:
            if not self.is_valid_origin(value):
                raise ValidationError('%r is not an acceptable value' % value)
        return values

    def is_valid_origin(self, value):
        """Return True if *value* is whitelisted or a structurally valid origin."""
        if value in self.WHITELIST_ORIGINS:
            return True
        bits = parse_uri_match(value)
        # ports are not supported on matching expressions (yet)
        if ':' in bits.domain:
            return False
        return True
| bsd-3-clause |
rcarrillocruz/ansible | docs/bin/plugin_formatter.py | 33 | 16896 | #!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
__metaclass__ = type
import cgi
import datetime
import glob
import optparse
import os
import re
import sys
import warnings
import yaml
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.utils import plugin_docs
#####################################################################################
# constants and paths

# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
# NOTE(review): the name is a typo for TOO_OLD_TO_BE_NOTABLE; kept as-is for compatibility.
TO_OLD_TO_BE_NOTABLE = 1.3

# Get parent directory of the directory this script lives in
MODULEDIR = os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))

# The name of the DOCUMENTATION template
EXAMPLE_YAML = os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))

# Inline documentation markup macros: I(italic), B(bold), M(module ref),
# U(url), C(code) -- each captures the text between the parentheses.
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")

# Suffix appended to deprecated module names in the generated index pages.
DEPRECATED = b" (D)"
def rst_ify(text):
    ''' convert symbols like I(this is in italics) to valid restructured text '''
    # Each substitution rewrites one inline markup macro into its reST form;
    # failures are wrapped in AnsibleError with the offending text attached.
    try:
        t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
        t = _BOLD.sub(r'**' + r"\1" + r"**", t)
        t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
        t = _URL.sub(r"\1", t)
        t = _CONST.sub(r'``' + r"\1" + r"``", t)
    except Exception as e:
        raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
    return t
def html_ify(text):
    ''' convert symbols like I(this is in italics) to valid HTML '''
    # Escape first so user-provided text cannot inject markup, then expand
    # each macro into its HTML equivalent.
    t = cgi.escape(text)
    t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
    t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
    t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
    t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
    t = _CONST.sub("<code>" + r"\1" + "</code>", t)
    return t
def rst_fmt(text, fmt):
    """Jinja2 filter: apply the printf-style *fmt* string to *text*."""
    return fmt % text
def rst_xline(width, char="="):
    """Jinja2 helper: return a reST section-underline of *width* chars."""
    return width * char
def write_data(text, options, outputname, module):
    ''' dumps module output to a file or the screen, as requested '''
    if options.output_dir is not None:
        fname = os.path.join(options.output_dir, outputname % module)
        fname = fname.replace(".py", "")
        # Use a context manager so the handle is closed even if the
        # write fails (previously an exception leaked the open file).
        with open(fname, 'wb') as f:
            f.write(to_bytes(text))
    else:
        print(text)
def list_modules(module_dir, depth=0):
    ''' returns a hash of categories, each category being a hash of module names to file paths '''
    categories = dict()      # nested category dicts mirroring the dir tree
    module_info = dict()     # flat map: module name -> file path
    aliases = defaultdict(set)  # real module name -> set of alias names
    # * windows powershell modules have documentation stubs in python docstring
    #   format (they are not executed) so skip the ps1 format files
    # * One glob level for every module level that we're going to traverse
    files = (
        glob.glob("%s/*.py" % module_dir) +
        glob.glob("%s/*/*.py" % module_dir) +
        glob.glob("%s/*/*/*.py" % module_dir) +
        glob.glob("%s/*/*/*/*.py" % module_dir)
    )
    for module_path in files:
        if module_path.endswith('__init__.py'):
            continue
        category = categories
        mod_path_only = module_path
        # Strip the module_dir prefix; categories come from the relative
        # directory components only (we don't want the "vendor"
        # directories such as core/extras).
        mod_path_only = os.path.dirname(module_path[len(module_dir):])
        for new_cat in mod_path_only.split('/')[1:]:
            if new_cat not in category:
                category[new_cat] = dict()
            category = category[new_cat]
        module = os.path.splitext(os.path.basename(module_path))[0]
        if module in plugin_docs.BLACKLIST['MODULE']:
            # Do not list blacklisted modules
            continue
        if module.startswith("_") and os.path.islink(module_path):
            # A symlinked "_name" stub is an alias for its link target, not
            # a deprecated module in its own right.
            source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
            module = module.replace("_", "", 1)
            aliases[source].add(module)
            continue
        category[module] = module_path
        module_info[module] = module_path
    # keep module tests out of becoming module docs
    if 'test' in categories:
        del categories['test']
    return module_info, categories, aliases
def generate_parser():
    ''' generate an optparse parser '''
    p = optparse.OptionParser(
        version='%prog 1.0',
        usage='usage: %prog [options] arg1 arg2',
        description='Generate module documentation from metadata',
    )
    # All options have sensible defaults; only the output dir is commonly set.
    p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
    p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
    p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
    p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
    p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
    p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
    p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
    p.add_option('-V', action='version', help='Show version number and exit')
    return p
def jinja2_environment(template_dir, typ):
    """Build the Jinja2 environment, template and output-filename pattern.

    Only typ == 'rst' is supported; anything else raises.  Uses @{ }@ as
    variable delimiters so templates can contain literal {{ }}.
    """
    env = Environment(loader=FileSystemLoader(template_dir),
                      variable_start_string="@{",
                      variable_end_string="}@",
                      trim_blocks=True)
    env.globals['xline'] = rst_xline
    if typ == 'rst':
        env.filters['convert_symbols_to_format'] = rst_ify
        env.filters['html_ify'] = html_ify
        env.filters['fmt'] = rst_fmt
        env.filters['xline'] = rst_xline
        template = env.get_template('plugin.rst.j2')
        outputname = "%s_module.rst"
    else:
        raise Exception("unknown module format type: %s" % typ)
    return env, template, outputname
def too_old(added):
    """Return True when *added* names an Ansible version older than
    TO_OLD_TO_BE_NOTABLE (so "version added" should not be displayed).

    Unparseable values warn and return False.
    """
    if not added:
        return False
    try:
        # Keep only "major.minor"; extra components like "2.4.0.1" are dropped.
        added_tokens = str(added).split(".")
        readded = added_tokens[0] + "." + added_tokens[1]
        added_float = float(readded)
    except (ValueError, IndexError) as e:
        # IndexError: versions without a dot (e.g. "2") previously crashed
        # here instead of being reported as unparseable.
        warnings.warn("Could not parse %s: %s" % (added, str(e)))
        return False
    return (added_float < TO_OLD_TO_BE_NOTABLE)
def process_module(module, options, env, template, outputname, module_map, aliases):
    """Render the documentation page for one module.

    Returns the module's short description, 'SKIPPED' for category
    placeholders, or None for files that are ignored (aliases, non-.py).
    Exits the program on missing/invalid documentation metadata.
    """
    fname = module_map[module]
    if isinstance(fname, dict):
        # A nested dict here means *module* is actually a category entry.
        return "SKIPPED"
    basename = os.path.basename(fname)
    deprecated = False
    # ignore files with extensions
    if not basename.endswith(".py"):
        return
    elif module.startswith("_"):
        if os.path.islink(fname):
            return  # ignore, its an alias
        # A real (non-symlink) underscore-prefixed file is deprecated.
        deprecated = True
        module = module.replace("_", "", 1)
    print("rendering: %s" % module)
    # use ansible core library to parse out doc metadata YAML and plaintext examples
    doc, examples, returndocs, metadata = plugin_docs.get_docstring(fname, verbose=options.verbose)
    # crash if module is missing documentation and not explicitly hidden from docs index
    if doc is None:
        sys.exit("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
    if metadata is None:
        sys.exit("*** ERROR: MODULE MISSING METADATA: %s, %s ***\n" % (fname, module))
    if deprecated and 'deprecated' not in doc:
        sys.exit("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
    if module in aliases:
        doc['aliases'] = aliases[module]
    all_keys = []
    if 'version_added' not in doc:
        sys.exit("*** ERROR: missing version_added in: %s ***\n" % module)
    added = 0
    if doc['version_added'] == 'historical':
        del doc['version_added']
    else:
        added = doc['version_added']
    # don't show version added information if it's too old to be called out
    if too_old(added):
        del doc['version_added']
    if 'options' in doc and doc['options']:
        for (k, v) in iteritems(doc['options']):
            # don't show version added information if it's too old to be called out
            if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
                del doc['options'][k]['version_added']
            if 'description' not in doc['options'][k]:
                raise AnsibleError("Missing required description for option %s in %s " % (k, module))
            required_value = doc['options'][k].get('required', False)
            if not isinstance(required_value, bool):
                raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module))
            # Normalize descriptions to lists so templates can iterate them.
            if not isinstance(doc['options'][k]['description'], list):
                doc['options'][k]['description'] = [doc['options'][k]['description']]
            all_keys.append(k)
    all_keys = sorted(all_keys)
    # Enrich the doc dict with derived values consumed by the template.
    doc['option_keys'] = all_keys
    doc['filename'] = fname
    doc['docuri'] = doc['module'].replace('_', '-')
    doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
    doc['ansible_version'] = options.ansible_version
    doc['plainexamples'] = examples  # plain text
    doc['metadata'] = metadata
    if returndocs:
        try:
            doc['returndocs'] = yaml.safe_load(returndocs)
        except:
            print("could not load yaml: %s" % returndocs)
            raise
    else:
        doc['returndocs'] = None
    # here is where we build the table of contents...
    try:
        text = template.render(doc)
    except Exception as e:
        raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
    write_data(text, options, outputname, module)
    return doc['short_description']
def print_modules(module, category_file, deprecated, options, env, template, outputname, module_map, aliases):
    """Write one toctree entry for *module* into the category index file.

    Deprecated modules get the DEPRECATED marker appended to their display
    name; module_map[module] is a (path, short_description) pair here.
    """
    modstring = module
    if modstring.startswith('_'):
        modstring = module[1:]
    modname = modstring
    if module in deprecated:
        modstring = to_bytes(modstring) + DEPRECATED
    category_file.write(b" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname)))
def process_category(category, categories, options, env, template, outputname):
    """Write the list_of_<category>_modules.rst index page for one category."""
    # FIXME:
    # We no longer conceptually deal with a mapping of category names to
    # modules to file paths.  Instead we want several different records:
    # (1) Mapping of module names to file paths (what's presently used
    #     as categories['all']
    # (2) Mapping of category names to lists of module names (what you'd
    #     presently get from categories[category_name][subcategory_name].keys()
    # (3) aliases (what's presently in categories['_aliases']
    #
    # list_modules() now returns those.  Need to refactor this function and
    # main to work with them.
    module_map = categories[category]
    module_info = categories['all']
    aliases = {}
    if '_aliases' in categories:
        aliases = categories['_aliases']
    category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
    category_file = open(category_file_path, "wb")
    print("*** recording category %s in %s ***" % (category, category_file_path))
    # start a new category file
    category = category.replace("_", " ")
    category = category.title()
    modules = []
    deprecated = []
    # Collect module names (including those nested in subcategories) and
    # remember which are deprecated (underscore-prefixed).
    for module in module_map.keys():
        if isinstance(module_map[module], dict):
            for mod in (m for m in module_map[module].keys() if m in module_info):
                if mod.startswith("_"):
                    deprecated.append(mod)
        else:
            if module not in module_info:
                continue
            if module.startswith("_"):
                deprecated.append(module)
        modules.append(module)
    # Sort ignoring the deprecation underscore prefix.
    modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
    category_header = b"%s Modules" % (to_bytes(category.title()))
    underscores = b"`" * len(category_header)
    category_file.write(b"""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
    sections = []
    for module in modules:
        if module in module_map and isinstance(module_map[module], dict):
            # Subcategory: rendered as its own section below.
            sections.append(module)
            continue
        else:
            print_modules(module, category_file, deprecated, options, env, template, outputname, module_info, aliases)
    sections.sort()
    # One sub-section (with its own toctree) per subcategory.
    for section in sections:
        category_file.write(b"\n%s\n%s\n\n" % (to_bytes(section.replace("_", " ").title()), b'-' * len(section)))
        category_file.write(b".. toctree:: :maxdepth: 1\n\n")
        section_modules = list(module_map[section].keys())
        section_modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
        # for module in module_map[section]:
        for module in (m for m in section_modules if m in module_info):
            print_modules(module, category_file, deprecated, options, env, template, outputname, module_info, aliases)
    category_file.write(b"""\n\n
.. note::
    - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged.
      The module documentation details page may explain more about this rationale.
""" % DEPRECATED)
    category_file.close()
    # TODO: end a new category file
def validate_options(options):
    '''Validate option parser options, exiting via SystemExit with a
    message when a required option is missing or invalid.
    '''
    if not options.module_dir:
        # BUG FIX: sys.exit() accepts a single argument and no ``file``
        # keyword; passing file=sys.stderr raised TypeError.  A string
        # argument is printed to stderr automatically before exiting.
        sys.exit("--module-dir is required")
    if not os.path.exists(options.module_dir):
        sys.exit("--module-dir does not exist: %s" % options.module_dir)
    if not options.template_dir:
        sys.exit("--template-dir must be specified")
def main():
    # Entry point for the docs generator: parse options, render each module's
    # documentation, then emit the per-category index pages.
    p = generate_parser()
    (options, args) = p.parse_args()
    validate_options(options)
    # Pick the jinja2 environment/template for the requested output type.
    env, template, outputname = jinja2_environment(options.template_dir, options.type)
    # mod_info: module name -> info; categories: category -> modules;
    # aliases: alternate names.  (Shapes defined by list_modules — not
    # visible here; TODO confirm.)
    mod_info, categories, aliases = list_modules(options.module_dir)
    categories['all'] = mod_info
    categories['_aliases'] = aliases
    # Categories whose names start with '_' are internal bookkeeping.
    category_names = [c for c in categories.keys() if not c.startswith('_')]
    category_names.sort()
    # Write master category list
    category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
    with open(category_list_path, "wb") as category_list_file:
        category_list_file.write(b"Module Index\n")
        category_list_file.write(b"============\n")
        category_list_file.write(b"\n\n")
        category_list_file.write(b".. toctree::\n")
        category_list_file.write(b" :maxdepth: 1\n\n")
        for category in category_names:
            # bytes %-formatting requires Python 3.5+ (or Python 2).
            category_list_file.write(b" list_of_%s_modules\n" % to_bytes(category))
    # Import all the docs into memory
    module_map = mod_info.copy()
    for modname in module_map:
        result = process_module(modname, options, env, template, outputname, module_map, aliases)
        if result == 'SKIPPED':
            del categories['all'][modname]
        else:
            # Keep both the original info and the render result for 'all'.
            categories['all'][modname] = (categories['all'][modname], result)
    # Render all the docs to rst via category pages
    for category in category_names:
        process_category(category, categories, options, env, template, outputname)


if __name__ == '__main__':
    main()
| gpl-3.0 |
centaurialpha/ninja-ide | ninja_ide/gui/dialogs/traceback_widget.py | 8 | 2513 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QTabWidget
from PyQt4.QtGui import QPlainTextEdit
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QPushButton
from PyQt4.QtCore import SIGNAL
from ninja_ide import translations
class PluginErrorDialog(QDialog):
    """Dialog that shows one tab per failed plugin, each containing
    that plugin's Python traceback."""

    def __init__(self):
        QDialog.__init__(self)
        self.setWindowTitle(translations.TR_PLUGIN_ERROR_REPORT)
        self.resize(600, 400)
        layout = QVBoxLayout(self)
        layout.addWidget(QLabel(translations.TR_SOME_PLUGINS_REMOVED))
        self._tabs = QTabWidget()
        layout.addWidget(self._tabs)
        accept_button = QPushButton(translations.TR_ACCEPT)
        accept_button.setMaximumWidth(100)
        button_row = QHBoxLayout()
        button_row.addWidget(accept_button)
        layout.addLayout(button_row)
        # Pressing the accept button simply closes the dialog.
        self.connect(accept_button, SIGNAL("clicked()"), self.close)

    def add_traceback(self, plugin_name, traceback_msg):
        """Show ``traceback_msg`` in a new tab labelled ``plugin_name``."""
        self._tabs.addTab(TracebackWidget(traceback_msg), plugin_name)
class TracebackWidget(QWidget):
    """Read-only text panel displaying a single Python traceback."""

    def __init__(self, traceback_msg):
        QWidget.__init__(self)
        layout = QVBoxLayout(self)
        layout.addWidget(QLabel(translations.TR_TRACEBACK))
        editor = QPlainTextEdit()
        self._editor = editor
        layout.addWidget(editor)
        editor.setReadOnly(True)
        # Line wrap mode 0 — presumably QPlainTextEdit.NoWrap; confirm
        # against the PyQt4 enum.
        editor.setLineWrapMode(0)
        editor.insertPlainText(traceback_msg)
        editor.selectAll()
| gpl-3.0 |
tempbottle/ironpython3 | Src/StdLib/Lib/distutils/log.py | 163 | 1908 | """A simple log mechanism styled after PEP 282."""
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
# Numeric log levels, lowest (most verbose) to highest.
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

import sys


class Log:
    """Minimal logger styled after PEP 282.

    Messages at or above ``threshold`` are emitted; WARN/ERROR/FATAL go
    to stderr, DEBUG/INFO to stdout.
    """

    def __init__(self, threshold=WARN):
        self.threshold = threshold

    def _log(self, level, msg, args):
        """Format ``msg % args`` and write it if ``level`` passes the
        threshold.  Raises ValueError for an unknown level."""
        if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
            raise ValueError('%s wrong log level' % str(level))

        if level >= self.threshold:
            if args:
                msg = msg % args
            if level in (WARN, ERROR, FATAL):
                stream = sys.stderr
            else:
                stream = sys.stdout
            # ROBUSTNESS FIX: sys.stdout/sys.stderr may be replaced with
            # stream-like objects that have no ``errors`` attribute at all;
            # use getattr so we don't crash with AttributeError here.
            if getattr(stream, 'errors', None) == 'strict':
                # emulate backslashreplace error handler
                encoding = stream.encoding
                msg = msg.encode(encoding, "backslashreplace").decode(encoding)
            stream.write('%s\n' % msg)
            stream.flush()

    def log(self, level, msg, *args):
        self._log(level, msg, args)

    def debug(self, msg, *args):
        self._log(DEBUG, msg, args)

    def info(self, msg, *args):
        self._log(INFO, msg, args)

    def warn(self, msg, *args):
        self._log(WARN, msg, args)

    def error(self, msg, *args):
        self._log(ERROR, msg, args)

    def fatal(self, msg, *args):
        self._log(FATAL, msg, args)
# Shared module-level logger plus convenience aliases, so callers can use
# log()/debug()/info()/... directly without instantiating Log themselves.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
    """Set the global log threshold, returning the previous value
    (handy for restoring state from tests)."""
    previous = _global_log.threshold
    _global_log.threshold = level
    return previous
def set_verbosity(v):
    """Map a verbosity count to a threshold: 0 or less -> WARN,
    1 -> INFO, 2 or more -> DEBUG."""
    if v >= 2:
        set_threshold(DEBUG)
    elif v == 1:
        set_threshold(INFO)
    else:
        set_threshold(WARN)
| apache-2.0 |
AndreaCrotti/django-pipeline | pipeline/compressors/__init__.py | 2 | 9095 | from __future__ import unicode_literals
import base64
import os
import posixpath
import re
from itertools import takewhile
from django.utils.encoding import smart_bytes, force_text
from pipeline.conf import settings
from pipeline.storage import default_storage
from pipeline.utils import to_class, relpath
from pipeline.exceptions import CompressorError
# Matches url(...) references inside CSS, capturing the asset path.
URL_DETECTOR = r'url\([\'"]?([^\s)]+\.[a-z]+[^\'"\s]*)[\'"]?\)'
# Matches the url(__EMBED__<path>) placeholders produced by
# Compressor.construct_asset_path, with an optional cache-buster suffix.
URL_REPLACER = r'url\(__EMBED__(.+?)(\?\d+)?\)'

DEFAULT_TEMPLATE_FUNC = "template"
# Client-side underscore-style template compiler, emitted verbatim into the
# compiled JS when the default template function is configured.
TEMPLATE_FUNC = r"""var template = function(str){var fn = new Function('obj', 'var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push(\''+str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/<%=([\s\S]+?)%>/g,function(match,code){return "',"+code.replace(/\\'/g, "'")+",'";}).replace(/<%([\s\S]+?)%>/g,function(match,code){return "');"+code.replace(/\\'/g, "'").replace(/[\r\n\t]/g,' ')+"__p.push('";}).replace(/\r/g,'\\r').replace(/\n/g,'\\n').replace(/\t/g,'\\t')+"');}return __p.join('');");return fn;};"""

# File extension -> mime type for assets that may be embedded as data URIs.
MIME_TYPES = {
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.gif': 'image/gif',
    '.tif': 'image/tiff',
    '.tiff': 'image/tiff',
    '.ttf': 'font/truetype',
    '.otf': 'font/opentype',
    '.woff': 'font/woff'
}
# NOTE: a dict view on Python 3; membership tests below still work.
EMBED_EXTS = MIME_TYPES.keys()
# Font extensions are embeddable regardless of the size limit.
FONT_EXTS = ['.ttf', '.otf', '.woff']
class Compressor(object):
    """Concatenates, rewrites and compresses CSS/JS groups for the
    pipeline, delegating actual minification to configured backends."""

    # Class-level cache of base64-encoded asset contents, deliberately
    # shared across all Compressor instances for the process lifetime.
    asset_contents = {}

    def __init__(self, storage=default_storage, verbose=False):
        self.storage = storage
        self.verbose = verbose

    @property
    def js_compressor(self):
        # Resolve the configured JS backend class lazily from settings.
        return to_class(settings.PIPELINE_JS_COMPRESSOR)

    @property
    def css_compressor(self):
        # Resolve the configured CSS backend class lazily from settings.
        return to_class(settings.PIPELINE_CSS_COMPRESSOR)

    def compress_js(self, paths, templates=None, **kwargs):
        """Concatenate and compress JS files"""
        js = self.concatenate(paths)
        if templates:
            js = js + self.compile_templates(templates)

        # Wrap everything in an IIFE unless explicitly disabled, so the
        # concatenated scripts don't leak locals into the global scope.
        if not settings.PIPELINE_DISABLE_WRAPPER:
            js = "(function() { %s }).call(this);" % js

        compressor = self.js_compressor
        if compressor:
            js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)

        return js

    def compress_css(self, paths, output_filename, variant=None, **kwargs):
        """Concatenate and compress CSS files"""
        css = self.concatenate_and_rewrite(paths, output_filename, variant)
        compressor = self.css_compressor
        if compressor:
            css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
        if not variant:
            return css
        elif variant == "datauri":
            # Replace __EMBED__ placeholders with base64 data URIs.
            return self.with_data_uri(css)
        else:
            raise CompressorError("\"%s\" is not a valid variant" % variant)

    def compile_templates(self, paths):
        """Compile JS template files into a namespace of precompiled
        template functions, returned as a JS source string."""
        compiled = ""
        if not paths:
            return compiled
        namespace = settings.PIPELINE_TEMPLATE_NAMESPACE
        base_path = self.base_path(paths)
        for path in paths:
            contents = self.read_text(path)
            # Escape newlines and single quotes so the template body can be
            # embedded inside a single-quoted JS string literal.
            contents = re.sub("\r?\n", "\\\\n", contents)
            contents = re.sub("'", "\\'", contents)
            name = self.template_name(path, base_path)
            compiled += "%s['%s'] = %s('%s');\n" % (
                namespace,
                name,
                settings.PIPELINE_TEMPLATE_FUNC,
                contents
            )
        # Only ship the bundled compiler when the default template
        # function is used; custom functions are assumed to exist already.
        compiler = TEMPLATE_FUNC if settings.PIPELINE_TEMPLATE_FUNC == DEFAULT_TEMPLATE_FUNC else ""
        return "\n".join([
            "%(namespace)s = %(namespace)s || {};" % {'namespace': namespace},
            compiler,
            compiled
        ])

    def base_path(self, paths):
        # Longest common directory prefix of all template paths.
        def names_equal(name):
            return all(n == name[0] for n in name[1:])
        directory_levels = zip(*[p.split(os.sep) for p in paths])
        return os.sep.join(x[0] for x in takewhile(names_equal, directory_levels))

    def template_name(self, path, base):
        """Find out the name of a JS template"""
        if not base:
            path = os.path.basename(path)
        if path == base:
            base = os.path.dirname(path)
        # Strip the common base prefix and template extension, then flatten
        # directory separators into underscores.
        name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
            re.escape(base), re.escape(settings.PIPELINE_TEMPLATE_EXT)
        ), r"\1", path)
        return re.sub(r"[\/\\]", "_", name)

    def concatenate_and_rewrite(self, paths, output_filename, variant=None):
        """Concatenate together files and rewrite urls"""
        stylesheets = []
        for path in paths:
            def reconstruct(match):
                asset_path = match.group(1)
                # Absolute/protocol-relative URLs are left untouched.
                if asset_path.startswith("http") or asset_path.startswith("//"):
                    return "url(%s)" % asset_path
                asset_url = self.construct_asset_path(asset_path, path,
                                                      output_filename, variant)
                return "url(%s)" % asset_url
            content = self.read_text(path)
            # content needs to be unicode to avoid explosions with non-ascii chars
            content = re.sub(URL_DETECTOR, reconstruct, content)
            stylesheets.append(content)
        return '\n'.join(stylesheets)

    def concatenate(self, paths):
        """Concatenate together a list of files"""
        return "\n".join([self.read_text(path) for path in paths])

    def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
        """Return a rewritten asset URL for a stylesheet"""
        public_path = self.absolute_path(asset_path, os.path.dirname(css_path).replace('\\', '/'))
        if self.embeddable(public_path, variant):
            # Marker later replaced by with_data_uri().
            return "__EMBED__%s" % public_path
        if not posixpath.isabs(asset_path):
            asset_path = self.relative_path(public_path, output_filename)
        return asset_path

    def embeddable(self, path, variant):
        """Is the asset embeddable ?"""
        name, ext = os.path.splitext(path)
        font = ext in FONT_EXTS
        if not variant:
            return False
        if not (re.search(settings.PIPELINE_EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
            return False
        if not ext in EMBED_EXTS:
            return False
        # Fonts are embedded regardless of size; other assets must fit
        # under the configured size limit.
        if not (font or len(self.encoded_content(path)) < settings.PIPELINE_EMBED_MAX_IMAGE_SIZE):
            return False
        return True

    def with_data_uri(self, css):
        # Replace every __EMBED__ placeholder with a base64 data URI.
        def datauri(match):
            path = match.group(1)
            mime_type = self.mime_type(path)
            data = self.encoded_content(path)
            return "url(\"data:%s;charset=utf-8;base64,%s\")" % (mime_type, data)
        return re.sub(URL_REPLACER, datauri, css)

    def encoded_content(self, path):
        """Return the base64 encoded contents"""
        if path in self.__class__.asset_contents:
            return self.__class__.asset_contents[path]
        data = self.read_bytes(path)
        self.__class__.asset_contents[path] = base64.b64encode(data)
        return self.__class__.asset_contents[path]

    def mime_type(self, path):
        """Get mime-type from filename"""
        name, ext = os.path.splitext(path)
        return MIME_TYPES[ext]

    def absolute_path(self, path, start):
        """
        Return the absolute public path for an asset,
        given the path of the stylesheet that contains it.
        """
        if posixpath.isabs(path):
            path = posixpath.join(default_storage.location, path)
        else:
            path = posixpath.join(start, path)
        return posixpath.normpath(path)

    def relative_path(self, absolute_path, output_filename):
        """Rewrite paths relative to the output stylesheet path"""
        absolute_path = posixpath.join(settings.PIPELINE_ROOT, absolute_path)
        output_path = posixpath.join(settings.PIPELINE_ROOT, posixpath.dirname(output_filename))
        return relpath(absolute_path, output_path)

    def read_bytes(self, path):
        """Read file content in binary mode"""
        file = default_storage.open(path)
        content = file.read()
        file.close()
        return content

    def read_text(self, path):
        # Decode the raw bytes to text (force_text handles encoding).
        content = self.read_bytes(path)
        return force_text(content)
class CompressorBase(object):
    """Base class for compressor backends.

    Concrete backends must override :meth:`filter_css` and
    :meth:`filter_js`.
    """

    def __init__(self, verbose):
        # When true, backends may echo diagnostic output.
        self.verbose = verbose

    def filter_css(self, css):
        """Compress a CSS string; implemented by subclasses."""
        raise NotImplementedError()

    def filter_js(self, js):
        """Compress a JS string; implemented by subclasses."""
        raise NotImplementedError()
class SubProcessCompressor(CompressorBase):
    """Compressor backend that shells out to an external command."""

    def execute_command(self, command, content):
        """Run ``command`` with ``content`` piped to stdin and return
        its decoded stdout.

        Raises CompressorError when the command exits non-zero.
        """
        import subprocess
        # SECURITY NOTE: shell=True executes a command string through the
        # shell; ``command`` must always come from trusted settings, never
        # from user input.
        pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        if content:
            content = smart_bytes(content)
        stdout, stderr = pipe.communicate(content)
        if pipe.returncode != 0:
            # BUG FIX: previously a failing command whose stderr happened to
            # be empty was silently accepted; any non-zero exit status is
            # now treated as an error.
            raise CompressorError(
                stderr or "Command '%s' exited with code %s" % (
                    command, pipe.returncode))
        elif self.verbose:
            print(stderr)
        return force_text(stdout)
| mit |
mrrrgn/build-mozharness | configs/b2g_bumper/v2.1s.py | 1 | 3817 | #!/usr/bin/env python
# Configuration for the b2g_bumper script targeting the v2.1s branch.
# Key semantics are interpreted by the b2g_bumper mozharness script
# (not visible here); comments below note what the values themselves show.
config = {
    "exes": {
        # Get around the https warnings
        "hg": ['/usr/local/bin/hg', "--config", "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt"],
        "hgtool.py": ["/usr/local/bin/hgtool.py"],
        "gittool.py": ["/usr/local/bin/gittool.py"],
    },
    # Mercurial repo holding gecko for the b2g34 v2.1s release branch.
    'gecko_pull_url': 'https://hg.mozilla.org/releases/mozilla-b2g34_v2_1s/',
    'gecko_push_url': 'ssh://hg.mozilla.org/releases/mozilla-b2g34_v2_1s/',
    'gecko_local_dir': 'mozilla-b2g34_v2_1s',
    'git_ref_cache': '/builds/b2g_bumper/git_ref_cache.json',
    'manifests_repo': 'https://git.mozilla.org/b2g/b2g-manifest.git',
    'manifests_revision': 'origin/v2.1s',
    'hg_user': 'B2G Bumper Bot <release+b2gbumper@mozilla.com>',
    "ssh_key": "~/.ssh/ffxbld_rsa",
    "ssh_user": "ffxbld",
    'hgtool_base_bundle_urls': ['https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/bundles'],
    # Gaia integration repo and the gecko file that pins its revision.
    'gaia_repo_url': 'https://hg.mozilla.org/integration/gaia-2_1s',
    'gaia_revision_file': 'b2g/config/gaia.json',
    'gaia_max_revisions': 5,
    # Which git branch this hg repo corresponds to
    'gaia_git_branch': 'v2.1s',
    'gaia_mapper_project': 'gaia',
    'mapper_url': 'http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}',
    # Devices to process; 'ignore_projects'/'ignore_groups' presumably
    # filter entries out of each device's manifest — confirm against the
    # b2g_bumper script.
    'devices': {
        'dolphin': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'dolphin-512': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'emulator-kk': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'emulator-jb': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'emulator-ics': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
            'manifest_file': 'emulator.xml',
        },
        # Equivalent to emulator-ics - see bug 916134
        # Remove once the above bug resolved
        'emulator': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
            'manifest_file': 'emulator.xml',
        },
        'flame': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'flame-kk': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
        'nexus-4': {
            'ignore_projects': ['gecko'],
            'ignore_groups': ['darwin'],
        },
    },
    # Maps upstream git remotes to their git.mozilla.org mirrors.
    'repo_remote_mappings': {
        'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
        'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
        'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
        'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
        'http://sprdsource.spreadtrum.com:8085/b2g/android': 'https://git.mozilla.org/external/sprd-aosp',
        'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
        'git://github.com/t2m-foxfone/': 'https://git.mozilla.org/external/t2m-foxfone',
        # Some mappings to ourself, we want to leave these as-is!
        'https://git.mozilla.org/external/aosp': 'https://git.mozilla.org/external/aosp',
        'https://git.mozilla.org/external/caf': 'https://git.mozilla.org/external/caf',
        'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/external/apitrace': 'https://git.mozilla.org/external/apitrace',
        'https://git.mozilla.org/external/t2m-foxfone': 'https://git.mozilla.org/external/t2m-foxfone',
    },
}
| mpl-2.0 |
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/sqlalchemy/pool.py | 3 | 49811 | # sqlalchemy/pool.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
# Module-level registry mapping DB-API module -> _DBProxy, populated by manage().
proxies = {}
def manage(module, **params):
    """Return a proxy for a DB-API module that automatically
    pools connections.

    Given a DB-API 2.0 module and pool management parameters, returns
    a proxy for the module that will automatically pool connections,
    creating new connection pools for each distinct set of connection
    arguments sent to the decorated module's connect() function.

    :param module: a DB-API 2.0 database module

    :param poolclass: the class used by the pool module to provide
     pooling.  Defaults to :class:`.QueuePool`.

    :param \*\*params: will be passed through to *poolclass*

    """
    # Reuse an existing proxy when one has been registered for this module;
    # otherwise create one and register it atomically via setdefault.
    if module in proxies:
        return proxies[module]
    return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
    """Remove all current DB-API 2.0 managers.

    All pools and connections are disposed.

    """
    # Close every manager first, then drop the registry entries.
    for manager in list(proxies.values()):
        manager.close()
    proxies.clear()
# Symbolic constants for the Pool ``reset_on_return`` behavior: roll back,
# commit, or do nothing when a connection is returned to the pool.
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
    """partial implementation of :class:`.Dialect`
    which provides DBAPI connection methods.

    When a :class:`.Pool` is combined with an :class:`.Engine`,
    the :class:`.Engine` replaces this with its own
    :class:`.Dialect`.

    """

    def do_rollback(self, dbapi_connection):
        # Delegate straight to the raw DBAPI connection.
        dbapi_connection.rollback()

    def do_commit(self, dbapi_connection):
        dbapi_connection.commit()

    def do_close(self, dbapi_connection):
        dbapi_connection.close()
class Pool(log.Identified):
    """Abstract base class for connection pools."""

    # Stub dialect supplying rollback/commit/close; replaced by the real
    # Engine dialect when this pool is attached to an Engine.
    _dialect = _ConnDialect()

    def __init__(self,
                 creator, recycle=-1, echo=None,
                 use_threadlocal=False,
                 logging_name=None,
                 reset_on_return=True,
                 listeners=None,
                 events=None,
                 dialect=None,
                 _dispatch=None):
        """
        Construct a Pool.

        :param creator: a callable function that returns a DB-API
          connection object.  The function will be called with
          parameters.

        :param recycle: If set to non -1, number of seconds between
          connection recycling, which means upon checkout, if this
          timeout is surpassed the connection will be closed and
          replaced with a newly opened connection. Defaults to -1.

        :param logging_name: String identifier which will be used within
          the "name" field of logging records generated within the
          "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
          id.

        :param echo: If True, connections being pulled and retrieved
          from the pool will be logged to the standard output, as well
          as pool sizing information.  Echoing can also be achieved by
          enabling logging for the "sqlalchemy.pool"
          namespace. Defaults to False.

        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet.  Offers a slight performance advantage at the
          cost of individual transactions by default.  The
          :meth:`.Pool.unique_connection` method is provided to return
          a consistenty unique connection to bypass this behavior
          when the flag is set.

          .. warning::  The :paramref:`.Pool.use_threadlocal` flag
             **does not affect the behavior** of :meth:`.Engine.connect`.
             :meth:`.Engine.connect` makes use of the
             :meth:`.Pool.unique_connection` method which **does not use thread
             local context**.  To produce a :class:`.Connection` which refers
             to the :meth:`.Pool.connect` method, use
             :meth:`.Engine.contextual_connect`.

             Note that other SQLAlchemy connectivity systems such as
             :meth:`.Engine.execute` as well as the orm
             :class:`.Session` make use of
             :meth:`.Engine.contextual_connect` internally, so these functions
             are compatible with the :paramref:`.Pool.use_threadlocal` setting.

          .. seealso::

             :ref:`threadlocal_strategy` - contains detail on the
             "threadlocal" engine strategy, which provides a more comprehensive
             approach to "threadlocal" connectivity for the specific
             use case of using :class:`.Engine` and :class:`.Connection` objects
             directly.

        :param reset_on_return: Determine steps to take on
          connections as they are returned to the pool.
          reset_on_return can have any of these values:

          * ``"rollback"`` - call rollback() on the connection,
            to release locks and transaction resources.
            This is the default value.  The vast majority
            of use cases should leave this value set.
          * ``True`` - same as 'rollback', this is here for
            backwards compatibility.
          * ``"commit"`` - call commit() on the connection,
            to release locks and transaction resources.
            A commit here may be desirable for databases that
            cache query plans if a commit is emitted,
            such as Microsoft SQL Server.  However, this
            value is more dangerous than 'rollback' because
            any data changes present on the transaction
            are committed unconditionally.
          * ``None`` - don't do anything on the connection.
            This setting should only be made on a database
            that has no transaction support at all,
            namely MySQL MyISAM.  By not doing anything,
            performance can be improved.  This
            setting should **never be selected** for a
            database that supports transactions,
            as it will lead to deadlocks and stale
            state.
          * ``"none"`` - same as ``None``

            .. versionadded:: 0.9.10

          * ``False`` - same as None, this is here for
            backwards compatibility.

          .. versionchanged:: 0.7.6
             :paramref:`.Pool.reset_on_return` accepts ``"rollback"``
             and ``"commit"`` arguments.

        :param events: a list of 2-tuples, each of the form
          ``(callable, target)`` which will be passed to :func:`.event.listen`
          upon construction.  Provided here so that event listeners
          can be assigned via :func:`.create_engine` before dialect-level
          listeners are applied.

        :param listeners: Deprecated.  A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool.  This has been superseded by
          :func:`~sqlalchemy.event.listen`.

        :param dialect: a :class:`.Dialect` that will handle the job
          of calling rollback(), close(), or commit() on DBAPI connections.
          If omitted, a built-in "stub" dialect is used.  Applications that
          make use of :func:`~.create_engine` should not use this parameter
          as it is handled by the engine creation strategy.

          .. versionadded:: 1.1 - ``dialect`` is now a public parameter
             to the :class:`.Pool`.

        """
        if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
        else:
            self._orig_logging_name = None

        log.instance_logger(self, echoflag=echo)
        self._threadconns = threading.local()
        self._creator = creator
        self._recycle = recycle
        # Timestamp of the last pool-wide invalidation; connections created
        # before this time are recycled on next checkout.
        self._invalidate_time = 0
        self._use_threadlocal = use_threadlocal
        # Normalize the several accepted spellings of reset_on_return into
        # one of the three reset_* symbols.
        if reset_on_return in ('rollback', True, reset_rollback):
            self._reset_on_return = reset_rollback
        elif reset_on_return in ('none', None, False, reset_none):
            self._reset_on_return = reset_none
        elif reset_on_return in ('commit', reset_commit):
            self._reset_on_return = reset_commit
        else:
            raise exc.ArgumentError(
                "Invalid value for 'reset_on_return': %r"
                % reset_on_return)

        self.echo = echo

        if _dispatch:
            self.dispatch._update(_dispatch, only_propagate=False)
        if dialect:
            self._dialect = dialect
        if events:
            for fn, target in events:
                event.listen(self, target, fn)
        if listeners:
            util.warn_deprecated(
                "The 'listeners' argument to Pool (and "
                "create_engine()) is deprecated. Use event.listen().")
            for l in listeners:
                self.add_listener(l)

    @property
    def _creator(self):
        return self.__dict__['_creator']

    @_creator.setter
    def _creator(self, creator):
        # Assigning a new creator also re-derives the wrapped invoker.
        self.__dict__['_creator'] = creator
        self._invoke_creator = self._should_wrap_creator(creator)

    def _should_wrap_creator(self, creator):
        """Detect if creator accepts a single argument, or is sent
        as a legacy style no-arg function.

        """
        try:
            argspec = util.get_callable_argspec(self._creator, no_self=True)
        except TypeError:
            return lambda crec: creator()

        defaulted = argspec[3] is not None and len(argspec[3]) or 0
        positionals = len(argspec[0]) - defaulted

        # look for the exact arg signature that DefaultStrategy
        # sends us
        if (argspec[0], argspec[3]) == (['connection_record'], (None,)):
            return creator
        # or just a single positional
        elif positionals == 1:
            return creator
        # all other cases, just wrap and assume legacy "creator" callable
        # thing
        else:
            return lambda crec: creator()

    def _close_connection(self, connection):
        # Close via the dialect; failures are logged, never propagated.
        self.logger.debug("Closing connection %r", connection)
        try:
            self._dialect.do_close(connection)
        except Exception:
            self.logger.error("Exception closing connection %r",
                              connection, exc_info=True)

    @util.deprecated(
        2.7, "Pool.add_listener is deprecated. Use event.listen()")
    def add_listener(self, listener):
        """Add a :class:`.PoolListener`-like object to this pool.

        ``listener`` may be an object that implements some or all of
        PoolListener, or a dictionary of callables containing implementations
        of some or all of the named methods in PoolListener.

        """
        interfaces.PoolListener._adapt_listener(self, listener)

    def unique_connection(self):
        """Produce a DBAPI connection that is not referenced by any
        thread-local context.

        This method is equivalent to :meth:`.Pool.connect` when the
        :paramref:`.Pool.use_threadlocal` flag is not set to True.
        When :paramref:`.Pool.use_threadlocal` is True, the
        :meth:`.Pool.unique_connection` method provides a means of bypassing
        the threadlocal context.

        """
        return _ConnectionFairy._checkout(self)

    def _create_connection(self):
        """Called by subclasses to create a new ConnectionRecord."""
        return _ConnectionRecord(self)

    def _invalidate(self, connection, exception=None):
        """Mark all connections established within the generation
        of the given connection as invalidated.

        If this pool's last invalidate time is before when the given
        connection was created, update the timestamp til now.  Otherwise,
        no action is performed.

        Connections with a start time prior to this pool's invalidation
        time will be recycled upon next checkout.
        """
        rec = getattr(connection, "_connection_record", None)
        if not rec or self._invalidate_time < rec.starttime:
            self._invalidate_time = time.time()
        if getattr(connection, 'is_valid', False):
            connection.invalidate(exception)

    def recreate(self):
        """Return a new :class:`.Pool`, of the same class as this one
        and configured with identical creation arguments.

        This method is used in conjunction with :meth:`dispose`
        to close out an entire :class:`.Pool` and create a new one in
        its place.

        """
        raise NotImplementedError()

    def dispose(self):
        """Dispose of this pool.

        This method leaves the possibility of checked-out connections
        remaining open, as it only affects connections that are
        idle in the pool.

        See also the :meth:`Pool.recreate` method.

        """
        raise NotImplementedError()

    def connect(self):
        """Return a DBAPI connection from the pool.

        The connection is instrumented such that when its
        ``close()`` method is called, the connection will be returned to
        the pool.

        """
        if not self._use_threadlocal:
            return _ConnectionFairy._checkout(self)

        # Threadlocal mode: reuse this thread's checked-out connection when
        # one exists; otherwise check out a new one and record it.
        try:
            rec = self._threadconns.current()
        except AttributeError:
            pass
        else:
            if rec is not None:
                return rec._checkout_existing()

        return _ConnectionFairy._checkout(self, self._threadconns)

    def _return_conn(self, record):
        """Given a _ConnectionRecord, return it to the :class:`.Pool`.

        This method is called when an instrumented DBAPI connection
        has its ``close()`` method called.

        """
        if self._use_threadlocal:
            try:
                del self._threadconns.current
            except AttributeError:
                pass
        self._do_return_conn(record)

    def _do_get(self):
        """Implementation for :meth:`get`, supplied by subclasses."""
        raise NotImplementedError()

    def _do_return_conn(self, conn):
        """Implementation for :meth:`return_conn`, supplied by subclasses."""
        raise NotImplementedError()

    def status(self):
        # Human-readable pool status; supplied by subclasses.
        raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
fairy_ref = None
starttime = None
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persisent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except:
with util.safe_reraise():
rec.checkin()
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
rec, pool, ref, echo)
)
_refs.add(rec)
if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
def checkin(self):
    """Return this record to the owning pool.

    Runs any queued finalize callbacks against the DBAPI connection and
    fires the ``checkin`` pool event before handing the record back.
    """
    self.fairy_ref = None
    connection = self.connection
    pool = self.__pool
    while self.finalize_callback:
        finalizer = self.finalize_callback.pop()
        finalizer(connection)
    if pool.dispatch.checkin:
        pool.dispatch.checkin(connection, self)
    pool._return_conn(self)
@property
def in_use(self):
    # a live weakref to a fairy means the record is currently checked out
    return self.fairy_ref is not None

@property
def last_connect_time(self):
    # epoch seconds of the most recent (re)connect
    return self.starttime

def close(self):
    """Close the DBAPI connection held by this record, if any."""
    if self.connection is not None:
        self.__close()
def invalidate(self, e=None, soft=False):
    """Invalidate the DBAPI connection held by this
    :class:`._ConnectionRecord`.

    This method is called for all connection invalidations, including
    when the :meth:`._ConnectionFairy.invalidate` or
    :meth:`.Connection.invalidate` methods are called, as well as when any
    so-called "automatic invalidation" condition occurs.

    :param e: an exception object indicating a reason for the invalidation.

    :param soft: if True, the connection isn't closed; instead, this
     connection will be recycled on next checkout.

    .. versionadded:: 1.0.3

    .. seealso::

        :ref:`pool_connection_invalidation`

    """
    # already invalidated
    if self.connection is None:
        return
    # fire the appropriate pool event before touching the connection
    if soft:
        self.__pool.dispatch.soft_invalidate(self.connection, self, e)
    else:
        self.__pool.dispatch.invalidate(self.connection, self, e)
    if e is not None:
        self.__pool.logger.info(
            "%sInvalidate connection %r (reason: %s:%s)",
            "Soft " if soft else "",
            self.connection, e.__class__.__name__, e)
    else:
        self.__pool.logger.info(
            "%sInvalidate connection %r",
            "Soft " if soft else "",
            self.connection)
    if soft:
        # soft: keep the connection; get_connection() recycles it on
        # next checkout by comparing this timestamp to starttime
        self._soft_invalidate_time = time.time()
    else:
        # hard: close now and drop the reference entirely
        self.__close()
        self.connection = None
def get_connection(self):
    """Return the DBAPI connection, reconnecting first if the record was
    hard-invalidated, and recycling it if the recycle timeout elapsed or
    a pool-wide / soft invalidation postdates the last connect."""
    recycle = False
    if self.connection is None:
        # hard-invalidated: reconnect with a fresh .info dict
        self.info.clear()
        self.__connect()
    elif self.__pool._recycle > -1 and \
            time.time() - self.starttime > self.__pool._recycle:
        self.__pool.logger.info(
            "Connection %r exceeded timeout; recycling",
            self.connection)
        recycle = True
    elif self.__pool._invalidate_time > self.starttime:
        self.__pool.logger.info(
            "Connection %r invalidated due to pool invalidation; " +
            "recycling",
            self.connection
        )
        recycle = True
    elif self._soft_invalidate_time > self.starttime:
        self.__pool.logger.info(
            "Connection %r invalidated due to local soft invalidation; " +
            "recycling",
            self.connection
        )
        recycle = True
    if recycle:
        # close the stale connection and open a new one under a
        # cleared .info dict
        self.__close()
        self.info.clear()
        self.__connect()
    return self.connection
def __close(self):
    """Close the DBAPI connection via the pool, discarding any pending
    finalize callbacks and firing the ``close`` pool event."""
    self.finalize_callback.clear()
    if self.__pool.dispatch.close:
        self.__pool.dispatch.close(self.connection, self)
    self.__pool._close_connection(self.connection)
    self.connection = None
def __connect(self, first_connect_check=False):
    """Create a new DBAPI connection via the pool's creator, recording
    the connect time and firing first_connect / connect events.

    :param first_connect_check: when True, arrange for the pool's
     ``first_connect`` event to fire exactly once via ``exec_once``.
    """
    pool = self.__pool

    # ensure any existing connection is removed, so that if
    # creator fails, this attribute stays None
    self.connection = None
    try:
        self.starttime = time.time()
        connection = pool._invoke_creator(self)
        pool.logger.debug("Created new connection %r", connection)
        self.connection = connection
    except Exception as e:
        pool.logger.debug("Error on connect(): %s", e)
        raise
    else:
        if first_connect_check:
            pool.dispatch.first_connect.\
                for_modify(pool.dispatch).\
                exec_once(self.connection, self)
        if pool.dispatch.connect:
            pool.dispatch.connect(self.connection, self)
def _finalize_fairy(connection, connection_record,
                    pool, ref, echo, fairy=None):
    """Cleanup for a :class:`._ConnectionFairy` whether or not it's already
    been garbage collected.

    Called directly from :meth:`._ConnectionFairy._checkin` and also as
    the weakref callback installed at checkout time.
    """
    _refs.discard(connection_record)

    if ref is not None and \
            connection_record.fairy_ref is not ref:
        # a newer fairy has since been issued against this record; this
        # weakref callback is stale and must not check anything in
        return

    if connection is not None:
        if connection_record and echo:
            pool.logger.debug("Connection %r being returned to pool",
                              connection)
        try:
            # reset the connection (rollback/commit per pool policy)
            # before handing it back
            fairy = fairy or _ConnectionFairy(
                connection, connection_record, echo)
            assert fairy.connection is connection
            fairy._reset(pool)

            # Immediately close detached instances
            if not connection_record:
                if pool.dispatch.close_detached:
                    pool.dispatch.close_detached(connection)
                pool._close_connection(connection)
        except BaseException as e:
            pool.logger.error(
                "Exception during reset or similar", exc_info=True)
            if connection_record:
                connection_record.invalidate(e=e)
            # swallow ordinary Exceptions only; KeyboardInterrupt /
            # SystemExit and the like still propagate
            if not isinstance(e, Exception):
                raise

    if connection_record:
        connection_record.checkin()


# Module-global set keeping checked-out _ConnectionRecords strongly
# referenced so their fairy weakref callbacks can fire reliably.
_refs = set()
class _ConnectionFairy(object):
    """Proxies a DBAPI connection and provides return-on-dereference
    support.

    This is an internal object used by the :class:`.Pool` implementation
    to provide context management to a DBAPI connection delivered by
    that :class:`.Pool`.

    The name "fairy" is inspired by the fact that the
    :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
    only for the length of a specific DBAPI connection being checked out from
    the pool, and additionally that as a transparent proxy, it is mostly
    invisible.

    .. seealso::

        :class:`._ConnectionRecord`

    """

    def __init__(self, dbapi_connection, connection_record, echo):
        self.connection = dbapi_connection
        self._connection_record = connection_record
        self._echo = echo

    connection = None
    """A reference to the actual DBAPI connection being tracked."""

    _connection_record = None
    """A reference to the :class:`._ConnectionRecord` object associated
    with the DBAPI connection.

    This is currently an internal accessor which is subject to change.

    """

    _reset_agent = None
    """Refer to an object with a ``.commit()`` and ``.rollback()`` method;
    if non-None, the "reset-on-return" feature will call upon this object
    rather than directly against the dialect-level do_rollback() and
    do_commit() methods.

    In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
    to this variable when one is in scope so that the :class:`.Transaction`
    takes the job of committing or rolling back on return if
    :meth:`.Connection.close` is called while the :class:`.Transaction`
    still exists.

    This is essentially an "event handler" of sorts but is simplified as an
    instance variable both for performance/simplicity as well as that there
    can only be one "reset agent" at a time.

    """

    @classmethod
    def _checkout(cls, pool, threadconns=None, fairy=None):
        """Check out a connection, creating a fairy if one wasn't passed
        in, and fire ``checkout`` events with disconnect-retry support."""
        if not fairy:
            fairy = _ConnectionRecord.checkout(pool)

            fairy._pool = pool
            fairy._counter = 0

            if threadconns is not None:
                threadconns.current = weakref.ref(fairy)

        if fairy.connection is None:
            raise exc.InvalidRequestError("This connection is closed")

        # _counter tracks nested checkouts of the same fairy; events only
        # fire for the outermost checkout
        fairy._counter += 1

        if not pool.dispatch.checkout or fairy._counter != 1:
            return fairy

        # Pool listeners can trigger a reconnection on checkout
        attempts = 2
        while attempts > 0:
            try:
                pool.dispatch.checkout(fairy.connection,
                                       fairy._connection_record,
                                       fairy)
                return fairy
            except exc.DisconnectionError as e:
                pool.logger.info(
                    "Disconnection detected on checkout: %s", e)
                fairy._connection_record.invalidate(e)
                try:
                    fairy.connection = \
                        fairy._connection_record.get_connection()
                except:
                    # give the record back before re-raising the
                    # reconnect failure
                    with util.safe_reraise():
                        fairy._connection_record.checkin()
                attempts -= 1

        pool.logger.info("Reconnection attempts exhausted on checkout")
        fairy.invalidate()
        raise exc.InvalidRequestError("This connection is closed")

    def _checkout_existing(self):
        # re-checkout of an already checked-out fairy (nested usage)
        return _ConnectionFairy._checkout(self._pool, fairy=self)

    def _checkin(self):
        _finalize_fairy(self.connection, self._connection_record,
                        self._pool, None, self._echo, fairy=self)
        self.connection = None
        self._connection_record = None

    # checking in is how a fairy is "closed"
    _close = _checkin

    def _reset(self, pool):
        """Apply the pool's reset-on-return policy (rollback / commit /
        none), preferring the installed _reset_agent when present."""
        if pool.dispatch.reset:
            pool.dispatch.reset(self, self._connection_record)
        if pool._reset_on_return is reset_rollback:
            if self._echo:
                pool.logger.debug("Connection %s rollback-on-return%s",
                                  self.connection,
                                  ", via agent"
                                  if self._reset_agent else "")
            if self._reset_agent:
                self._reset_agent.rollback()
            else:
                pool._dialect.do_rollback(self)
        elif pool._reset_on_return is reset_commit:
            if self._echo:
                pool.logger.debug("Connection %s commit-on-return%s",
                                  self.connection,
                                  ", via agent"
                                  if self._reset_agent else "")
            if self._reset_agent:
                self._reset_agent.commit()
            else:
                pool._dialect.do_commit(self)

    @property
    def _logger(self):
        return self._pool.logger

    @property
    def is_valid(self):
        """Return True if this :class:`._ConnectionFairy` still refers
        to an active DBAPI connection."""

        return self.connection is not None

    @util.memoized_property
    def info(self):
        """Info dictionary associated with the underlying DBAPI connection
        referred to by this :class:`.ConnectionFairy`, allowing user-defined
        data to be associated with the connection.

        The data here will follow along with the DBAPI connection including
        after it is returned to the connection pool and used again
        in subsequent instances of :class:`._ConnectionFairy`.  It is shared
        with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
        accessors.

        The dictionary associated with a particular DBAPI connection is
        discarded when the connection itself is discarded.

        """
        return self._connection_record.info

    @property
    def record_info(self):
        """Info dictionary associated with the :class:`._ConnectionRecord
        container referred to by this :class:`.ConnectionFairy`.

        Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
        of this dictionary is persistent across connections that are
        disconnected and/or invalidated within the lifespan of a
        :class:`._ConnectionRecord`.

        .. versionadded:: 1.1

        """
        if self._connection_record:
            return self._connection_record.record_info
        else:
            return None

    def invalidate(self, e=None, soft=False):
        """Mark this connection as invalidated.

        This method can be called directly, and is also called as a result
        of the :meth:`.Connection.invalidate` method.   When invoked,
        the DBAPI connection is immediately closed and discarded from
        further use by the pool.  The invalidation mechanism proceeds
        via the :meth:`._ConnectionRecord.invalidate` internal method.

        :param e: an exception object indicating a reason for the invalidation.

        :param soft: if True, the connection isn't closed; instead, this
         connection will be recycled on next checkout.

        .. versionadded:: 1.0.3

        .. seealso::

            :ref:`pool_connection_invalidation`

        """
        if self.connection is None:
            util.warn("Can't invalidate an already-closed connection.")
            return
        if self._connection_record:
            self._connection_record.invalidate(e=e, soft=soft)
        if not soft:
            # hard invalidation ends this checkout immediately
            self.connection = None
            self._checkin()

    def cursor(self, *args, **kwargs):
        """Return a new DBAPI cursor for the underlying connection.

        This method is a proxy for the ``connection.cursor()`` DBAPI
        method.

        """
        return self.connection.cursor(*args, **kwargs)

    def __getattr__(self, key):
        # anything not defined on the fairy is delegated to the DBAPI
        # connection, making the fairy a transparent proxy
        return getattr(self.connection, key)

    def detach(self):
        """Separate this connection from its Pool.

        This means that the connection will no longer be returned to the
        pool when closed, and will instead be literally closed.  The
        containing ConnectionRecord is separated from the DB-API connection,
        and will create a new connection when next used.

        Note that any overall connection limiting constraints imposed by a
        Pool implementation may be violated after a detach, as the detached
        connection is removed from the pool's knowledge and control.
        """

        if self._connection_record is not None:
            rec = self._connection_record
            # NOTE(review): raises KeyError if the record is not in _refs;
            # presumably it always is at this point -- confirm
            _refs.remove(rec)
            rec.fairy_ref = None
            rec.connection = None
            # TODO: should this be _return_conn?
            self._pool._do_return_conn(self._connection_record)
            # take a private copy so the detached connection stops
            # sharing .info with the (now recycled) record
            self.info = self.info.copy()
            self._connection_record = None
            if self._pool.dispatch.detach:
                self._pool.dispatch.detach(self.connection, rec)

    def close(self):
        # nested checkouts only check in when the outermost close occurs
        self._counter -= 1
        if self._counter == 0:
            self._checkin()
class SingletonThreadPool(Pool):
    """A Pool that maintains one connection per thread.

    Maintains one connection per each thread, never moving a connection to a
    thread other than the one which it was created in.

    .. warning::  the :class:`.SingletonThreadPool` will call ``.close()``
       on arbitrary connections that exist beyond the size setting of
       ``pool_size``, e.g. if more unique **thread identities**
       than what ``pool_size`` states are used.  This cleanup is
       non-deterministic and not sensitive to whether or not the connections
       linked to those thread identities are currently in use.

       :class:`.SingletonThreadPool` may be improved in a future release,
       however in its current status it is generally used only for test
       scenarios using a SQLite ``:memory:`` database and is not recommended
       for production use.

    Options are the same as those of :class:`.Pool`, as well as:

    :param pool_size: The number of threads in which to maintain connections
        at once.  Defaults to five.

    :class:`.SingletonThreadPool` is used by the SQLite dialect
    automatically when a memory-based database is used.
    See :ref:`sqlite_toplevel`.

    """

    def __init__(self, creator, pool_size=5, **kw):
        # thread-local checkout behavior is mandatory for this pool
        kw['use_threadlocal'] = True
        Pool.__init__(self, creator, **kw)
        self._conn = threading.local()
        self._all_conns = set()
        self.size = pool_size

    def recreate(self):
        """Return a new pool with the same configuration as this one."""
        self.logger.info("Pool recreating")
        return self.__class__(self._creator,
                              pool_size=self.size,
                              recycle=self._recycle,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              dialect=self._dialect)

    def dispose(self):
        """Dispose of this pool."""

        for conn in self._all_conns:
            try:
                conn.close()
            except Exception:
                # pysqlite won't even let you close a conn from a thread
                # that didn't create it
                pass

        self._all_conns.clear()

    def _cleanup(self):
        # evict arbitrary connections until we're back under pool_size;
        # see the class-level warning about non-deterministic cleanup
        while len(self._all_conns) >= self.size:
            c = self._all_conns.pop()
            c.close()

    def status(self):
        return "SingletonThreadPool id:%d size: %d" % \
            (id(self), len(self._all_conns))

    def _do_return_conn(self, conn):
        # connections stay bound to their creating thread; nothing to do
        pass

    def _do_get(self):
        try:
            # self._conn.current holds a weakref to this thread's conn
            c = self._conn.current()
            if c:
                return c
        except AttributeError:
            # first checkout on this thread
            pass
        c = self._create_connection()
        self._conn.current = weakref.ref(c)
        if len(self._all_conns) >= self.size:
            self._cleanup()
        self._all_conns.add(c)
        return c
class QueuePool(Pool):
    """A :class:`.Pool` that imposes a limit on the number of open
    connections.

    :class:`.QueuePool` is the default pooling implementation used for
    all :class:`.Engine` objects, unless the SQLite dialect is in use.

    """

    def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
                 **kw):
        """
        Construct a QueuePool.

        :param creator: a callable function that returns a DB-API
          connection object, same as that of :paramref:`.Pool.creator`.

        :param pool_size: The size of the pool to be maintained,
          defaults to 5. This is the largest number of connections that
          will be kept persistently in the pool. Note that the pool
          begins with no connections; once this number of connections
          is requested, that number of connections will remain.
          ``pool_size`` can be set to 0 to indicate no size limit; to
          disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
          instead.

        :param max_overflow: The maximum overflow size of the
          pool. When the number of checked-out connections reaches the
          size set in pool_size, additional connections will be
          returned up to this limit. When those additional connections
          are returned to the pool, they are disconnected and
          discarded. It follows then that the total number of
          simultaneous connections the pool will allow is pool_size +
          `max_overflow`, and the total number of "sleeping"
          connections the pool will allow is pool_size. `max_overflow`
          can be set to -1 to indicate no overflow limit; no limit
          will be placed on the total number of concurrent
          connections. Defaults to 10.

        :param timeout: The number of seconds to wait before giving up
          on returning a connection. Defaults to 30.

        :param \**kw: Other keyword arguments including
          :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`,
          :paramref:`.Pool.reset_on_return` and others are passed to the
          :class:`.Pool` constructor.

        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size)
        # _overflow counts from -pool_size: it is 0 when the base pool is
        # fully allocated and max_overflow when completely saturated
        self._overflow = 0 - pool_size
        self._max_overflow = max_overflow
        self._timeout = timeout
        self._overflow_lock = threading.Lock()

    def _do_return_conn(self, conn):
        try:
            self._pool.put(conn, False)
        except sqla_queue.Full:
            # base pool is full: this was an overflow connection --
            # discard it and decrement the overflow counter
            try:
                conn.close()
            finally:
                self._dec_overflow()

    def _do_get(self):
        use_overflow = self._max_overflow > -1
        try:
            # only block waiting for a pooled connection when overflow is
            # exhausted; otherwise fail fast so we can create one
            wait = use_overflow and self._overflow >= self._max_overflow
            return self._pool.get(wait, self._timeout)
        except sqla_queue.Empty:
            if use_overflow and self._overflow >= self._max_overflow:
                if not wait:
                    # raced: overflow filled up after the non-blocking
                    # get -- retry, this time blocking
                    return self._do_get()
                else:
                    raise exc.TimeoutError(
                        "QueuePool limit of size %d overflow %d reached, "
                        "connection timed out, timeout %d" %
                        (self.size(), self.overflow(), self._timeout))

            if self._inc_overflow():
                try:
                    return self._create_connection()
                except:
                    # roll the overflow counter back on connect failure
                    with util.safe_reraise():
                        self._dec_overflow()
            else:
                # lost the race to take the last overflow slot; retry
                return self._do_get()

    def _inc_overflow(self):
        if self._max_overflow == -1:
            # unlimited overflow: no need to take the lock
            self._overflow += 1
            return True
        with self._overflow_lock:
            if self._overflow < self._max_overflow:
                self._overflow += 1
                return True
            else:
                return False

    def _dec_overflow(self):
        if self._max_overflow == -1:
            self._overflow -= 1
            return True
        with self._overflow_lock:
            self._overflow -= 1
            return True

    def recreate(self):
        """Return a new pool with the same configuration as this one."""
        self.logger.info("Pool recreating")
        return self.__class__(self._creator, pool_size=self._pool.maxsize,
                              max_overflow=self._max_overflow,
                              timeout=self._timeout,
                              recycle=self._recycle, echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              dialect=self._dialect)

    def dispose(self):
        """Close all pooled connections and reset the overflow counter."""
        while True:
            try:
                conn = self._pool.get(False)
                conn.close()
            except sqla_queue.Empty:
                break

        self._overflow = 0 - self.size()
        self.logger.info("Pool disposed. %s", self.status())

    def status(self):
        return "Pool size: %d  Connections in pool: %d "\
            "Current Overflow: %d Current Checked out "\
            "connections: %d" % (self.size(),
                                 self.checkedin(),
                                 self.overflow(),
                                 self.checkedout())

    def size(self):
        return self._pool.maxsize

    def checkedin(self):
        return self._pool.qsize()

    def overflow(self):
        return self._overflow

    def checkedout(self):
        return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
    """A Pool which does not pool connections.

    Every checkout opens a brand-new DB-API connection and every checkin
    closes it again; nothing is ever held by the pool.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation are not supported by this Pool implementation, since
    no connections are held persistently.

    .. versionchanged:: 0.7

        :class:`.NullPool` is used by the SQLite dialect automatically
        when a file-based database is used. See :ref:`sqlite_toplevel`.

    """

    def status(self):
        return "NullPool"

    def _do_return_conn(self, conn):
        # nothing is retained; a returned connection is simply closed
        conn.close()

    def _do_get(self):
        # every checkout is a fresh connection
        return self._create_connection()

    def recreate(self):
        """Return a new pool with the same configuration as this one."""
        self.logger.info("Pool recreating")
        pool_cls = self.__class__
        return pool_cls(
            self._creator,
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )

    def dispose(self):
        # nothing is held, so there is nothing to dispose
        pass
class StaticPool(Pool):
    """A Pool of exactly one connection, used for all requests.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation (which is also used to support auto-reconnect) are not
    currently supported by this Pool implementation but may be implemented
    in a future release.

    """

    @memoized_property
    def _conn(self):
        # the single shared DBAPI connection, created lazily on first use
        return self._creator()

    @memoized_property
    def connection(self):
        # the single _ConnectionRecord wrapping that connection
        return _ConnectionRecord(self)

    def status(self):
        return "StaticPool"

    def dispose(self):
        """Close the single connection, if it was ever materialized."""
        # checking __dict__ avoids triggering the memoized property just
        # to close a connection that was never created
        if '_conn' in self.__dict__:
            self._conn.close()
            self._conn = None

    def recreate(self):
        """Return a new pool with the same configuration as this one."""
        self.logger.info("Pool recreating")
        return self.__class__(creator=self._creator,
                              recycle=self._recycle,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              _dispatch=self.dispatch,
                              dialect=self._dialect)

    def _create_connection(self):
        return self._conn

    def _do_return_conn(self, conn):
        pass

    def _do_get(self):
        return self.connection
class AssertionPool(Pool):
    """A :class:`.Pool` that allows at most one checked out connection at
    any given time.

    This will raise an exception if more than one connection is checked out
    at a time.  Useful for debugging code that is using more connections
    than desired.

    .. versionchanged:: 0.7
        :class:`.AssertionPool` also logs a traceback of where
        the original connection was checked out, and reports
        this in the assertion error raised.

    """

    def __init__(self, *args, **kw):
        self._conn = None
        self._checked_out = False
        # capture a stack trace at checkout so a double-checkout error
        # can report where the first checkout happened
        self._store_traceback = kw.pop('store_traceback', True)
        self._checkout_traceback = None
        Pool.__init__(self, *args, **kw)

    def status(self):
        return "AssertionPool"

    def _do_return_conn(self, conn):
        if not self._checked_out:
            raise AssertionError("connection is not checked out")
        self._checked_out = False
        assert conn is self._conn

    def dispose(self):
        self._checked_out = False
        if self._conn:
            self._conn.close()

    def recreate(self):
        """Return a new pool with the same configuration as this one."""
        self.logger.info("Pool recreating")
        return self.__class__(self._creator, echo=self.echo,
                              logging_name=self._orig_logging_name,
                              _dispatch=self.dispatch,
                              dialect=self._dialect)

    def _do_get(self):
        if self._checked_out:
            # report where the outstanding checkout happened, if recorded
            if self._checkout_traceback:
                suffix = ' at:\n%s' % ''.join(
                    chop_traceback(self._checkout_traceback))
            else:
                suffix = ''
            raise AssertionError("connection is already checked out" + suffix)

        if not self._conn:
            self._conn = self._create_connection()

        self._checked_out = True
        if self._store_traceback:
            self._checkout_traceback = traceback.format_stack()
        return self._conn
class _DBProxy(object):
    """Layers connection pooling behavior on top of a standard DB-API module.

    Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
    specific connect parameters. Other functions and attributes are delegated
    to the underlying DB-API module.
    """

    def __init__(self, module, poolclass=QueuePool, **kw):
        """Initializes a new proxy.

        module
          a DB-API 2.0 module

        poolclass
          a Pool class, defaulting to QueuePool

        Other parameters are sent to the Pool object's constructor.

        """
        self.module = module
        self.kw = kw
        self.poolclass = poolclass
        self.pools = {}
        self._create_pool_mutex = threading.Lock()

    def close(self):
        """Drop all pools held by this proxy.

        NOTE(review): pools are only dereferenced here, not disposed;
        their connections close when the pool objects are garbage
        collected -- confirm this is intended.
        """
        for key in list(self.pools):
            del self.pools[key]

    def __del__(self):
        self.close()

    def __getattr__(self, key):
        # anything we don't define is delegated to the wrapped DB-API module
        return getattr(self.module, key)

    def get_pool(self, *args, **kw):
        """Return the pool keyed to the given connect arguments, creating
        it on first use (double-checked under a mutex)."""
        key = self._serialize(*args, **kw)
        try:
            return self.pools[key]
        except KeyError:
            # IMPROVED: use the lock as a context manager rather than
            # manual acquire()/release() in a try/finally -- same
            # exception safety, idiomatic form.
            with self._create_pool_mutex:
                # double-check: another thread may have created the pool
                # while we were waiting on the mutex
                if key not in self.pools:
                    # 'sa_pool_key' is a keying hint only; it must not be
                    # forwarded to the DB-API connect() call
                    kw.pop('sa_pool_key', None)
                    pool = self.poolclass(
                        lambda: self.module.connect(*args, **kw), **self.kw)
                    self.pools[key] = pool
                    return pool
                else:
                    return self.pools[key]

    def connect(self, *args, **kw):
        """Activate a connection to the database.

        Connect to the database using this DBProxy's module and the given
        connect arguments.  If the arguments match an existing pool, the
        connection will be returned from the pool's current thread-local
        connection instance, or if there is no thread-local connection
        instance it will be checked out from the set of pooled connections.

        If the pool has no available connections and allows new connections
        to be created, a new database connection will be made.

        """
        return self.get_pool(*args, **kw).connect()

    def dispose(self, *args, **kw):
        """Dispose the pool referenced by the given connect arguments."""

        key = self._serialize(*args, **kw)
        try:
            del self.pools[key]
        except KeyError:
            pass

    def _serialize(self, *args, **kw):
        # an explicit 'sa_pool_key' overrides positional/keyword hashing
        if "sa_pool_key" in kw:
            return kw['sa_pool_key']

        return tuple(
            list(args) +
            [(k, kw[k]) for k in sorted(kw)]
        )
| mit |
xe1gyq/openstack | device/iot101inc.py | 3 | 2148 | #!/usr/bin/python
import paho.mqtt.client as paho
import psutil
import pywapi
import signal
import sys
import time
from threading import Thread
def functionApiWeather():
    """Fetch current conditions for station MXJO0043 from weather.com
    via pywapi and format them as one human-readable line."""
    data = pywapi.get_weather_from_weather_com('MXJO0043', 'metric')
    current = data['current_conditions']
    parts = [
        data['location']['name'],
        "Temperature " + current['temperature'] + " C",
        # [:-3] trims the tail of the barometer reading string --
        # presumably a unit suffix; TODO confirm against pywapi output
        "Atmospheric Pressure " + current['barometer']['reading'][:-3] + " mbar",
    ]
    return ", ".join(parts)
def functionDataActuator(status):
    """Placeholder actuator: report the requested status on stdout."""
    print("Data Actuator Status %s" % status)
def functionDataActuatorMqttOnMessage(mosq, obj, msg):
    """paho-mqtt on_message callback: forward the payload to the actuator."""
    print("Data Sensor Mqtt Subscribe Message!")
    functionDataActuator(msg.payload)
def functionDataActuatorMqttSubscribe():
    """Connect to the public test.mosquitto.org broker and loop forever,
    dispatching IoT101/DataActuator messages to the actuator callback."""
    mqttclient = paho.Client()
    mqttclient.on_message = functionDataActuatorMqttOnMessage
    # public sandbox broker, default MQTT port, 60s keepalive
    mqttclient.connect("test.mosquitto.org", 1883, 60)
    mqttclient.subscribe("IoT101/DataActuator", 0)
    # loop() returns 0 while the connection is healthy
    while mqttclient.loop() == 0:
        pass
def functionDataSensor():
    """Return total network packets (sent + received) since boot, used
    here as a cheap machine-activity metric."""
    counters = psutil.net_io_counters()
    return counters.packets_sent + counters.packets_recv
def functionDataSensorMqttOnPublish(mosq, obj, msg):
    """paho-mqtt on_publish callback: log that a publish completed."""
    print("Data Sensor Mqtt Published!")
def functionDataSensorMqttPublish():
    """Publish the network-packet counter to IoT101/DataSensor once per
    second, forever, via the public test.mosquitto.org broker."""
    mqttclient = paho.Client()
    mqttclient.on_publish = functionDataSensorMqttOnPublish
    mqttclient.connect("test.mosquitto.org", 1883, 60)
    while True:
        data = functionDataSensor()
        topic = "IoT101/DataSensor"
        mqttclient.publish(topic, data)
        time.sleep(1)
def functionSignalHandler(signal, frame):
    """SIGINT handler: terminate the process cleanly with exit code 0."""
    # equivalent to sys.exit(0), which simply raises SystemExit
    raise SystemExit(0)
if __name__ == '__main__':
    # Ctrl-C exits cleanly via the handler instead of a traceback
    signal.signal(signal.SIGINT, functionSignalHandler)
    # publisher and subscriber each run on their own thread forever
    threadmqttpublish = Thread(target=functionDataSensorMqttPublish)
    threadmqttpublish.start()
    threadmqttsubscribe = Thread(target=functionDataActuatorMqttSubscribe)
    threadmqttsubscribe.start()
    # main thread: print a status line every 5 seconds
    # (Python 2 print statements -- this file is Python 2 only)
    while True:
        print "Hello Internet of Things 101"
        print "Data Sensor: %s " % functionDataSensor()
        print "API Weather: %s " % functionApiWeather()
        time.sleep(5)
| apache-2.0 |
eevee/urwid | examples/pop_up.py | 17 | 1289 | #!/usr/bin/python
import urwid
class PopUpDialog(urwid.WidgetWrap):
    """A dialog that appears with nothing but a close button """
    signals = ['close']

    def __init__(self):
        close_button = urwid.Button("that's pretty cool")
        # clicking the button re-emits our own 'close' signal so the
        # launcher widget can dismiss the pop-up
        urwid.connect_signal(close_button, 'click',
            lambda button: self._emit("close"))
        pile = urwid.Pile([urwid.Text(
            "^^  I'm attached to the widget that opened me. "
            "Try resizing the window!\n"), close_button])
        fill = urwid.Filler(pile)
        self.__super.__init__(urwid.AttrWrap(fill, 'popbg'))
class ThingWithAPopUp(urwid.PopUpLauncher):
    """A button that opens a :class:`PopUpDialog` when clicked."""

    def __init__(self):
        self.__super.__init__(urwid.Button("click-me"))
        urwid.connect_signal(self.original_widget, 'click',
            lambda button: self.open_pop_up())

    def create_pop_up(self):
        # called by PopUpLauncher when the pop-up should appear; wire the
        # dialog's 'close' signal back to close_pop_up()
        pop_up = PopUpDialog()
        urwid.connect_signal(pop_up, 'close',
            lambda button: self.close_pop_up())
        return pop_up

    def get_pop_up_parameters(self):
        # position/size of the pop-up relative to this launcher widget
        return {'left': 0, 'top': 1, 'overlay_width': 32, 'overlay_height': 7}
# demo wiring: a single centered launcher widget inside a Filler
fill = urwid.Filler(urwid.Padding(ThingWithAPopUp(), 'center', 15))
loop = urwid.MainLoop(
    fill,
    [('popbg', 'white', 'dark blue')],
    # pop_ups=True enables the PopUpLauncher overlay machinery
    pop_ups=True)
loop.run()
| lgpl-2.1 |
lintzc/gpdb | src/test/tinc/tincrepo/mpp/models/test/mpp_tc/test_mpp_test_case.py | 9 | 7388 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest2 as unittest
from mpp.models.mpp_tc import _MPPMetaClassType
from mpp.models.mpp_tc import MPPDUT
from mpp.models import MPPTestCase
# Need to import hidden models for isinstance verification
from mpp.models.mpp_tc import __gpdbMPPTestCase__
from mpp.models.mpp_tc import __hawqMPPTestCase__
class MockMPPMetaClassTypeGPDB(_MPPMetaClassType):
    """ Mock MPPMetaClassTypeGPDB to reset DUT """
    # NOTE: this assigns on _MPPMetaClassType itself, mutating the DUT
    # shared by ALL subclasses -- the mocks below depend on that.
    _MPPMetaClassType.DUT = MPPDUT('gpdb', '1.0.0.0')
@unittest.skip('mock')
class MockMPPTestCaseGPDB(MPPTestCase):
    """ Mock MPPTestCaseGPDB to test MRO and get_version """
    __metaclass__ = MockMPPMetaClassTypeGPDB

    def test_do_stuff(self):
        self.assertTrue(True)
class MockMPPMetaClassTypeHAWQ(_MPPMetaClassType):
    """ Mock MPPMetaClassTypeHAWQ to reset DUT """
    # mutates the shared base-class DUT (see MockMPPMetaClassTypeGPDB)
    _MPPMetaClassType.DUT = MPPDUT('hawq', '1.1.0.0')
@unittest.skip('mock')
class MockMPPTestCaseHAWQ(MPPTestCase):
    """ Mock MPPTestCaseHAWQ to test MRO and get_version """
    __metaclass__ = MockMPPMetaClassTypeHAWQ

    def test_do_stuff(self):
        self.assertTrue(True)
class MockMPPMetaClassTypeGPDB42(_MPPMetaClassType):
    """Mock metaclass pinning the shared DUT to GPDB 4.2."""
    _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.2')
@unittest.skip('mock')
class MockMPPTestCaseGPDB42(MPPTestCase):
    """Mock MPPTestCase pinned to GPDB 4.2, exercising get_product_version."""
    __metaclass__ = MockMPPMetaClassTypeGPDB42

    def test_do_stuff(self):
        (product, version) = self.get_product_version()
        # BUG FIX: was "self.assertEquals(prodcut, 'gpdb')" -- the
        # misspelled name would raise NameError if this skipped test
        # were ever run.  Also modernized assertEquals -> assertEqual.
        self.assertEqual(product, 'gpdb')
        self.assertEqual(version, '4.2')
class MPPTestCaseTests(unittest.TestCase):
    """Verify that the MPP metaclass routes mock test cases to the
    correct product-specific hidden base class."""

    def test_get_product_version(self):
        gpdb_test_case = MockMPPTestCaseGPDB('test_do_stuff')
        self.assertEqual(gpdb_test_case.__class__.__product__, 'gpdb')
        self.assertEqual(gpdb_test_case.__class__.__version_string__, '1.0.0.0')
        # a gpdb mock must be a __gpdbMPPTestCase__, never the hawq variant
        self.assertTrue(isinstance(gpdb_test_case, __gpdbMPPTestCase__))
        self.assertFalse(isinstance(gpdb_test_case, __hawqMPPTestCase__))
        hawq_test_case = MockMPPTestCaseHAWQ('test_do_stuff')
        self.assertEqual(hawq_test_case.__class__.__product__, 'hawq')
        self.assertEqual(hawq_test_case.__class__.__version_string__, '1.1.0.0')
        self.assertTrue(isinstance(hawq_test_case, __hawqMPPTestCase__))
        self.assertFalse(isinstance(hawq_test_case, __gpdbMPPTestCase__))
@unittest.skip('mock')
class MockMPPTestCaseMetadata(MPPTestCase):
    """Mock test case whose docstring metadata is parsed by tinc; used by
    MPPTestCaseMetadataTests below."""

    def test_without_metadata(self):
        self.assertTrue(True)

    def test_with_metadata(self):
        """
        @gather_logs_on_failure True
        @restart_on_fatal_failure True
        @db_name blah
        """
        # BUG FIX: was "self.asserTrue(True)" -- misspelled assert
        # method; would raise AttributeError if this test were run.
        self.assertTrue(True)
class MPPTestCaseMetadataTests(unittest.TestCase):
    """Verify docstring-metadata parsing defaults on MPPTestCase."""

    def test_default_metadata(self):
        # no docstring metadata -> both flags default to False
        mpp_test_case = MockMPPTestCaseMetadata('test_without_metadata')
        self.assertFalse(mpp_test_case.gather_logs_on_failure)
        self.assertFalse(mpp_test_case.restart_on_fatal_failure)

    def test_with_metadata(self):
        mpp_test_case = MockMPPTestCaseMetadata('test_with_metadata')
        self.assertTrue(mpp_test_case.gather_logs_on_failure)
        self.assertTrue(mpp_test_case.restart_on_fatal_failure)

    def test_out_dir(self):
        # out_dir is relative; get_out_dir() resolves against this file
        self.assertEquals(MockMPPTestCaseMetadata.out_dir, 'output/')
        self.assertEquals(MockMPPTestCaseMetadata.get_out_dir(), os.path.join(os.path.dirname(__file__), 'output/'))

    def test_db_name_metadata(self):
        mpp_test_case = MockMPPTestCaseMetadata('test_with_metadata')
        self.assertEquals(mpp_test_case.db_name, 'blah')

    def test_db_name_default(self):
        mpp_test_case = MockMPPTestCaseMetadata('test_without_metadata')
        self.assertEquals(mpp_test_case.db_name, None)
@unittest.skip('mock')
class MockMPPTestCaseGPOPT(MPPTestCase):
    """Mock test case carrying @gpopt version metadata, used by the
    skip-decision tests in MPPTestCaseGPOPTTests."""

    def test_without_metadata(self):
        self.assertTrue(True)

    def test_with_metadata_higher(self):
        """
        @gpopt 2.240
        """
        # BUG FIX: was "self.asserTrue(True)" (misspelled assert method).
        self.assertTrue(True)

    def test_with_metadata_lower(self):
        """
        @gpopt 1.0
        """
        # BUG FIX: was "self.asserTrue(True)".
        self.assertTrue(True)

    def test_with_metadata_same(self):
        """
        @gpopt 2.200.1
        """
        # BUG FIX: was "self.asserTrue(True)".
        self.assertTrue(True)
class MPPTestCaseGPOPTTests(unittest.TestCase):
    """Verify @gpopt metadata skip decisions against a simulated deployed
    optimizer version (set via __product_environment__['gpopt'])."""

    def test_without_metadata(self):
        # Test with deployed gpopt version (simulate hawq or gpdb with optimizer)
        MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = "2.200.1"
        mpp_test_case = MockMPPTestCaseGPOPT('test_without_metadata')
        self.assertTrue(mpp_test_case.skip is None)
        # Test without deployed gpopt version (simulate gpdb without optimizer)
        MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)
        mpp_test_case = MockMPPTestCaseGPOPT('test_without_metadata')
        self.assertTrue(mpp_test_case.skip is None)

    def test_with_metadata_higher(self):
        # required gpopt 2.240 > deployed 2.200.1 -> skipped either way
        MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = "2.200.1"
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_higher')
        self.assertTrue(mpp_test_case.skip is not None)
        # Test without deployed gpopt version (simulate gpdb without optimizer)
        MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_higher')
        self.assertTrue(mpp_test_case.skip is not None)

    def test_with_metadata_lower(self):
        # required gpopt 1.0 <= deployed 2.200.1 -> runs when deployed,
        # skipped when no optimizer is present
        MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = "2.200.1"
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_lower')
        self.assertTrue(mpp_test_case.skip is None)
        # Test without deployed gpopt version (simulate gpdb without optimizer)
        MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_lower')
        self.assertTrue(mpp_test_case.skip is not None)

    def test_with_metadata_same(self):
        # required gpopt exactly equals the deployed version
        MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = "2.200.1"
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_same')
        self.assertTrue(mpp_test_case.skip is None)
        # Test without deployed gpopt version (simulate gpdb without optimizer)
        MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)
        mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_same')
        self.assertTrue(mpp_test_case.skip is not None)
| apache-2.0 |
arcticshores/kivy | kivy/uix/progressbar.py | 42 | 2456 | '''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize the progress of some task.
Only the horizontal mode is currently supported: the vertical mode is not
yet available.
The progress bar has no interactive elements and is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done)
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
    '''Class for creating a progress bar widget.

    See module documentation for more details.
    '''

    def __init__(self, **kwargs):
        # Backing storage for the ``value`` alias property; starts at 0.
        self._value = 0.
        super(ProgressBar, self).__init__(**kwargs)

    def _get_value(self):
        # Getter for the ``value`` alias property.
        return self._value

    def _set_value(self, value):
        # Clamp the incoming value into [0, max] before storing it.
        value = max(0, min(self.max, value))
        if value != self._value:
            self._value = value
            # Returning True signals the AliasProperty that the value
            # actually changed, so observers are notified.
            return True

    value = AliasProperty(_get_value, _set_value)
    '''Current value used for the slider.

    :attr:`value` is an :class:`~kivy.properties.AliasProperty` that
    returns the value of the progress bar. If the value is < 0 or >
    :attr:`max`, it will be normalized to those boundaries.

    .. versionchanged:: 1.6.0
        The value is now limited to between 0 and :attr:`max`.
    '''

    def get_norm_value(self):
        # Return value scaled into 0-1; guard against division by a
        # zero maximum.
        d = self.max
        if d == 0:
            return 0
        return self.value / float(d)

    def set_norm_value(self, value):
        # Inverse of get_norm_value: scale a 0-1 ratio back to [0, max].
        self.value = value * self.max

    value_normalized = AliasProperty(get_norm_value, set_norm_value,
                                     bind=('value', 'max'))
    '''Normalized value inside the range 0-1::

        >>> pb = ProgressBar(value=50, max=100)
        >>> pb.value
        50
        >>> pb.value_normalized
        0.5

    :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    max = NumericProperty(100.)
    '''Maximum value allowed for :attr:`value`.

    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
    100.
    '''
if __name__ == '__main__':
    # Manual demo: run a standalone app showing a progress bar at 50%.
    from kivy.base import runTouchApp
    runTouchApp(ProgressBar(value=50))
| mit |
SmartInfrastructures/neutron | neutron/tests/unit/extension_stubs.py | 41 | 1908 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron import wsgi
class StubExtension(object):
    """Minimal extension descriptor used as a test double.

    Only the alias is configurable; every other descriptor field is a
    fixed constant (the name) or an empty string.
    """

    _NAME = "Stub Extension"

    def __init__(self, alias="stub_extension"):
        # The alias is the only piece of per-instance state.
        self.alias = alias

    def get_name(self):
        return self._NAME

    def get_alias(self):
        return self.alias

    def get_description(self):
        return ""

    def get_namespace(self):
        return ""

    def get_updated(self):
        return ""
class StubPlugin(object):
    """Plugin test double exposing a configurable list of extension aliases.

    :param supported_extensions: optional list of extension alias strings;
        defaults to an empty list.
    """

    def __init__(self, supported_extensions=None):
        # A fresh list per instance fixes the shared-mutable-default bug:
        # with ``supported_extensions=[]`` every default-constructed
        # instance aliased the same list object.
        self.supported_extension_aliases = (
            [] if supported_extensions is None else supported_extensions)
class ExtensionExpectingPluginInterface(StubExtension):
    """Extension stub whose plugin must implement StubPluginInterface.

    Returning an interface class from :meth:`get_plugin_interface` tells
    the extension framework to validate the active plugin against every
    abstract method declared on that interface.
    """

    def get_plugin_interface(self):
        # The framework checks the plugin against this ABC.
        return StubPluginInterface
class StubPluginInterface(extensions.PluginInterface):
    """Plugin interface with a single abstract method.

    Plugins paired with ExtensionExpectingPluginInterface must implement
    every abstract method declared here.
    """

    @abc.abstractmethod
    def get_foo(self, bar=None):
        """Return a foo value; concrete plugins must override this."""
        pass
class StubBaseAppController(wsgi.Controller):
    """Tiny WSGI controller returning canned responses for tests."""

    def index(self, request):
        # Plain string body for the collection view.
        return "base app index"

    def show(self, request, id):
        # Canned single-resource payload.
        return dict(fort='knox')

    def update(self, request, id):
        # Simulates a resource whose field cannot be edited.
        return dict(uneditable='original_value')
a10networks/a10sdk-python | a10sdk/core/router/router_bgp_address_family_ipv6_network_synchronization.py | 2 | 1321 | from a10sdk.common.A10BaseClass import A10BaseClass
class Synchronization(A10BaseClass):
    """Configuration object for BGP IPv6 IGP synchronization.

    Maps to the ACOS REST resource::

        /axapi/v3/router/bgp/{as_number}/address-family/ipv6/network/synchronization

    Supports CRUD operations through `common/A10BaseClass`; this class is
    the "PARENT" class for this module.

    :param network_synchronization: flag (number, default 0) that enables
        IGP synchronization.
    :param uuid: uuid of the object (string, 1-64 chars, modify-not-allowed).
    :param DeviceProxy: device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "synchronization"
        self.a10_url = "/axapi/v3/router/bgp/{as_number}/address-family/ipv6/network/synchronization"
        self.DeviceProxy = ""
        self.network_synchronization = ""
        self.uuid = ""
        # Caller-supplied keyword arguments override the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.