repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
brandond/ansible
|
lib/ansible/modules/cloud/google/gcp_dns_managed_zone_facts.py
|
10
|
5224
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_dns_managed_zone_facts
description:
- Gather facts for GCP ManagedZone
short_description: Gather facts for GCP ManagedZone
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
dns_name:
description:
- Restricts the list to return only zones with this domain name.
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a managed zone facts
gcp_dns_managed_zone_facts:
dns_name: test.somewild2.example.com.
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
description:
description:
- A mutable string of at most 1024 characters associated with this resource
for the user's convenience. Has no effect on the managed zone's function.
returned: success
type: str
dnsName:
description:
- The DNS name of this managed zone, for instance "example.com.".
returned: success
type: str
id:
description:
- Unique identifier for the resource; defined by the server.
returned: success
type: int
name:
description:
- User assigned name for this resource.
- Must be unique within the project.
returned: success
type: str
nameServers:
description:
- Delegate your managed_zone to these virtual name servers; defined by the server
.
returned: success
type: list
nameServerSet:
description:
- Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet
is a set of DNS name servers that all host the same ManagedZones. Most users
will leave this field unset.
returned: success
type: list
creationTime:
description:
- The time that this resource was created on the server.
- This is in RFC3339 text format.
returned: success
type: str
labels:
description:
- A set of key/value label pairs to assign to this ManagedZone.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Gather facts for GCP Cloud DNS ManagedZone resources.

    Queries the managedZones collection (optionally filtered by
    dns_name) and exits the module with the matching zones in 'items'.
    """
    module = GcpModule(argument_spec=dict(dns_name=dict(type='list', elements='str')))
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
    items = fetch_list(module, collection(module), module.params['dns_name'])
    # BUG FIX: fetch_list returns None on HTTP 404/204 (see
    # return_if_object); guard before .get() to avoid AttributeError.
    if items and items.get('managedZones'):
        items = items.get('managedZones')
    else:
        items = []
    return_value = {'items': items}
    module.exit_json(**return_value)
def collection(module):
    """Build the managedZones collection URL for the module's project."""
    base = 'https://www.googleapis.com/dns/v1/projects/%s/managedZones'
    return base % module.params['project']
def fetch_list(module, link, query):
    """GET the collection at 'link', filtered by dnsName, and decode it."""
    session = GcpSession(module, 'dns')
    reply = session.get(link, params={'dnsName': query})
    return return_if_object(module, reply)
def return_if_object(module, response):
    """Decode a JSON API response.

    Returns None for 404 (not found) and 204 (no content); fails the
    module on bad JSON or an API-reported error; otherwise returns the
    decoded object.
    """
    # Nothing to decode for "not found" or "no content" replies.
    if response.status_code in (404, 204):
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)
    return result
# Script entry point.
if __name__ == "__main__":
    main()
|
gpl-3.0
|
bonezuk/linux
|
scripts/analyze_suspend.py
|
1537
|
120394
|
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
    """Global, single-instance container for system values and test
    parameters (instantiated once as 'sysvals' below)."""
    version = 3.0
    verbose = False
    testdir = '.'
    tpath = '/sys/kernel/debug/tracing/'                # ftrace debugfs root
    fpdtpath = '/sys/firmware/acpi/tables/FPDT'         # ACPI firmware perf table
    epath = '/sys/kernel/debug/tracing/events/power/'   # power trace events dir
    traceevents = [
        'suspend_resume',
        'device_pm_callback_end',
        'device_pm_callback_start'
    ]
    modename = {
        'freeze': 'Suspend-To-Idle (S0)',
        'standby': 'Power-On Suspend (S1)',
        'mem': 'Suspend-to-RAM (S3)',
        'disk': 'Suspend-to-disk (S4)'
    }
    mempath = '/dev/mem'
    powerfile = '/sys/power/state'
    suspendmode = 'mem'
    hostname = 'localhost'
    prefix = 'test'
    teststamp = ''
    dmesgfile = ''
    ftracefile = ''
    htmlfile = ''
    rtcwake = False
    rtcwaketime = 10
    rtcpath = ''
    android = False
    adb = 'adb'
    devicefilter = []
    stamp = 0
    execcount = 1
    x2delay = 0
    usecallgraph = False
    usetraceevents = False
    usetraceeventsonly = False
    notestrun = False
    altdevname = dict()
    postresumetime = 0
    # regex formats for header lines found in the dmesg/ftrace logs
    tracertypefmt = '# tracer: (?P<t>.*)'
    firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
    postresumefmt = '# post resume time (?P<t>[0-9]*)$'
    stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
        '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
        ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
    def __init__(self):
        """Detect the hostname and the sysfs path of the wakealarm rtc."""
        self.hostname = platform.node()
        if(self.hostname == ''):
            self.hostname = 'localhost'
        rtc = "rtc0"
        if os.path.exists('/dev/rtc'):
            rtc = os.readlink('/dev/rtc')
        rtc = '/sys/class/rtc/'+rtc
        # only accept the rtc if it exposes date/time/wakealarm
        if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
            os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
            self.rtcpath = rtc
    def setOutputFile(self):
        """Derive the html output filename from the dmesg/ftrace inputs,
        falling back to 'output.html'."""
        if((self.htmlfile == '') and (self.dmesgfile != '')):
            m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
            if(m):
                self.htmlfile = m.group('name')+'.html'
        if((self.htmlfile == '') and (self.ftracefile != '')):
            m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
            if(m):
                self.htmlfile = m.group('name')+'.html'
        if(self.htmlfile == ''):
            self.htmlfile = 'output.html'
    def initTestOutput(self, subdir):
        """Create the timestamped test directory and name the stamp and
        dmesg/ftrace/html output files."""
        if(not self.android):
            self.prefix = self.hostname
            v = open('/proc/version', 'r').read().strip()
            kver = string.split(v)[2]
        else:
            self.prefix = 'android'
            # kernel version is read from the device over adb
            v = os.popen(self.adb+' shell cat /proc/version').read().strip()
            kver = string.split(v)[2]
        testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
        if(subdir != "."):
            self.testdir = subdir+"/"+testtime
        else:
            self.testdir = testtime
        self.teststamp = \
            '# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
        self.dmesgfile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
        self.ftracefile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
        self.htmlfile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
        os.mkdir(self.testdir)
    def setDeviceFilter(self, devnames):
        """Set the device-name filter list (whitespace-separated input)."""
        self.devicefilter = string.split(devnames)
    def rtcWakeAlarm(self):
        """Program the rtc wakealarm to fire rtcwaketime seconds from now."""
        os.system('echo 0 > '+self.rtcpath+'/wakealarm')
        outD = open(self.rtcpath+'/date', 'r').read().strip()
        outT = open(self.rtcpath+'/time', 'r').read().strip()
        mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
        mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
        if(mD and mT):
            # get the current time from hardware
            utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
            dt = datetime(\
                int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
                int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
            nowtime = int(dt.strftime('%s')) + utcoffset
        else:
            # if hardware time fails, use the software time
            nowtime = int(datetime.now().strftime('%s'))
        alarm = nowtime + self.rtcwaketime
        os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
# the one global instance used throughout the script
sysvals = SystemValues()
# Class: DeviceNode
# Description:
# A container used to create a device hierachy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
    """One node of the device hierarchy tree built by Data.deviceTopology:
    a name, a child list, and its depth in the tree."""
    name = ''
    children = 0
    depth = 0
    def __init__(self, nodename, nodedepth):
        # each instance gets its own (initially empty) child list
        self.depth = nodedepth
        self.children = []
        self.name = nodename
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a cronological hierarchy:
# Data.dmesg {
# root structure, started as dmesg & ftrace, but now only ftrace
# contents: times for suspend start/end, resume start/end, fwdata
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes intradev trace events
# }
# }
# }
# }
#
class Data:
    """The primary container for one suspend/resume test run's data.

    Organized as a chronological hierarchy (see the comment block above):
    dmesg -> 10 fixed phases -> device list -> device entries.
    """
    dmesg = {}          # root data structure
    phases = []         # ordered list of phases
    start = 0.0         # test start
    end = 0.0           # test end
    tSuspended = 0.0    # low-level suspend start
    tResumed = 0.0      # low-level resume start
    tLow = 0.0          # time spent in low-level suspend (standby/freeze)
    fwValid = False     # is firmware data available
    fwSuspend = 0       # time spent in firmware suspend
    fwResume = 0        # time spent in firmware resume
    dmesgtext = []      # dmesg text file in memory
    testnumber = 0
    idstr = ''
    html_device_id = 0
    stamp = 0
    outfile = ''
    def __init__(self, num):
        """Initialize the fixed 10-phase structure for test number num."""
        idchar = 'abcdefghijklmnopqrstuvwxyz'
        self.testnumber = num
        # one-letter id prefix, unique per test run (assumes num < 26)
        self.idstr = idchar[num]
        self.dmesgtext = []
        self.phases = []
        self.dmesg = { # fixed list of 10 phases
            'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#CCFFCC', 'order': 0},
            'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#88FF88', 'order': 1},
            'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#00AA00', 'order': 2},
            'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#008888', 'order': 3},
            'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#0000FF', 'order': 4},
            'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FF0000', 'order': 5},
            'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FF9900', 'order': 6},
            'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFCC00', 'order': 7},
            'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFFF88', 'order': 8},
            'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFFFCC', 'order': 9}
        }
        self.phases = self.sortedPhases()
    def getStart(self):
        """Return the start time of the first phase."""
        return self.dmesg[self.phases[0]]['start']
    def setStart(self, time):
        """Set the test start and the first phase's start time."""
        self.start = time
        self.dmesg[self.phases[0]]['start'] = time
    def getEnd(self):
        """Return the end time of the last phase."""
        return self.dmesg[self.phases[-1]]['end']
    def setEnd(self, time):
        """Set the test end and the last phase's end time."""
        self.end = time
        self.dmesg[self.phases[-1]]['end'] = time
    def isTraceEventOutsideDeviceCalls(self, pid, time):
        """Return True if no device callback owned by pid spans 'time'."""
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    return False
        return True
    def addIntraDevTraceEvent(self, action, name, pid, time):
        """Attach a TraceEvent to the device callback active for pid at
        'time'; return that device dict, or 0 if none matched."""
        if(action == 'mutex_lock_try'):
            color = 'red'
        elif(action == 'mutex_lock_pass'):
            color = 'green'
        elif(action == 'mutex_unlock'):
            color = 'blue'
        else:
            # create separate colors based on the name
            v1 = len(name)*10 % 256
            v2 = string.count(name, 'e')*100 % 256
            v3 = ord(name[0])*20 % 256
            color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3)
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    e = TraceEvent(action, name, color, time)
                    if('traceevents' not in d):
                        d['traceevents'] = []
                    d['traceevents'].append(e)
                    return d
                    break # NOTE: unreachable, follows the return
        return 0
    def capIntraDevTraceEvent(self, action, name, pid, time):
        """Close the first pending (not ready) trace event matching
        action/name on the device callback active for pid at 'time'."""
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    if('traceevents' not in d):
                        return
                    for e in d['traceevents']:
                        if(e.action == action and
                            e.name == name and not e.ready):
                            e.length = time - e.time
                            e.ready = True
                            break
                    return
    def trimTimeVal(self, t, t0, dT, left):
        """Shift timestamp t to remove a gap of length dT at t0; 'left'
        selects whether t lies before or after the gap. Values inside
        the gap are clamped to its edge."""
        if left:
            if(t > t0):
                if(t - dT < t0):
                    return t0
                return t - dT
            else:
                return t
        else:
            if(t < t0 + dT):
                if(t > t0):
                    return t0 + dT
                return t + dT
            else:
                return t
    def trimTime(self, t0, dT, left):
        """Apply trimTimeVal to every timestamp held in this object."""
        self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
        self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
        self.start = self.trimTimeVal(self.start, t0, dT, left)
        self.end = self.trimTimeVal(self.end, t0, dT, left)
        for phase in self.phases:
            p = self.dmesg[phase]
            p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
            p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
            list = p['list']
            for name in list:
                d = list[name]
                d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
                d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
                if('ftrace' in d):
                    cg = d['ftrace']
                    cg.start = self.trimTimeVal(cg.start, t0, dT, left)
                    cg.end = self.trimTimeVal(cg.end, t0, dT, left)
                    for line in cg.list:
                        line.time = self.trimTimeVal(line.time, t0, dT, left)
                if('traceevents' in d):
                    for e in d['traceevents']:
                        e.time = self.trimTimeVal(e.time, t0, dT, left)
    def normalizeTime(self, tZero):
        """Remove standby/freeze clock time and shift the whole timeline
        so that tZero becomes time 0."""
        # first trim out any standby or freeze clock time
        if(self.tSuspended != self.tResumed):
            if(self.tResumed > tZero):
                self.trimTime(self.tSuspended, \
                    self.tResumed-self.tSuspended, True)
            else:
                self.trimTime(self.tSuspended, \
                    self.tResumed-self.tSuspended, False)
        # shift the timeline so that tZero is the new 0
        self.tSuspended -= tZero
        self.tResumed -= tZero
        self.start -= tZero
        self.end -= tZero
        for phase in self.phases:
            p = self.dmesg[phase]
            p['start'] -= tZero
            p['end'] -= tZero
            list = p['list']
            for name in list:
                d = list[name]
                d['start'] -= tZero
                d['end'] -= tZero
                if('ftrace' in d):
                    cg = d['ftrace']
                    cg.start -= tZero
                    cg.end -= tZero
                    for line in cg.list:
                        line.time -= tZero
                if('traceevents' in d):
                    for e in d['traceevents']:
                        e.time -= tZero
    def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
        """Prepend a new phase (order 0) containing a single device action."""
        for phase in self.phases:
            self.dmesg[phase]['order'] += 1
        self.html_device_id += 1
        devid = '%s%d' % (self.idstr, self.html_device_id)
        list = dict()
        list[devname] = \
            {'start': start, 'end': end, 'pid': 0, 'par': '',
            'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
        self.dmesg[phasename] = \
            {'list': list, 'start': start, 'end': end,
            'row': 0, 'color': color, 'order': 0}
        self.phases = self.sortedPhases()
    def newPhase(self, phasename, start, end, color, order):
        """Insert an empty phase at the given order (or append if order
        is negative), adjusting neighboring phase boundaries."""
        if(order < 0):
            order = len(self.phases)
        for phase in self.phases[order:]:
            self.dmesg[phase]['order'] += 1
        if(order > 0):
            p = self.phases[order-1]
            self.dmesg[p]['end'] = start
        if(order < len(self.phases)):
            p = self.phases[order]
            self.dmesg[p]['start'] = end
        list = dict()
        self.dmesg[phasename] = \
            {'list': list, 'start': start, 'end': end,
            'row': 0, 'color': color, 'order': order}
        self.phases = self.sortedPhases()
    def setPhase(self, phase, ktime, isbegin):
        """Set the start (isbegin) or end time of a phase."""
        if(isbegin):
            self.dmesg[phase]['start'] = ktime
        else:
            self.dmesg[phase]['end'] = ktime
    def dmesgSortVal(self, phase):
        """Sort key for phases: their 'order' field."""
        return self.dmesg[phase]['order']
    def sortedPhases(self):
        """Return the phase names sorted by their order."""
        return sorted(self.dmesg, key=self.dmesgSortVal)
    def sortedDevices(self, phase):
        """Return the phase's device names sorted by start time."""
        list = self.dmesg[phase]['list']
        slist = []
        tmp = dict()
        for devname in list:
            dev = list[devname]
            tmp[dev['start']] = devname
        for t in sorted(tmp):
            slist.append(tmp[t])
        return slist
    def fixupInitcalls(self, phase, end):
        """Clip any callback in this phase that never returned at 'end'."""
        # if any calls never returned, clip them at system resume end
        phaselist = self.dmesg[phase]['list']
        for devname in phaselist:
            dev = phaselist[devname]
            if(dev['end'] < 0):
                dev['end'] = end
                vprint('%s (%s): callback didnt return' % (devname, phase))
    def deviceFilter(self, devicefilter):
        """Remove every device except the named ones and their relatives
        (ancestors via 'par' links and all descendants)."""
        filter = []
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for name in devicefilter:
                dev = name
                # walk up the parent chain
                while(dev in list):
                    if(dev not in filter):
                        filter.append(dev)
                    dev = list[dev]['par']
                children = self.deviceDescendants(name, phase)
                for dev in children:
                    if(dev not in filter):
                        filter.append(dev)
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            rmlist = []
            for name in list:
                pid = list[name]['pid']
                # entries with negative pids are kept unconditionally
                if(name not in filter and pid >= 0):
                    rmlist.append(name)
            for name in rmlist:
                del list[name]
    def fixupInitcallsThatDidntReturn(self):
        """Clip unreturned callbacks in every phase at the test end."""
        # if any calls never returned, clip them at system resume end
        for phase in self.phases:
            self.fixupInitcalls(phase, self.getEnd())
    def newActionGlobal(self, name, start, end):
        """Add an action to whichever phase it overlaps the most;
        return False if it overlaps no phase."""
        # which phase is this device callback or action "in"
        targetphase = "none"
        overlap = 0.0
        for phase in self.phases:
            pstart = self.dmesg[phase]['start']
            pend = self.dmesg[phase]['end']
            o = max(0, min(end, pend) - max(start, pstart))
            if(o > overlap):
                targetphase = phase
                overlap = o
        if targetphase in self.phases:
            self.newAction(targetphase, name, -1, '', start, end, '')
            return True
        return False
    def newAction(self, phase, name, pid, parent, start, end, drv):
        """Add a device callback/action entry to the given phase."""
        # new device callback for a specific phase
        self.html_device_id += 1
        devid = '%s%d' % (self.idstr, self.html_device_id)
        list = self.dmesg[phase]['list']
        length = -1.0
        if(start >= 0 and end >= 0):
            length = end - start
        list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
            'length': length, 'row': 0, 'id': devid, 'drv': drv }
    def deviceIDs(self, devlist, phase):
        """Return the html ids of the named devices present in a phase."""
        idlist = []
        list = self.dmesg[phase]['list']
        for devname in list:
            if devname in devlist:
                idlist.append(list[devname]['id'])
        return idlist
    def deviceParentID(self, devname, phase):
        """Return the parent device's html id (or its raw name if the
        parent has no entry in this phase)."""
        pdev = ''
        pdevid = ''
        list = self.dmesg[phase]['list']
        if devname in list:
            pdev = list[devname]['par']
        if pdev in list:
            return list[pdev]['id']
        return pdev
    def deviceChildren(self, devname, phase):
        """Return the names of devices whose parent is devname."""
        devlist = []
        list = self.dmesg[phase]['list']
        for child in list:
            if(list[child]['par'] == devname):
                devlist.append(child)
        return devlist
    def deviceDescendants(self, devname, phase):
        """Return all transitive children of devname in a phase."""
        children = self.deviceChildren(devname, phase)
        family = children
        for child in children:
            family += self.deviceDescendants(child, phase)
        return family
    def deviceChildrenIDs(self, devname, phase):
        """Return the html ids of devname's direct children."""
        devlist = self.deviceChildren(devname, phase)
        return self.deviceIDs(devlist, phase)
    def printDetails(self):
        """Verbose-print the phase boundaries and device counts."""
        vprint('          test start: %f' % self.start)
        for phase in self.phases:
            dc = len(self.dmesg[phase]['list'])
            vprint('    %16s: %f - %f (%d devices)' % (phase, \
                self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
        vprint('            test end: %f' % self.end)
    def masterTopology(self, name, list, depth):
        """Recursively build a DeviceNode tree from the 'resume' phase
        parent/child links."""
        node = DeviceNode(name, depth)
        for cname in list:
            clist = self.deviceChildren(cname, 'resume')
            cnode = self.masterTopology(cname, clist, depth+1)
            node.children.append(cnode)
        return node
    def printTopology(self, node):
        """Render a DeviceNode tree as a nested html list, annotating
        each device with its per-phase callback durations."""
        html = ''
        if node.name:
            info = ''
            drv = ''
            for phase in self.phases:
                list = self.dmesg[phase]['list']
                if node.name in list:
                    s = list[node.name]['start']
                    e = list[node.name]['end']
                    if list[node.name]['drv']:
                        drv = ' {'+list[node.name]['drv']+'}'
                    info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
            html += '<li><b>'+node.name+drv+'</b>'
            if info:
                html += '<ul>'+info+'</ul>'
            html += '</li>'
        if len(node.children) > 0:
            html += '<ul>'
            for cnode in node.children:
                html += self.printTopology(cnode)
            html += '</ul>'
        return html
    def rootDeviceList(self):
        """Return parents referenced by graphed devices that are not
        themselves graphed: the top-most roots of the topology."""
        # list of devices graphed
        real = []
        for phase in self.dmesg:
            list = self.dmesg[phase]['list']
            for dev in list:
                if list[dev]['pid'] >= 0 and dev not in real:
                    real.append(dev)
        # list of top-most root devices
        rootlist = []
        for phase in self.dmesg:
            list = self.dmesg[phase]['list']
            for dev in list:
                pdev = list[dev]['par']
                # skip usb-style endpoint addresses
                if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
                    continue
                if pdev and pdev not in real and pdev not in rootlist:
                    rootlist.append(pdev)
        return rootlist
    def deviceTopology(self):
        """Return the full device topology rendered as html."""
        rootlist = self.rootDeviceList()
        master = self.masterTopology('', rootlist, 0)
        return self.printTopology(master)
# Class: TraceEvent
# Description:
# A container for trace event data found in the ftrace file
class TraceEvent:
    """A single intra-device trace event found in the ftrace file."""
    action = ''        # event action string
    name = ''          # event/source name
    color = '#FFFFFF'  # html display color
    time = 0.0         # timestamp of the event
    length = 0.0       # duration, filled in when the event is capped
    ready = False      # True once the matching end event is seen
    def __init__(self, a, n, c, t):
        # record action, name, display color and start timestamp
        self.time = t
        self.color = c
        self.name = n
        self.action = a
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
    """A single line of ftrace data.

    Six basic types (see the comment block above): callgraph call,
    return and leaf lines, plus tracing_mark_write, suspend_resume and
    device_pm_callback trace events.
    """
    time = 0.0
    length = 0.0
    fcall = False
    freturn = False
    fevent = False
    depth = 0
    name = ''
    type = ''
    def __init__(self, t, m, d):
        """Parse one line: t = timestamp string, m = message body,
        d = duration string, or 'traceevent' for nop-format events."""
        self.time = float(t)
        # is this a trace event
        if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
            if(d == 'traceevent'):
                # nop format trace event
                msg = m
            else:
                # function_graph format trace event
                em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
                msg = em.group('msg')
            # split "type: message" if present
            emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
            if(emm):
                self.name = emm.group('msg')
                self.type = emm.group('call')
            else:
                self.name = msg
            self.fevent = True
            return
        # convert the duration to seconds
        if(d):
            self.length = float(d)/1000000
        # the indentation determines the depth
        match = re.match('^(?P<d> *)(?P<o>.*)$', m)
        if(not match):
            return
        self.depth = self.getDepth(match.group('d'))
        m = match.group('o')
        # function return
        if(m[0] == '}'):
            self.freturn = True
            if(len(m) > 1):
                # includes comment with function name
                match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
                if(match):
                    self.name = match.group('n')
        # function call
        else:
            self.fcall = True
            # function call with children
            if(m[-1] == '{'):
                match = re.match('^(?P<n>.*) *\(.*', m)
                if(match):
                    self.name = match.group('n')
            # function call with no children (leaf)
            elif(m[-1] == ';'):
                self.freturn = True
                match = re.match('^(?P<n>.*) *\(.*', m)
                if(match):
                    self.name = match.group('n')
            # something else (possibly a trace marker)
            else:
                self.name = m
    def getDepth(self, str):
        # two leading spaces per depth level (integer division on Python 2)
        return len(str)/2
    def debugPrint(self, dev):
        """Print this line's info, prefixed with the device name."""
        if(self.freturn and self.fcall):
            print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
        elif(self.freturn):
            print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
        else:
            print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
    """The ftrace callgraph of a single recursive function.

    Tied to one device in one phase; holds an ordered list of FTraceLine
    objects spanning [start, end].
    """
    start = -1.0
    end = -1.0
    list = []
    invalid = False
    depth = 0
    def __init__(self):
        self.start = -1.0
        self.end = -1.0
        self.list = []
        self.depth = 0
    def setDepth(self, line):
        """Assign line.depth from the running call depth and update it."""
        if(line.fcall and not line.freturn):
            line.depth = self.depth
            self.depth += 1
        elif(line.freturn and not line.fcall):
            self.depth -= 1
            line.depth = self.depth
        else:
            line.depth = self.depth
    def addLine(self, line, match):
        """Append one FTraceLine; return True when the graph completes.

        A return at depth 0 closes the callgraph. Overflow (>= 1M lines)
        or depth underflow marks the graph invalid and discards all but
        the first line; 'match' (a regex match with pid/cpu groups) is
        only used to label the warning message.
        """
        if(not self.invalid):
            self.setDepth(line)
        if(line.depth == 0 and line.freturn):
            if(self.start < 0):
                self.start = line.time
            self.end = line.time
            self.list.append(line)
            return True
        if(self.invalid):
            return False
        if(len(self.list) >= 1000000 or self.depth < 0):
            # too much data or corrupt depth: keep only the first line
            if(len(self.list) > 0):
                first = self.list[0]
                self.list = []
                self.list.append(first)
            self.invalid = True
            if(not match):
                return False
            # renamed from 'id' to avoid shadowing the builtin
            tag = 'task %s cpu %s' % (match.group('pid'), match.group('cpu'))
            window = '(%f - %f)' % (self.start, line.time)
            if(self.depth < 0):
                print('Too much data for '+tag+\
                    ' (buffer overflow), ignoring this callback')
            else:
                print('Too much data for '+tag+\
                    ' '+window+', ignoring this callback')
            return False
        self.list.append(line)
        if(self.start < 0):
            self.start = line.time
        return False
    def slice(self, t0, tN):
        """Return a new callgraph containing only the lines in [t0, tN],
        rebased so the first call is at depth 0."""
        minicg = FTraceCallGraph()
        count = -1
        firstdepth = 0
        for l in self.list:
            if(l.time < t0 or l.time > tN):
                continue
            if(count < 0):
                if(not l.fcall or l.name == 'dev_driver_string'):
                    continue
                firstdepth = l.depth
                count = 0
            l.depth -= firstdepth
            minicg.addLine(l, 0)
            if((count == 0 and l.freturn and l.fcall) or
                (count > 0 and l.depth <= 0)):
                break
            count += 1
        return minicg
    def sanityCheck(self):
        """Verify every call has a matching return, moving each return's
        duration onto its call line; True if the graph is balanced."""
        stack = dict()
        cnt = 0
        for l in self.list:
            if(l.fcall and not l.freturn):
                stack[l.depth] = l
                cnt += 1
            elif(l.freturn and not l.fcall):
                if(l.depth not in stack):
                    return False
                stack[l.depth].length = l.length
                stack[l.depth] = 0
                l.length = 0
                cnt -= 1
        if(cnt == 0):
            return True
        return False
    def debugPrint(self, filename):
        """Dump the callgraph to stdout or to the named file."""
        if(filename == 'stdout'):
            # BUG FIX: was print('[%f - %f]') % (...), which applies the
            # modulo to print's return value; format inside the call.
            print('[%f - %f]' % (self.start, self.end))
            for l in self.list:
                if(l.freturn and l.fcall):
                    print('%f (%02d): %s(); (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                elif(l.freturn):
                    print('%f (%02d): %s} (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                else:
                    print('%f (%02d): %s() { (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
            print(' ')
        else:
            fp = open(filename, 'w')
            print(filename)
            for l in self.list:
                if(l.freturn and l.fcall):
                    fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                elif(l.freturn):
                    fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                else:
                    fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
            fp.close()
# Class: Timeline
# Description:
# A container for a suspend/resume html timeline. In older versions
# of the script there were multiple timelines, but in the latest
# there is only one.
class Timeline:
    """Container for the single suspend/resume html timeline."""
    html = {}
    scaleH = 0.0 # height of the row as a percent of the timeline height
    rowH = 0.0 # height of each row in percent of the timeline height
    row_height_pixels = 30
    maxrows = 0
    height = 0
    def __init__(self):
        # per-instance html fragments: timeline body, legend, time scale
        self.html = dict(timeline='', legend='', scale='')
    def setRows(self, rows):
        """Size the timeline geometry for the given number of rows."""
        self.maxrows = int(rows)
        self.scaleH = 100.0 / float(self.maxrows)
        self.height = self.maxrows * self.row_height_pixels
        # remaining height is split over maxrows-1 rows (at least 1)
        divisor = max(float(self.maxrows - 1), 1.0)
        self.rowH = (100.0 - self.scaleH) / divisor
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
    """Parse state for one suspend/resume test run.

    Holds the ftrace line format for the active tracer and the temporary
    callgraph (ftemp) / trace-event (ttemp) accumulators.
    """
    # function_graph tracer line format
    ftrace_line_fmt_fg = \
        '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
        ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
        '[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
    # nop tracer line format
    ftrace_line_fmt_nop = \
        ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
        '(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
        '(?P<msg>.*)'
    ftrace_line_fmt = ftrace_line_fmt_nop
    cgformat = False
    ftemp = dict()
    ttemp = dict()
    inthepipe = False
    tracertype = ''
    data = 0
    def __init__(self, dataobj):
        self.data = dataobj
        self.ftemp = dict()
        self.ttemp = dict()
    def isReady(self):
        """True once a tracer type is set and a data object is attached."""
        # BUG FIX: original read bare 'tracertype' and 'data' (NameError);
        # use the instance attributes.
        if(self.tracertype == '' or not self.data):
            return False
        return True
    def setTracerType(self, tracer):
        """Select the line format for 'function_graph' or 'nop' tracers;
        any other tracer type is a fatal error."""
        self.tracertype = tracer
        if(tracer == 'function_graph'):
            self.cgformat = True
            self.ftrace_line_fmt = self.ftrace_line_fmt_fg
        elif(tracer == 'nop'):
            self.ftrace_line_fmt = self.ftrace_line_fmt_nop
        else:
            doError('Invalid tracer format: [%s]' % tracer, False)
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
    """Print msg only when the -verbose option (sysvals.verbose) is set."""
    if not sysvals.verbose:
        return
    print(msg)
# Function: initFtrace
# Description:
# Configure ftrace to use trace events and/or a callgraph
def initFtrace():
    """Configure ftrace for trace events and/or a function_graph
    callgraph, writing to the tracing debugfs files via the shell."""
    global sysvals
    tp = sysvals.tpath
    cf = 'dpm_run_callback'
    if(sysvals.usetraceeventsonly):
        cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback'
    if(sysvals.usecallgraph or sysvals.usetraceevents):
        print('INITIALIZING FTRACE...')
        # turn trace off
        os.system('echo 0 > '+tp+'tracing_on')
        # set the trace clock to global
        os.system('echo global > '+tp+'trace_clock')
        # set trace buffer to a huge value
        os.system('echo nop > '+tp+'current_tracer')
        os.system('echo 100000 > '+tp+'buffer_size_kb')
        # initialize the callgraph trace, unless this is an x2 run
        if(sysvals.usecallgraph and sysvals.execcount == 1):
            # set trace type
            os.system('echo function_graph > '+tp+'current_tracer')
            os.system('echo "" > '+tp+'set_ftrace_filter')
            # set trace format options
            os.system('echo funcgraph-abstime > '+tp+'trace_options')
            os.system('echo funcgraph-proc > '+tp+'trace_options')
            # focus only on device suspend and resume
            os.system('cat '+tp+'available_filter_functions | grep '+\
                cf+' > '+tp+'set_graph_function')
        if(sysvals.usetraceevents):
            # turn trace events on
            events = iter(sysvals.traceevents)
            for e in events:
                os.system('echo 1 > '+sysvals.epath+e+'/enable')
        # clear the trace buffer
        os.system('echo "" > '+tp+'trace')
# Function: initFtraceAndroid
# Description:
# Configure ftrace to capture trace events
def initFtraceAndroid():
	"""Configure ftrace on an android device (via adb) to capture trace events.

	Mirrors initFtrace but issues every write through
	"adb shell 'echo ... > ...'". Does nothing unless trace-event
	collection was requested.
	"""
	global sysvals
	tp = sysvals.tpath
	if(not sysvals.usetraceevents):
		return
	print('INITIALIZING FTRACE...')
	def adbecho(val, path):
		# run "echo val > path" on the device through adb shell
		os.system(sysvals.adb+" shell 'echo "+val+" > "+path+"'")
	# turn trace off and select the global trace clock
	adbecho('0', tp+'tracing_on')
	adbecho('global', tp+'trace_clock')
	# reset the tracer and set the trace buffer to a huge value
	adbecho('nop', tp+'current_tracer')
	adbecho('10000', tp+'buffer_size_kb')
	# turn each requested trace event on
	for e in sysvals.traceevents:
		adbecho('1', sysvals.epath+e+'/enable')
	# clear the trace buffer
	adbecho('""', tp+'trace')
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
# Output:
# True or False
def verifyFtrace():
	"""Check that ftrace is working on the system.

	Verifies that every required ftrace control file exists under
	sysvals.tpath — either locally, or on the device via "adb shell ls"
	when sysvals.android is set.
	Output:
		True if all required files are present, False otherwise
	"""
	global sysvals
	# files needed for any trace data
	files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
		'trace_marker', 'trace_options', 'tracing_on']
	# files needed for callgraph trace data
	tp = sysvals.tpath
	if(sysvals.usecallgraph):
		files += [
			'available_filter_functions',
			'set_ftrace_filter',
			'set_graph_function'
		]
	for f in files:
		if(sysvals.android):
			# "ls path" echoes the path back only if it exists
			out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip()
			if(out != tp+f):
				return False
		else:
			# idiomatic existence check (was "== False")
			if(not os.path.exists(tp+f)):
				return False
	return True
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
# m: the valid re.match output for the stamp line
def parseStamp(m, data):
	"""Create the test stamp from a matched stamp line.

	Builds the stamp dict (time/host/mode/kernel) from the regex match,
	stores it on the Data object, updates the global suspend mode, and
	records the first stamp seen as the global one.
	Arguments:
		m: the valid re.match output for the stamp line
		data: the Data object to attach the stamp to
	"""
	global sysvals
	# the stamp encodes a 2-digit year, offset from 2000
	dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
		int(m.group('d')), int(m.group('H')), int(m.group('M')),
		int(m.group('S')))
	data.stamp = {
		'time': dt.strftime('%B %d %Y, %I:%M:%S %p'),
		'host': m.group('host'),
		'mode': m.group('mode'),
		'kernel': m.group('kernel'),
	}
	sysvals.suspendmode = data.stamp['mode']
	# the first stamp seen becomes the global stamp
	if not sysvals.stamp:
		sysvals.stamp = data.stamp
# Function: diffStamp
# Description:
# compare the host, kernel, and mode fields in 3 stamps
# Arguments:
# stamp1: string array with mode, kernel, and host
# stamp2: string array with mode, kernel, and host
# Return:
# True if stamps differ, False if they're the same
def diffStamp(stamp1, stamp2):
	"""Compare the host, kernel, and mode fields in two stamps.

	A field only counts when present in both stamps.
	Arguments:
		stamp1: dict with mode, kernel, and host
		stamp2: dict with mode, kernel, and host
	Return:
		True if the stamps differ, False if they're the same
	"""
	for key in ('host', 'kernel', 'mode'):
		if key in stamp1 and key in stamp2 and stamp1[key] != stamp2[key]:
			return True
	return False
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
	"""Quickly determine which trace events the ftrace log contains.

	Sets sysvals.usetraceeventsonly if every required trace event is
	present, and sysvals.usetraceevents if at least suspend_resume is.
	Reads sysvals.ftracefile once instead of shelling out to
	"cat | grep" per event (the event names are plain identifiers, so a
	substring test is equivalent to the old grep pattern).
	"""
	global sysvals
	sysvals.usetraceeventsonly = True
	sysvals.usetraceevents = False
	with open(sysvals.ftracefile, 'r') as tf:
		log = tf.read()
	for e in sysvals.traceevents:
		found = (e+': ') in log
		if(not found):
			sysvals.usetraceeventsonly = False
		if(e == 'suspend_resume' and found):
			sysvals.usetraceevents = True
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
	"""
	[deprecated for kernel 3.15 or newer]
	Legacy support of ftrace outputs that lack the device_pm_callback
	and/or suspend_resume trace events. The primary data should be
	taken from dmesg; this ftrace pass only adds callgraph data or
	custom actions to the timeline. The data is appended to the Data
	objects provided.
	Arguments:
		testruns: the array of Data objects obtained from parseKernelLog
	"""
	global sysvals
	# create TestRun vessels for ftrace parsing
	testcnt = len(testruns)
	testidx = -1
	testrun = []
	for data in testruns:
		testrun.append(TestRun(data))
	# extract the callgraph and traceevent data
	vprint('Analyzing the ftrace data...')
	tf = open(sysvals.ftracefile, 'r')
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		# grab the time stamp first (signifies the start of the test run)
		m = re.match(sysvals.stampfmt, line)
		if(m):
			testidx += 1
			parseStamp(m, testrun[testidx].data)
			continue
		# pull out any firmware data
		if(re.match(sysvals.firmwarefmt, line)):
			continue
		# if we haven't found a test time stamp yet keep spinning til we do
		if(testidx < 0):
			continue
		# determine the trace data type (required for further parsing)
		m = re.match(sysvals.tracertypefmt, line)
		if(m):
			tracer = m.group('t')
			testrun[testidx].setTracerType(tracer)
			continue
		# parse only valid lines, if this isn't one move on
		m = re.match(testrun[testidx].ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		# callgraph format lines carry a duration field
		if(testrun[testidx].cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# only parse the ftrace data during suspend/resume
		data = testrun[testidx].data
		if(not testrun[testidx].inthepipe):
			# look for the suspend start marker
			if(t.fevent):
				if(t.name == 'SUSPEND START'):
					testrun[testidx].inthepipe = True
					data.setStart(t.time)
				continue
		else:
			# trace event processing
			if(t.fevent):
				if(t.name == 'RESUME COMPLETE'):
					testrun[testidx].inthepipe = False
					data.setEnd(t.time)
					# stop after the last test run's data is complete
					if(testidx == testcnt - 1):
						break
					continue
				# general trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				# events like "foo[N]" keep the index unless it is 0
				m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
				if(m):
					val = m.group('val')
					if val == '0':
						name = m.group('name')
					else:
						name = m.group('name')+'['+val+']'
				else:
					m = re.match('(?P<name>.*) .*', t.name)
					name = m.group('name')
				# special processing for trace events
				if re.match('dpm_prepare\[.*', name):
					continue
				elif re.match('machine_suspend.*', name):
					continue
				elif re.match('suspend_enter\[.*', name):
					if(not isbegin):
						data.dmesg['suspend_prepare']['end'] = t.time
					continue
				elif re.match('dpm_suspend\[.*', name):
					if(not isbegin):
						data.dmesg['suspend']['end'] = t.time
					continue
				elif re.match('dpm_suspend_late\[.*', name):
					if(isbegin):
						data.dmesg['suspend_late']['start'] = t.time
					else:
						data.dmesg['suspend_late']['end'] = t.time
					continue
				elif re.match('dpm_suspend_noirq\[.*', name):
					if(isbegin):
						data.dmesg['suspend_noirq']['start'] = t.time
					else:
						data.dmesg['suspend_noirq']['end'] = t.time
					continue
				elif re.match('dpm_resume_noirq\[.*', name):
					if(isbegin):
						data.dmesg['resume_machine']['end'] = t.time
						data.dmesg['resume_noirq']['start'] = t.time
					else:
						data.dmesg['resume_noirq']['end'] = t.time
					continue
				elif re.match('dpm_resume_early\[.*', name):
					if(isbegin):
						data.dmesg['resume_early']['start'] = t.time
					else:
						data.dmesg['resume_early']['end'] = t.time
					continue
				elif re.match('dpm_resume\[.*', name):
					if(isbegin):
						data.dmesg['resume']['start'] = t.time
					else:
						data.dmesg['resume']['end'] = t.time
					continue
				elif re.match('dpm_complete\[.*', name):
					if(isbegin):
						data.dmesg['resume_complete']['start'] = t.time
					else:
						data.dmesg['resume_complete']['end'] = t.time
					continue
				# is this trace event outside of the devices calls
				if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					# global events (outside device calls) are simply graphed
					if(isbegin):
						# store each trace event in ttemp
						if(name not in testrun[testidx].ttemp):
							testrun[testidx].ttemp[name] = []
						testrun[testidx].ttemp[name].append(\
							{'begin': t.time, 'end': t.time})
					else:
						# finish off matching trace event in ttemp
						if(name in testrun[testidx].ttemp):
							testrun[testidx].ttemp[name][-1]['end'] = t.time
				else:
					if(isbegin):
						data.addIntraDevTraceEvent('', name, pid, t.time)
					else:
						data.capIntraDevTraceEvent('', name, pid, t.time)
			# call/return processing
			elif sysvals.usecallgraph:
				# create a callgraph object for the data
				if(pid not in testrun[testidx].ftemp):
					testrun[testidx].ftemp[pid] = []
					testrun[testidx].ftemp[pid].append(FTraceCallGraph())
				# when the call is finished, see which device matches it
				cg = testrun[testidx].ftemp[pid][-1]
				if(cg.addLine(t, m)):
					testrun[testidx].ftemp[pid].append(FTraceCallGraph())
	tf.close()
	for test in testrun:
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			for name in test.ttemp:
				for event in test.ttemp[name]:
					begin = event['begin']
					end = event['end']
					# if event starts before timeline start, expand timeline
					if(begin < test.data.start):
						test.data.setStart(begin)
					# if event ends after timeline end, expand the timeline
					if(end > test.data.end):
						test.data.setEnd(end)
					test.data.newActionGlobal(name, begin, end)
		# add the callgraph data to the device hierarchy
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				if(not cg.sanityCheck()):
					# NOTE(review): 'm' here is whatever the last regex
					# match in the parse loop was — looks fragile; confirm
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# attach the callgraph to the device call it spans
				for p in test.data.phases:
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
						break
		if(sysvals.verbose):
			test.data.printDetails()
	# add the time in between the tests as a new phase so we can see it
	if(len(testruns) > 1):
		t1e = testruns[0].getEnd()
		t2s = testruns[-1].getStart()
		testruns[-1].newPhaseWithSingleAction('user mode', \
			'user mode', t1e, t2s, '#FF9966')
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
	"""
	Analyze an ftrace log output file generated from this app during
	the execution phase. Used when the ftrace log is the primary data
	source and includes the suspend_resume and device_pm_callback trace
	events. The ftrace filename is taken from sysvals.
	Output:
		An array of Data objects, one per test run found in the log
	"""
	global sysvals
	vprint('Analyzing the ftrace data...')
	if(os.path.exists(sysvals.ftracefile) == False):
		doError('%s doesnt exist' % sysvals.ftracefile, False)
	# extract the callgraph and traceevent data
	testruns = []
	testdata = []
	testrun = 0
	data = 0
	tf = open(sysvals.ftracefile, 'r')
	phase = 'suspend_prepare'
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		# stamp line: each stamp means a new test run
		m = re.match(sysvals.stampfmt, line)
		if(m):
			data = Data(len(testdata))
			testdata.append(data)
			testrun = TestRun(data)
			testruns.append(testrun)
			parseStamp(m, data)
			continue
		if(not data):
			continue
		# firmware line: pull out any firmware data
		m = re.match(sysvals.firmwarefmt, line)
		if(m):
			data.fwSuspend = int(m.group('s'))
			data.fwResume = int(m.group('r'))
			if(data.fwSuspend > 0 or data.fwResume > 0):
				data.fwValid = True
			continue
		# tracer type line: determine the trace data type
		m = re.match(sysvals.tracertypefmt, line)
		if(m):
			tracer = m.group('t')
			testrun.setTracerType(tracer)
			continue
		# post resume time line: did this test run include post-resume data
		m = re.match(sysvals.postresumefmt, line)
		if(m):
			t = int(m.group('t'))
			if(t > 0):
				sysvals.postresumetime = t
			continue
		# ftrace line: parse only valid lines
		m = re.match(testrun.ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		# callgraph format lines carry a duration field
		if(testrun.cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# only parse the ftrace data during suspend/resume
		if(not testrun.inthepipe):
			# look for the suspend start marker
			if(t.fevent):
				if(t.name == 'SUSPEND START'):
					testrun.inthepipe = True
					data.setStart(t.time)
			continue
		# trace event processing
		if(t.fevent):
			if(t.name == 'RESUME COMPLETE'):
				# with post-resume data, keep collecting past this point
				if(sysvals.postresumetime > 0):
					phase = 'post_resume'
					data.newPhase(phase, t.time, t.time, '#FF9966', -1)
				else:
					testrun.inthepipe = False
				data.setEnd(t.time)
				continue
			if(phase == 'post_resume'):
				data.setEnd(t.time)
			if(t.type == 'suspend_resume'):
				# suspend_resume trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				# events like "foo[N]" keep the index unless it is 0
				m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
				if(m):
					val = m.group('val')
					if val == '0':
						name = m.group('name')
					else:
						name = m.group('name')+'['+val+']'
				else:
					m = re.match('(?P<name>.*) .*', t.name)
					name = m.group('name')
				# ignore these events
				if(re.match('acpi_suspend\[.*', t.name) or
					re.match('suspend_enter\[.*', name)):
					continue
				# -- phase changes --
				# suspend_prepare start
				if(re.match('dpm_prepare\[.*', t.name)):
					phase = 'suspend_prepare'
					if(not isbegin):
						data.dmesg[phase]['end'] = t.time
					continue
				# suspend start
				elif(re.match('dpm_suspend\[.*', t.name)):
					phase = 'suspend'
					data.setPhase(phase, t.time, isbegin)
					continue
				# suspend_late start
				elif(re.match('dpm_suspend_late\[.*', t.name)):
					phase = 'suspend_late'
					data.setPhase(phase, t.time, isbegin)
					continue
				# suspend_noirq start
				elif(re.match('dpm_suspend_noirq\[.*', t.name)):
					phase = 'suspend_noirq'
					data.setPhase(phase, t.time, isbegin)
					if(not isbegin):
						phase = 'suspend_machine'
						data.dmesg[phase]['start'] = t.time
					continue
				# suspend_machine/resume_machine
				elif(re.match('machine_suspend\[.*', t.name)):
					if(isbegin):
						phase = 'suspend_machine'
						data.dmesg[phase]['end'] = t.time
						data.tSuspended = t.time
					else:
						if(sysvals.suspendmode in ['mem', 'disk']):
							data.dmesg['suspend_machine']['end'] = t.time
							data.tSuspended = t.time
						phase = 'resume_machine'
						data.dmesg[phase]['start'] = t.time
						data.tResumed = t.time
						data.tLow = data.tResumed - data.tSuspended
					continue
				# resume_noirq start
				elif(re.match('dpm_resume_noirq\[.*', t.name)):
					phase = 'resume_noirq'
					data.setPhase(phase, t.time, isbegin)
					if(isbegin):
						data.dmesg['resume_machine']['end'] = t.time
					continue
				# resume_early start
				elif(re.match('dpm_resume_early\[.*', t.name)):
					phase = 'resume_early'
					data.setPhase(phase, t.time, isbegin)
					continue
				# resume start
				elif(re.match('dpm_resume\[.*', t.name)):
					phase = 'resume'
					data.setPhase(phase, t.time, isbegin)
					continue
				# resume complete start
				elif(re.match('dpm_complete\[.*', t.name)):
					phase = 'resume_complete'
					if(isbegin):
						data.dmesg[phase]['start'] = t.time
					continue
				# is this trace event outside of the devices calls
				if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					# global events (outside device calls) are simply graphed
					if(name not in testrun.ttemp):
						testrun.ttemp[name] = []
					if(isbegin):
						# create a new list entry
						testrun.ttemp[name].append(\
							{'begin': t.time, 'end': t.time})
					else:
						if(len(testrun.ttemp[name]) > 0):
							# if an entry exists, assume this is its end
							testrun.ttemp[name][-1]['end'] = t.time
						elif(phase == 'post_resume'):
							# post resume events can just have ends
							testrun.ttemp[name].append({
								'begin': data.dmesg[phase]['start'],
								'end': t.time})
				else:
					if(isbegin):
						data.addIntraDevTraceEvent('', name, pid, t.time)
					else:
						data.capIntraDevTraceEvent('', name, pid, t.time)
			# device callback start
			elif(t.type == 'device_pm_callback_start'):
				m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
					t.name);
				if(not m):
					continue
				drv = m.group('drv')
				n = m.group('d')
				p = m.group('p')
				if(n and p):
					data.newAction(phase, n, pid, p, t.time, -1, drv)
			# device callback finish
			elif(t.type == 'device_pm_callback_end'):
				m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
				if(not m):
					continue
				n = m.group('d')
				list = data.dmesg[phase]['list']
				if(n in list):
					dev = list[n]
					dev['length'] = t.time - dev['start']
					dev['end'] = t.time
		# callgraph processing
		elif sysvals.usecallgraph:
			# this shouldn't happen, but JIC, ignore callgraph data post-res
			if(phase == 'post_resume'):
				continue
			# create a callgraph object for the data
			if(pid not in testrun.ftemp):
				testrun.ftemp[pid] = []
				testrun.ftemp[pid].append(FTraceCallGraph())
			# when the call is finished, see which device matches it
			cg = testrun.ftemp[pid][-1]
			if(cg.addLine(t, m)):
				testrun.ftemp[pid].append(FTraceCallGraph())
	tf.close()
	for test in testruns:
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			for name in test.ttemp:
				for event in test.ttemp[name]:
					begin = event['begin']
					end = event['end']
					# if event starts before timeline start, expand timeline
					if(begin < test.data.start):
						test.data.setStart(begin)
					# if event ends after timeline end, expand the timeline
					if(end > test.data.end):
						test.data.setEnd(end)
					test.data.newActionGlobal(name, begin, end)
		# add the callgraph data to the device hierarchy
		borderphase = {
			'dpm_prepare': 'suspend_prepare',
			'dpm_complete': 'resume_complete'
		}
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				if len(cg.list) < 2:
					continue
				if(not cg.sanityCheck()):
					# NOTE(review): 'm' here is whatever the last regex
					# match in the parse loop was — looks fragile; confirm
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# border callgraphs get sliced down to the device span
				if(cg.list[0].name in borderphase):
					p = borderphase[cg.list[0].name]
					list = test.data.dmesg[p]['list']
					for devname in list:
						dev = list[devname]
						if(pid == dev['pid'] and
							callstart <= dev['start'] and
							callend >= dev['end']):
							dev['ftrace'] = cg.slice(dev['start'], dev['end'])
					continue
				if(cg.list[0].name != 'dpm_run_callback'):
					continue
				# attach the callgraph to the device call it spans
				for p in test.data.phases:
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
						break
	# fill in any missing phases
	for data in testdata:
		lp = data.phases[0]
		for p in data.phases:
			if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
				print('WARNING: phase "%s" is missing!' % p)
			if(data.dmesg[p]['start'] < 0):
				data.dmesg[p]['start'] = data.dmesg[lp]['end']
				if(p == 'resume_machine'):
					data.tSuspended = data.dmesg[lp]['end']
					data.tResumed = data.dmesg[lp]['end']
					data.tLow = 0
			if(data.dmesg[p]['end'] < 0):
				data.dmesg[p]['end'] = data.dmesg[p]['start']
			lp = p
		if(len(sysvals.devicefilter) > 0):
			data.deviceFilter(sysvals.devicefilter)
		data.fixupInitcallsThatDidntReturn()
		if(sysvals.verbose):
			data.printDetails()
	# add the time in between the tests as a new phase so we can see it
	if(len(testdata) > 1):
		t1e = testdata[0].getEnd()
		t2s = testdata[-1].getStart()
		testdata[-1].newPhaseWithSingleAction('user mode', \
			'user mode', t1e, t2s, '#FF9966')
	return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
	"""
	[deprecated for kernel 3.15.0 or newer]
	Load the dmesg file into memory and fix up any ordering issues.
	The dmesg filename is taken from sysvals.
	Output:
		An array of empty Data objects with only their dmesgtext
		attributes set (one per stamp found in the file)
	"""
	global sysvals
	vprint('Analyzing the dmesg data...')
	if(os.path.exists(sysvals.dmesgfile) == False):
		doError('%s doesnt exist' % sysvals.dmesgfile, False)
	# there can be multiple test runs in a single file delineated by stamps
	testruns = []
	data = 0
	lf = open(sysvals.dmesgfile, 'r')
	for line in lf:
		line = line.replace('\r\n', '')
		# strip any console prefix before the "[time]" bracket
		idx = line.find('[')
		if idx > 1:
			line = line[idx:]
		m = re.match(sysvals.stampfmt, line)
		if(m):
			# a stamp starts a new test run
			if(data):
				testruns.append(data)
			data = Data(len(testruns))
			parseStamp(m, data)
			continue
		if(not data):
			continue
		m = re.match(sysvals.firmwarefmt, line)
		if(m):
			data.fwSuspend = int(m.group('s'))
			data.fwResume = int(m.group('r'))
			if(data.fwSuspend > 0 or data.fwResume > 0):
				data.fwValid = True
			continue
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			data.dmesgtext.append(line)
			if(re.match('ACPI: resume from mwait', m.group('msg'))):
				print('NOTE: This suspend appears to be freeze rather than'+\
					' %s, it will be treated as such' % sysvals.suspendmode)
				sysvals.suspendmode = 'freeze'
		else:
			vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
	testruns.append(data)
	lf.close()
	if(not data):
		print('ERROR: analyze_suspend header missing from dmesg log')
		sys.exit()
	# fix lines with same timestamp/function with the call and return swapped
	# NOTE(review): list.index() finds the FIRST occurrence of each line,
	# so duplicate dmesg lines could swap the wrong pair — confirm
	for data in testruns:
		last = ''
		for line in data.dmesgtext:
			mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
				'(?P<f>.*)\+ @ .*, parent: .*', line)
			mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
				'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
			if(mc and mr and (mc.group('t') == mr.group('t')) and
				(mc.group('f') == mr.group('f'))):
				i = data.dmesgtext.index(last)
				j = data.dmesgtext.index(line)
				data.dmesgtext[i] = line
				data.dmesgtext[j] = last
			last = line
	return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
	"""
	[deprecated for kernel 3.15.0 or newer]
	Analyse a dmesg log output file generated from this app during
	the execution phase. Create a set of device structures in memory
	for subsequent formatting in the html output file. Only for legacy
	support on kernels where the ftrace data lacks the suspend_resume
	or device_pm_callbacks trace events.
	Arguments:
		data: an empty Data object (with dmesgtext) from loadKernelLog
	Output:
		True (the Data object is filled in place)
	"""
	global sysvals
	phase = 'suspend_runtime'
	if(data.fwValid):
		vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
			(data.fwSuspend, data.fwResume))
	# dmesg phase match table
	dm = {
		'suspend_prepare': 'PM: Syncing filesystems.*',
		        'suspend': 'PM: Entering [a-z]* sleep.*',
		   'suspend_late': 'PM: suspend of devices complete after.*',
		  'suspend_noirq': 'PM: late suspend of devices complete after.*',
		'suspend_machine': 'PM: noirq suspend of devices complete after.*',
		 'resume_machine': 'ACPI: Low-level resume complete.*',
		   'resume_noirq': 'ACPI: Waking up from system sleep state.*',
		   'resume_early': 'PM: noirq resume of devices complete after.*',
		         'resume': 'PM: early resume of devices complete after.*',
		'resume_complete': 'PM: resume of devices complete after.*',
		    'post_resume': '.*Restarting tasks \.\.\..*',
	}
	# some phases have different dmesg markers per suspend mode
	if(sysvals.suspendmode == 'standby'):
		dm['resume_machine'] = 'PM: Restoring platform NVS memory'
	elif(sysvals.suspendmode == 'disk'):
		dm['suspend_late'] = 'PM: freeze of devices complete after.*'
		dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
		dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
		dm['resume_machine'] = 'PM: Restoring platform NVS memory'
		dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
		dm['resume'] = 'PM: early restore of devices complete after.*'
		dm['resume_complete'] = 'PM: restore of devices complete after.*'
	elif(sysvals.suspendmode == 'freeze'):
		dm['resume_machine'] = 'ACPI: resume from mwait'
	# action table (expected events that occur and show up in dmesg)
	at = {
		'sync_filesystems': {
			'smsg': 'PM: Syncing filesystems.*',
			'emsg': 'PM: Preparing system for mem sleep.*' },
		'freeze_user_processes': {
			'smsg': 'Freezing user space processes .*',
			'emsg': 'Freezing remaining freezable tasks.*' },
		'freeze_tasks': {
			'smsg': 'Freezing remaining freezable tasks.*',
			'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
		'ACPI prepare': {
			'smsg': 'ACPI: Preparing to enter system sleep state.*',
			'emsg': 'PM: Saving platform NVS memory.*' },
		'PM vns': {
			'smsg': 'PM: Saving platform NVS memory.*',
			'emsg': 'Disabling non-boot CPUs .*' },
	}
	t0 = -1.0
	cpu_start = -1.0
	prevktime = -1.0
	actions = dict()
	for line in data.dmesgtext:
		# -- preprocessing --
		# parse each dmesg line into the time and message
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			val = m.group('ktime')
			try:
				ktime = float(val)
			except:
				doWarning('INVALID DMESG LINE: '+\
					line.replace('\n', ''), 'dmesg')
				continue
			msg = m.group('msg')
			# initialize data start to first line time
			if t0 < 0:
				data.setStart(ktime)
				t0 = ktime
		else:
			continue
		# hack for determining resume_machine end for freeze
		if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
			and phase == 'resume_machine' and \
			re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
			data.dmesg['resume_machine']['end'] = ktime
			phase = 'resume_noirq'
			data.dmesg[phase]['start'] = ktime
		# -- phase changes --
		# suspend start
		if(re.match(dm['suspend_prepare'], msg)):
			phase = 'suspend_prepare'
			data.dmesg[phase]['start'] = ktime
			data.setStart(ktime)
		# suspend start
		elif(re.match(dm['suspend'], msg)):
			data.dmesg['suspend_prepare']['end'] = ktime
			phase = 'suspend'
			data.dmesg[phase]['start'] = ktime
		# suspend_late start
		elif(re.match(dm['suspend_late'], msg)):
			data.dmesg['suspend']['end'] = ktime
			phase = 'suspend_late'
			data.dmesg[phase]['start'] = ktime
		# suspend_noirq start
		elif(re.match(dm['suspend_noirq'], msg)):
			data.dmesg['suspend_late']['end'] = ktime
			phase = 'suspend_noirq'
			data.dmesg[phase]['start'] = ktime
		# suspend_machine start
		elif(re.match(dm['suspend_machine'], msg)):
			data.dmesg['suspend_noirq']['end'] = ktime
			phase = 'suspend_machine'
			data.dmesg[phase]['start'] = ktime
		# resume_machine start
		elif(re.match(dm['resume_machine'], msg)):
			# freeze/standby never really power down, so the previous
			# line's time is the best estimate of when suspend ended
			if(sysvals.suspendmode in ['freeze', 'standby']):
				data.tSuspended = prevktime
				data.dmesg['suspend_machine']['end'] = prevktime
			else:
				data.tSuspended = ktime
				data.dmesg['suspend_machine']['end'] = ktime
			phase = 'resume_machine'
			data.tResumed = ktime
			data.tLow = data.tResumed - data.tSuspended
			data.dmesg[phase]['start'] = ktime
		# resume_noirq start
		elif(re.match(dm['resume_noirq'], msg)):
			data.dmesg['resume_machine']['end'] = ktime
			phase = 'resume_noirq'
			data.dmesg[phase]['start'] = ktime
		# resume_early start
		elif(re.match(dm['resume_early'], msg)):
			data.dmesg['resume_noirq']['end'] = ktime
			phase = 'resume_early'
			data.dmesg[phase]['start'] = ktime
		# resume start
		elif(re.match(dm['resume'], msg)):
			data.dmesg['resume_early']['end'] = ktime
			phase = 'resume'
			data.dmesg[phase]['start'] = ktime
		# resume complete start
		elif(re.match(dm['resume_complete'], msg)):
			data.dmesg['resume']['end'] = ktime
			phase = 'resume_complete'
			data.dmesg[phase]['start'] = ktime
		# post resume start
		elif(re.match(dm['post_resume'], msg)):
			data.dmesg['resume_complete']['end'] = ktime
			data.setEnd(ktime)
			phase = 'post_resume'
			break
		# -- device callbacks --
		if(phase in data.phases):
			# device init call
			if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
				sm = re.match('calling (?P<f>.*)\+ @ '+\
					'(?P<n>.*), parent: (?P<p>.*)', msg);
				f = sm.group('f')
				n = sm.group('n')
				p = sm.group('p')
				if(f and n and p):
					data.newAction(phase, f, int(n), p, ktime, -1, '')
			# device init return
			elif(re.match('call (?P<f>.*)\+ returned .* after '+\
				'(?P<t>.*) usecs', msg)):
				sm = re.match('call (?P<f>.*)\+ returned .* after '+\
					'(?P<t>.*) usecs(?P<a>.*)', msg);
				f = sm.group('f')
				t = sm.group('t')
				list = data.dmesg[phase]['list']
				if(f in list):
					dev = list[f]
					dev['length'] = int(t)
					dev['end'] = ktime
		# -- non-devicecallback actions --
		# if trace events are not available, these are better than nothing
		if(not sysvals.usetraceevents):
			# look for known actions
			for a in at:
				if(re.match(at[a]['smsg'], msg)):
					if(a not in actions):
						actions[a] = []
					actions[a].append({'begin': ktime, 'end': ktime})
				if(re.match(at[a]['emsg'], msg)):
					actions[a][-1]['end'] = ktime
			# now look for CPU on/off events
			if(re.match('Disabling non-boot CPUs .*', msg)):
				# start of first cpu suspend
				cpu_start = ktime
			elif(re.match('Enabling non-boot CPUs .*', msg)):
				# start of first cpu resume
				cpu_start = ktime
			elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
				# end of a cpu suspend, start of the next
				m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
			elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
				# end of a cpu resume, start of the next
				m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
		prevktime = ktime
	# fill in any missing phases
	lp = data.phases[0]
	for p in data.phases:
		if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
			print('WARNING: phase "%s" is missing, something went wrong!' % p)
			print('    In %s, this dmesg line denotes the start of %s:' % \
				(sysvals.suspendmode, p))
			print('        "%s"' % dm[p])
		if(data.dmesg[p]['start'] < 0):
			data.dmesg[p]['start'] = data.dmesg[lp]['end']
			if(p == 'resume_machine'):
				data.tSuspended = data.dmesg[lp]['end']
				data.tResumed = data.dmesg[lp]['end']
				data.tLow = 0
		if(data.dmesg[p]['end'] < 0):
			data.dmesg[p]['end'] = data.dmesg[p]['start']
		lp = p
	# fill in any actions we've found
	for name in actions:
		for event in actions[name]:
			begin = event['begin']
			end = event['end']
			# if event starts before timeline start, expand timeline
			if(begin < data.start):
				data.setStart(begin)
			# if event ends after timeline end, expand the timeline
			if(end > data.end):
				data.setEnd(end)
			data.newActionGlobal(name, begin, end)
	if(sysvals.verbose):
		data.printDetails()
	if(len(sysvals.devicefilter) > 0):
		data.deviceFilter(sysvals.devicefilter)
	data.fixupInitcallsThatDidntReturn()
	return True
# Function: setTimelineRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list of devices/actions for a single phase
#	 sortedkeys: chronologically sorted key list to use
# Output:
# The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
	"""Organize the timeline entries into the smallest number of rows
	possible, with no entry overlapping.

	Greedy first-fit packing: each pass places every still-unplaced
	entry that does not overlap anything already in the current row.
	Arguments:
		list: dict of devices/actions for a single phase; each value
			has 'start' and 'end' keys, and gets a 'row' key set here
			(the parameter name shadows the builtin, kept for API compat)
		sortedkeys: chronologically sorted key list to use
	Output:
		The total number of rows needed to display this phase
	"""
	# clear all rows and mark every entry unplaced
	remaining = len(list)
	rowdata = dict()
	row = 0
	for item in list:
		list[item]['row'] = -1
	# try to pack each row with as many ranges as possible
	while(remaining > 0):
		if(row not in rowdata):
			rowdata[row] = []
		for item in sortedkeys:
			if(list[item]['row'] < 0):
				s = list[item]['start']
				e = list[item]['end']
				valid = True
				for ritem in rowdata[row]:
					# rstart/rend renamed from rs/re ("re" shadowed the module)
					rstart = ritem['start']
					rend = ritem['end']
					# overlap unless entirely left or right of ritem
					if(not (((s <= rstart) and (e <= rstart)) or
						((s >= rend) and (e >= rend)))):
						valid = False
						break
				if(valid):
					rowdata[row].append(list[item])
					list[item]['row'] = row
					remaining -= 1
		row += 1
	return row
# Function: createTimeScale
# Description:
# Create the timescale header for the html timeline
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspend: time when suspend occurs, i.e. the zero time
# Output:
# The html code needed to display the time scale
def createTimeScale(t0, tMax, tSuspended):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
output = '<div id="timescale">\n'
# set scale for timeline
tTotal = tMax - t0
tS = 0.1
if(tTotal <= 0):
return output
if(tTotal > 4):
tS = 1
if(tSuspended < 0):
for i in range(int(tTotal/tS)+1):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
if(i > 0):
val = '%0.fms' % (float(i)*tS*1000)
else:
val = ''
output += timescale.format(pos, val)
else:
tSuspend = tSuspended - t0
divTotal = int(tTotal/tS) + 1
divSuspend = int(tSuspend/tS)
s0 = (tSuspend - tS*divSuspend)*100/tTotal
for i in range(divTotal):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
if((i == 0) and (s0 < 3)):
val = ''
elif(i == divSuspend):
val = 'S/R'
else:
val = '%0.fms' % (float(i-divSuspend)*tS*1000)
output += timescale.format(pos, val)
output += '</div>\n'
return output
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
	"""Create a summary html file for a series of tests.

	Arguments:
	 testruns: array of Data objects from parseTraceLog
	 htmlfile: path of the summary html file to (over)write
	Writes one table row per test with its suspend/resume times and a
	link to the test's detail page, plus a final row of averages. The
	host/kernel/mode/time columns appear only when the tests' stamps
	differ from the global stamp (see diffStamp).
	"""
	global sysvals
	# print out the basic summary of all the tests
	hf = open(htmlfile, 'w')
	# write the html header first (html head, css code, up to body start)
	html = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>AnalyzeSuspend Summary</title>\n\
	<style type=\'text/css\'>\n\
	body {overflow-y: scroll;}\n\
	.stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
	table {width:100%;border-collapse: collapse;}\n\
	.summary {font: 22px Arial;border:1px solid;}\n\
	th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
	td {text-align: center;}\n\
	tr.alt td {background-color:#EAF2D3;}\n\
	tr.avg td {background-color:#BDE34C;}\n\
	a:link {color: #90B521;}\n\
	a:visited {color: #495E09;}\n\
	a:hover {color: #B1DF28;}\n\
	a:active {color: #FFFFFF;}\n\
	</style>\n</head>\n<body>\n'
	# group test header
	count = len(testruns)
	headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
	html += headline_stamp.format(sysvals.stamp['host'],
		sysvals.stamp['kernel'], sysvals.stamp['mode'],
		sysvals.stamp['time'], count)
	# check to see if all the tests have the same value
	stampcolumns = False
	for data in testruns:
		if diffStamp(sysvals.stamp, data.stamp):
			stampcolumns = True
			break
	th = '\t<th>{0}</th>\n'
	td = '\t<td>{0}</td>\n'
	tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'
	# table header
	html += '<table class="summary">\n<tr>\n'
	html += th.format("Test #")
	if stampcolumns:
		html += th.format("Hostname")
		html += th.format("Kernel Version")
		html += th.format("Suspend Mode")
	html += th.format("Test Time")
	html += th.format("Suspend Time")
	html += th.format("Resume Time")
	html += th.format("Detail")
	html += '</tr>\n'
	# test data, 1 row per test
	sTimeAvg = 0.0
	rTimeAvg = 0.0
	num = 1
	for data in testruns:
		# data.end is the end of post_resume
		# NOTE: assumes every Data object has a 'resume_complete' phase
		resumeEnd = data.dmesg['resume_complete']['end']
		# alternate row shading for readability
		if num % 2 == 1:
			html += '<tr class="alt">\n'
		else:
			html += '<tr>\n'
		# test num
		html += td.format("test %d" % num)
		num += 1
		if stampcolumns:
			# host name
			val = "unknown"
			if('host' in data.stamp):
				val = data.stamp['host']
			html += td.format(val)
			# host kernel
			val = "unknown"
			if('kernel' in data.stamp):
				val = data.stamp['kernel']
			html += td.format(val)
			# suspend mode
			val = "unknown"
			if('mode' in data.stamp):
				val = data.stamp['mode']
			html += td.format(val)
		# test time
		val = "unknown"
		if('time' in data.stamp):
			val = data.stamp['time']
		html += td.format(val)
		# suspend time
		sTime = (data.tSuspended - data.start)*1000
		sTimeAvg += sTime
		html += td.format("%3.3f ms" % sTime)
		# resume time
		rTime = (resumeEnd - data.tResumed)*1000
		rTimeAvg += rTime
		html += td.format("%3.3f ms" % rTime)
		# link to the output html
		html += tdlink.format(data.outfile)
		html += '</tr>\n'
	# last line: test average
	if(count > 0):
		sTimeAvg /= count
		rTimeAvg /= count
	html += '<tr class="avg">\n'
	html += td.format('Average')	# name
	if stampcolumns:
		html += td.format('')		# host
		html += td.format('')		# kernel
		html += td.format('')		# mode
	html += td.format('')			# time
	html += td.format("%3.3f ms" % sTimeAvg)	# suspend time
	html += td.format("%3.3f ms" % rTimeAvg)	# resume time
	html += td.format('')			# output link
	html += '</tr>\n'
	# flush the data to file
	hf.write(html+'</table>\n')
	hf.write('</body>\n</html>\n')
	hf.close()
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
	"""Create the output html file from the resident test data.

	Arguments:
	 testruns: array of Data objects from parseKernelLog or parseTraceLog
	Output:
	 True if the html file was created
	Writes the timeline, time scale, legend, device-detail boxes and
	(when enabled) the ftrace callgraph section to sysvals.htmlfile.
	"""
	global sysvals
	# normalize all runs so t=0 is the last run's suspend point
	for data in testruns:
		data.normalizeTime(testruns[-1].tSuspended)
	x2changes = ['', 'absolute']
	if len(testruns) > 1:
		x2changes = ['1', 'relative']
	# html function templates
	headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
	html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
	html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
	html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
	html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
	html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
	html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n'
	html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
	html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
	html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal2 = '<table class="time1">\n<tr>'\
		'<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
		'<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timegroups = '<table class="time2">\n<tr>'\
		'<td class="green">{4}Kernel Suspend: {0} ms</td>'\
		'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
		'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
		'<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
		'</tr>\n</table>\n'
	# device timeline
	vprint('Creating Device Timeline...')
	devtl = Timeline()
	# Generate the header for this timeline
	textnum = ['First', 'Second']
	for data in testruns:
		tTotal = data.end - data.start
		tEnd = data.dmesg['resume_complete']['end']
		if(tTotal == 0):
			print('ERROR: No timeline data')
			sys.exit()
		if(data.tLow > 0):
			low_time = '%.0f'%(data.tLow*1000)
		if data.fwValid:
			# firmware timing available: fold it into the totals
			suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
				(data.fwSuspend/1000000.0))
			resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
				(data.fwResume/1000000.0))
			testdesc1 = 'Total'
			testdesc2 = ''
			if(len(testruns) > 1):
				testdesc1 = testdesc2 = textnum[data.testnumber]
				testdesc2 += ' '
			if(data.tLow == 0):
				thtml = html_timetotal.format(suspend_time, \
					resume_time, testdesc1)
			else:
				thtml = html_timetotal2.format(suspend_time, low_time, \
					resume_time, testdesc1)
			devtl.html['timeline'] += thtml
			sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
				data.getStart())*1000)
			sftime = '%.3f'%(data.fwSuspend / 1000000.0)
			rftime = '%.3f'%(data.fwResume / 1000000.0)
			rktime = '%.3f'%((data.getEnd() - \
				data.dmesg['resume_machine']['start'])*1000)
			devtl.html['timeline'] += html_timegroups.format(sktime, \
				sftime, rftime, rktime, testdesc2)
		else:
			# kernel-only timing
			suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
			resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
			testdesc = 'Kernel'
			if(len(testruns) > 1):
				testdesc = textnum[data.testnumber]+' '+testdesc
			if(data.tLow == 0):
				thtml = html_timetotal.format(suspend_time, \
					resume_time, testdesc)
			else:
				thtml = html_timetotal2.format(suspend_time, low_time, \
					resume_time, testdesc)
			devtl.html['timeline'] += thtml
	# time scale for potentially multiple datasets
	t0 = testruns[0].start
	tMax = testruns[-1].end
	tSuspended = testruns[-1].tSuspended
	tTotal = tMax - t0
	# determine the maximum number of rows we need to draw
	timelinerows = 0
	for data in testruns:
		for phase in data.dmesg:
			list = data.dmesg[phase]['list']
			rows = setTimelineRows(list, list)
			data.dmesg[phase]['row'] = rows
			if(rows > timelinerows):
				timelinerows = rows
	# calculate the timeline height and create bounding box, add buttons
	devtl.setRows(timelinerows + 1)
	devtl.html['timeline'] += html_devlist1
	if len(testruns) > 1:
		devtl.html['timeline'] += html_devlist2
	devtl.html['timeline'] += html_zoombox
	devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height)
	# draw the colored boxes for each of the phases
	for data in testruns:
		for b in data.dmesg:
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			devtl.html['timeline'] += html_phase.format(left, width, \
				'%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \
				data.dmesg[b]['color'], '')
	# draw the time scale, try to make the number of labels readable
	devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended)
	devtl.html['timeline'] += devtl.html['scale']
	# draw one box per device in each phase
	for data in testruns:
		for b in data.dmesg:
			phaselist = data.dmesg[b]['list']
			for d in phaselist:
				name = d
				drv = ''
				dev = phaselist[d]
				if(d in sysvals.altdevname):
					name = sysvals.altdevname[d]
				if('drv' in dev and dev['drv']):
					drv = ' {%s}' % dev['drv']
				height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
				top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
				left = '%.3f' % (((dev['start']-t0)*100)/tTotal)
				width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal)
				length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
				color = 'rgba(204,204,204,0.5)'
				devtl.html['timeline'] += html_device.format(dev['id'], \
					d+drv+length+b, left, top, '%.3f'%height, width, name+drv)
	# draw any trace events found
	for data in testruns:
		for b in data.dmesg:
			phaselist = data.dmesg[b]['list']
			for name in phaselist:
				dev = phaselist[name]
				if('traceevents' in dev):
					vprint('Debug trace events found for device %s' % name)
					vprint('%20s %20s %10s %8s' % ('action', \
						'name', 'time(ms)', 'length(ms)'))
					for e in dev['traceevents']:
						vprint('%20s %20s %10.3f %8.3f' % (e.action, \
							e.name, e.time*1000, e.length*1000))
						height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
						top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
						left = '%.3f' % (((e.time-t0)*100)/tTotal)
						width = '%.3f' % (e.length*100/tTotal)
						color = 'rgba(204,204,204,0.5)'
						devtl.html['timeline'] += \
							html_traceevent.format(e.action+' '+e.name, \
								left, top, '%.3f'%height, \
								width, e.color, '')
	# timeline is finished
	devtl.html['timeline'] += '</div>\n</div>\n'
	# draw a legend which describes the phases by color
	data = testruns[-1]
	devtl.html['legend'] = '<div class="legend">\n'
	pdelta = 100.0/len(data.phases)
	pmargin = pdelta / 4.0
	for phase in data.phases:
		order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
		name = string.replace(phase, '_', ' ')
		devtl.html['legend'] += html_legend.format(order, \
			data.dmesg[phase]['color'], name)
	devtl.html['legend'] += '</div>\n'
	hf = open(sysvals.htmlfile, 'w')
	thread_height = 0
	# write the html header first (html head, css code, up to body start)
	html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>AnalyzeSuspend</title>\n\
	<style type=\'text/css\'>\n\
	body {overflow-y: scroll;}\n\
	.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
	.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
	.callgraph article * {padding-left: 28px;}\n\
	h1 {color:black;font: bold 30px Times;}\n\
	t0 {color:black;font: bold 30px Times;}\n\
	t1 {color:black;font: 30px Times;}\n\
	t2 {color:black;font: 25px Times;}\n\
	t3 {color:black;font: 20px Times;white-space:nowrap;}\n\
	t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\
	table {width:100%;}\n\
	.gray {background-color:rgba(80,80,80,0.1);}\n\
	.green {background-color:rgba(204,255,204,0.4);}\n\
	.purple {background-color:rgba(128,0,128,0.2);}\n\
	.yellow {background-color:rgba(255,255,204,0.4);}\n\
	.time1 {font: 22px Arial;border:1px solid;}\n\
	.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
	td {text-align: center;}\n\
	r {color:#500000;font:15px Tahoma;}\n\
	n {color:#505050;font:15px Tahoma;}\n\
	.tdhl {color: red;}\n\
	.hide {display: none;}\n\
	.pf {display: none;}\n\
	.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
	.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
	.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
	.thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
	.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
	.hover {background-color:white;border:1px solid red;z-index:10;}\n\
	.traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\
	.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
	.phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
	.t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\
	.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
	.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
	button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
	.devlist {position:'+x2changes[1]+';width:190px;}\n\
	#devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\
	</style>\n</head>\n<body>\n'
	hf.write(html_header)
	# write the test title and general info header
	if(sysvals.stamp['time'] != ""):
		hf.write(headline_stamp.format(sysvals.stamp['host'],
			sysvals.stamp['kernel'], sysvals.stamp['mode'], \
			sysvals.stamp['time']))
	# write the device timeline
	hf.write(devtl.html['timeline'])
	hf.write(devtl.html['legend'])
	hf.write('<div id="devicedetailtitle"></div>\n')
	hf.write('<div id="devicedetail" style="display:none;">\n')
	# draw the colored boxes for the device detail section
	for data in testruns:
		hf.write('<div id="devicedetail%d">\n' % data.testnumber)
		for b in data.phases:
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			hf.write(html_phaselet.format(b, left, width, \
				data.dmesg[b]['color']))
		hf.write('</div>\n')
	hf.write('</div>\n')
	# write the ftrace data (callgraph)
	data = testruns[-1]
	if(sysvals.usecallgraph):
		hf.write('<section id="callgraphs" class="callgraph">\n')
		# write out the ftrace data converted to html
		html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
		html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
		html_func_end = '</article>\n'
		html_func_leaf = '<article>{0} {1}</article>\n'
		num = 0
		for p in data.phases:
			list = data.dmesg[p]['list']
			for devname in data.sortedDevices(p):
				if('ftrace' not in list[devname]):
					continue
				name = devname
				if(devname in sysvals.altdevname):
					name = sysvals.altdevname[devname]
				devid = list[devname]['id']
				cg = list[devname]['ftrace']
				flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \
					((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000)
				hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \
					num, name+' '+p, flen))
				num += 1
				for line in cg.list:
					if(line.length < 0.000000001):
						flen = ''
					else:
						flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \
							line.time*1000)
					if(line.freturn and line.fcall):
						hf.write(html_func_leaf.format(line.name, flen))
					elif(line.freturn):
						hf.write(html_func_end)
					else:
						hf.write(html_func_start.format(num, line.name, flen))
						num += 1
			hf.write(html_func_end)
		hf.write('\n\n    </section>\n')
	# write the footer and close
	addScriptCode(hf, testruns)
	hf.write('</body>\n</html>\n')
	hf.close()
	return True
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
	"""Add the javascript code to the output html.

	Arguments:
	 hf: the open html file pointer
	 testruns: array of Data objects from parseKernelLog or parseTraceLog
	The embedded script implements timeline zoom, device hover
	highlighting, the per-device detail panel and the device-list popup
	windows. It is emitted as one literal string, so no comments can be
	placed inside it.
	"""
	# timeline bounds in ms, relative to the last run's suspend point
	t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
	tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
	# create an array in javascript memory with the device details
	detail = '	var devtable = [];\n'
	for data in testruns:
		topo = data.deviceTopology()
		detail += '	devtable[%d] = "%s";\n' % (data.testnumber, topo)
	detail += '	var bounds = [%f,%f];\n' % (t0, tMax)
	# add the code which will manipulate the data in the browser
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	function zoomTimeline() {\n'\
	'		var timescale = document.getElementById("timescale");\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 40000) newval = 40000;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var html = "";\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
	'		if(tS < 1) tS = 1;\n'\
	'		for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
	'			var pos = (tMax - s) * 100.0 / tTotal;\n'\
	'			var name = (s == 0)?"S/R":(s+"ms");\n'\
	'			html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
	'		}\n'\
	'		timescale.innerHTML = html;\n'\
	'	}\n'\
	'	function deviceHover() {\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				dev[i].className = "thread hover";\n'\
	'			} else {\n'\
	'				dev[i].className = "thread";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceUnhover() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].className = "thread";\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceTitle(title, total, cpu) {\n'\
	'		var prefix = "Total";\n'\
	'		if(total.length > 3) {\n'\
	'			prefix = "Average";\n'\
	'			total[1] = (total[1]+total[3])/2;\n'\
	'			total[2] = (total[2]+total[4])/2;\n'\
	'		}\n'\
	'		var devtitle = document.getElementById("devicedetailtitle");\n'\
	'		var name = title.slice(0, title.indexOf(" "));\n'\
	'		if(cpu >= 0) name = "CPU"+cpu;\n'\
	'		var driver = "";\n'\
	'		var tS = "<t2>(</t2>";\n'\
	'		var tR = "<t2>)</t2>";\n'\
	'		if(total[1] > 0)\n'\
	'			tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
	'		if(total[2] > 0)\n'\
	'			tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
	'		var s = title.indexOf("{");\n'\
	'		var e = title.indexOf("}");\n'\
	'		if((s >= 0) && (e >= 0))\n'\
	'			driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
	'		if(total[1] > 0 && total[2] > 0)\n'\
	'			devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
	'		else\n'\
	'			devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devinfo = document.getElementById("devicedetail");\n'\
	'		devinfo.style.display = "block";\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var idlist = [];\n'\
	'		var pdata = [[]];\n'\
	'		var pd = pdata[0];\n'\
	'		var total = [0.0, 0.0, 0.0];\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				idlist[idlist.length] = dev[i].id;\n'\
	'				var tidx = 1;\n'\
	'				if(dev[i].id[0] == "a") {\n'\
	'					pd = pdata[0];\n'\
	'				} else {\n'\
	'					if(pdata.length == 1) pdata[1] = [];\n'\
	'					if(total.length == 3) total[3]=total[4]=0.0;\n'\
	'					pd = pdata[1];\n'\
	'					tidx = 3;\n'\
	'				}\n'\
	'				var info = dev[i].title.split(" ");\n'\
	'				var pname = info[info.length-1];\n'\
	'				pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
	'				total[0] += pd[pname];\n'\
	'				if(pname.indexOf("suspend") >= 0)\n'\
	'					total[tidx] += pd[pname];\n'\
	'				else\n'\
	'					total[tidx+1] += pd[pname];\n'\
	'			}\n'\
	'		}\n'\
	'		var devname = deviceTitle(this.title, total, cpu);\n'\
	'		var left = 0.0;\n'\
	'		for (var t = 0; t < pdata.length; t++) {\n'\
	'			pd = pdata[t];\n'\
	'			devinfo = document.getElementById("devicedetail"+t);\n'\
	'			var phases = devinfo.getElementsByClassName("phaselet");\n'\
	'			for (var i = 0; i < phases.length; i++) {\n'\
	'				if(phases[i].id in pd) {\n'\
	'					var w = 100.0*pd[phases[i].id]/total[0];\n'\
	'					var fs = 32;\n'\
	'					if(w < 8) fs = 4*w | 0;\n'\
	'					var fs2 = fs*3/4;\n'\
	'					phases[i].style.width = w+"%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'					phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
	'					left += w;\n'\
	'					var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
	'					var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
	'					phases[i].innerHTML = time+pname;\n'\
	'				} else {\n'\
	'					phases[i].style.width = "0%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'				}\n'\
	'			}\n'\
	'		}\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(idlist.indexOf(cg[i].id) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function devListWindow(e) {\n'\
	'		var sx = e.clientX;\n'\
	'		if(sx > window.innerWidth - 440)\n'\
	'			sx = window.innerWidth - 440;\n'\
	'		var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
	'		var win = window.open("", "_blank", cfg);\n'\
	'		if(window.chrome) win.moveBy(sx, 0);\n'\
	'		var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
	'			"<style type=\\"text/css\\">"+\n'\
	'			"   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
	'			"</style>"\n'\
	'		var dt = devtable[0];\n'\
	'		if(e.target.id != "devlist1")\n'\
	'			dt = devtable[1];\n'\
	'		win.document.write(html+dt);\n'\
	'	}\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var devlist = document.getElementsByClassName("devlist");\n'\
	'		for (var i = 0; i < devlist.length; i++)\n'\
	'			devlist[i].onclick = devListWindow;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'			dev[i].onmouseover = deviceHover;\n'\
	'			dev[i].onmouseout = deviceUnhover;\n'\
	'		}\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	# emit the assembled script into the html file
	hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
	"""Execute system suspend through the sysfs interface, then copy the
	output dmesg and ftrace files to the test output directory.

	Reads all configuration from the global sysvals (trace path, suspend
	mode, rtcwake, execution count) and appends the captured trace and
	dmesg data to sysvals.ftracefile / sysvals.dmesgfile.
	"""
	global sysvals
	detectUSB(False)
	t0 = time.time()*1000
	tp = sysvals.tpath
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system('dmesg -C')
		# enable callgraph ftrace only for the second run
		if(sysvals.usecallgraph and count == 2):
			# set trace type
			os.system('echo function_graph > '+tp+'current_tracer')
			os.system('echo "" > '+tp+'set_ftrace_filter')
			# set trace format options
			os.system('echo funcgraph-abstime > '+tp+'trace_options')
			os.system('echo funcgraph-proc > '+tp+'trace_options')
			# focus only on device suspend and resume
			os.system('cat '+tp+'available_filter_functions | '+\
				'grep dpm_run_callback > '+tp+'set_graph_function')
		# if this is test2 and there's a delay, start here
		if(count > 1 and sysvals.x2delay > 0):
			tN = time.time()*1000
			while (tN - t0) < sysvals.x2delay:
				tN = time.time()*1000
				time.sleep(0.001)
		# start ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			print('START TRACING')
			os.system('echo 1 > '+tp+'tracing_on')
		# initiate suspend
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo SUSPEND START > '+tp+'trace_marker')
		if(sysvals.rtcwake):
			print('SUSPEND START')
			print('will autoresume in %d seconds' % sysvals.rtcwaketime)
			sysvals.rtcWakeAlarm()
		else:
			print('SUSPEND START (press a key to resume)')
		pf = open(sysvals.powerfile, 'w')
		pf.write(sysvals.suspendmode)
		# execution will pause here
		pf.close()
		# reset t0 so the x2delay for the next run measures from resume
		t0 = time.time()*1000
		# return from suspend
		print('RESUME COMPLETE')
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
		# see if there's firmware timing data to be had
		t = sysvals.postresumetime
		if(t > 0):
			print('Waiting %d seconds for POST-RESUME trace events...' % t)
			time.sleep(t)
		# stop ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo 0 > '+tp+'tracing_on')
			print('CAPTURING TRACE')
			writeDatafileHeader(sysvals.ftracefile)
			os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
			os.system('echo "" > '+tp+'trace')
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		writeDatafileHeader(sysvals.dmesgfile)
		os.system('dmesg -c >> '+sysvals.dmesgfile)
def writeDatafileHeader(filename):
	"""Append the test stamp header to an output data file.

	Arguments:
	 filename: path of the dmesg/ftrace output file to stamp
	Writes the test stamp line, plus optional firmware suspend/resume
	times (from the FPDT) and the post-resume wait time, so the parsers
	can identify the run later.
	"""
	global sysvals
	fw = getFPDT(False)
	prt = sysvals.postresumetime
	fp = open(filename, 'a')
	try:
		fp.write(sysvals.teststamp+'\n')
		if fw:
			fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
		if prt > 0:
			fp.write('# post resume time %u\n' % prt)
	finally:
		fp.close()
# Function: executeAndroidSuspend
# Description:
# Execute system suspend through the sysfs interface
# on a remote android device, then transfer the output
# dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
	"""Execute system suspend through the sysfs interface on a remote
	android device, then transfer the output dmesg and ftrace files to
	the local output directory.

	FIX: the suspend/capture sequence was previously wrapped in a second,
	duplicated 'for count in range(1,sysvals.execcount+1)' loop nested
	inside the first, which ran execcount**2 suspend cycles and restarted
	tracing mid-sequence. A single loop (matching executeSuspend) now
	performs one full trace+suspend+capture cycle per requested run.
	"""
	global sysvals
	# check to see if the display is currently off
	tp = sysvals.tpath
	out = os.popen(sysvals.adb+\
		' shell dumpsys power | grep mScreenOn').read().strip()
	# if so we need to turn it on so we can issue a new suspend
	if(out.endswith('false')):
		print('Waking the device up for the test...')
		# send the KEYPAD_POWER keyevent to wake it up
		os.system(sysvals.adb+' shell input keyevent 26')
		# wait a few seconds so the user can see the device wake up
		time.sleep(3)
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
		# start ftrace
		if(sysvals.usetraceevents):
			print('START TRACING')
			os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
		# initiate suspend
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+\
				" shell 'echo SUSPEND START > "+tp+"trace_marker'")
		print('SUSPEND START (press a key on the device to resume)')
		os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
			" > "+sysvals.powerfile+"'")
		# execution will pause here, then adb will exit
		# poll until the device answers again, i.e. it has resumed
		while(True):
			check = os.popen(sysvals.adb+\
				' shell pwd 2>/dev/null').read().strip()
			if(len(check) > 0):
				break
			time.sleep(1)
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
				"trace_marker'")
		# return from suspend
		print('RESUME COMPLETE')
		# stop ftrace
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
			print('CAPTURING TRACE')
			# NOTE(review): the '>' here rewrites the trace file each
			# pass, so only the final run's trace survives — confirm
			# whether multi-run android capture should append instead
			os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
			os.system(sysvals.adb+' shell cat '+tp+\
				'trace >> '+sysvals.ftracefile)
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
		os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
# This can be dangerous, so use at your own risk, most devices are set
#	 to always-on since the kernel can't determine if the device can
#	 properly autosuspend
def setUSBDevicesAuto():
	"""Set the autosuspend control parameter of all USB devices to auto.

	Walks /sys/devices looking for USB device nodes (those exposing both
	idVendor and idProduct), writes 'auto' to each device's
	power/control file, and prints the resulting setting. Requires root.
	"""
	global sysvals
	rootCheck()
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		# only USB device nodes carry both id files
		if(not re.match('.*/usb[0-9]*.*', dirname)):
			continue
		if('idVendor' not in filenames or 'idProduct' not in filenames):
			continue
		os.system('echo auto > %s/power/control' % dirname)
		name = dirname.split('/')[-1]
		desc = os.popen('cat %s/product 2>/dev/null' % \
			dirname).read().replace('\n', '')
		ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
			dirname).read().replace('\n', '')
		print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
	"""Print out an equivalent Y or N for a set of known parameter values.

	Output:
	 'Y', 'N', or ' ' if the value is unknown
	"""
	if val in ('auto', 'enabled', 'active', '1'):
		return 'Y'
	if val in ('on', 'disabled', 'suspended', 'forbidden', 'unsupported'):
		return 'N'
	return ' '
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
	"""Print out a very concise time string in minutes and seconds.

	Arguments:
	 val: a millisecond count (int or numeric string)
	Output:
	 The time string, e.g. "1901m16s"; on unparsable input returns the
	 float 0.0 (kept for backward compatibility with existing callers,
	 even though it is not a string).
	"""
	try:
		ms = int(val)
	except (ValueError, TypeError):
		# narrowed from a bare except: only bad-value conversions are
		# expected here; anything else should propagate
		return 0.0
	# use floor division explicitly so the math is the same under
	# python 2 and python 3
	m = ms // 60000
	s = (ms // 1000) - (m * 60)
	return '%3dm%2ds' % (m, s)
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
# Arguments:
# output: True to output the info to stdout, False otherwise
def detectUSB(output):
	"""Detect all USB hosts/devices currently connected.
	Fills sysvals.altdevname with human-readable names (product string, or
	vendor:product ids as a fallback) for better timeline readability, and
	optionally prints a table of each device's runtime-PM state.
	Arguments:
		output: True to output the info to stdout, False otherwise
	"""
	global sysvals
	# sysfs attribute names read per device: identity, then power state
	field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
	power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
		 'control':'', 'persist':'', 'runtime_enabled':'',
		 'runtime_status':'', 'runtime_usage':'',
		'runtime_active_time':'',
		'runtime_suspended_time':'',
		'active_duration':'',
		'connected_duration':''}
	if(output):
		print('LEGEND')
		print('---------------------------------------------------------------------------------------------')
		print('  A = async/sync PM queue Y/N                       D = autosuspend delay (seconds)')
		print('  S = autosuspend Y/N                         rACTIVE = runtime active (min/sec)')
		print('  P = persist across suspend Y/N              rSUSPEN = runtime suspend (min/sec)')
		print('  E = runtime suspend enabled/forbidden Y/N    ACTIVE = active duration (min/sec)')
		print('  R = runtime status active/suspended Y/N     CONNECT = connected duration (min/sec)')
		print('  U = runtime usage count')
		print('---------------------------------------------------------------------------------------------')
		print('  NAME       ID      DESCRIPTION         SPEED A S P E R U D rACTIVE rSUSPEN  ACTIVE CONNECT')
		print('---------------------------------------------------------------------------------------------')
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		# a USB device node is identified by idVendor+idProduct attributes
		if(re.match('.*/usb[0-9]*.*', dirname) and
			'idVendor' in filenames and 'idProduct' in filenames):
			for i in field:
				field[i] = os.popen('cat %s/%s 2>/dev/null' % \
					(dirname, i)).read().replace('\n', '')
			name = dirname.split('/')[-1]
			# prefer the product string; fall back to vendor:product ids
			if(len(field['product']) > 0):
				sysvals.altdevname[name] = \
					'%s [%s]' % (field['product'], name)
			else:
				sysvals.altdevname[name] = \
					'%s:%s [%s]' % (field['idVendor'], \
						field['idProduct'], name)
			if(output):
				for i in power:
					power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
						(dirname, i)).read().replace('\n', '')
				# left-align host controllers, right-align devices
				if(re.match('usb[0-9]*', name)):
					first = '%-8s' % name
				else:
					first = '%8s' % name
				print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
					(first, field['idVendor'], field['idProduct'], \
					field['product'][0:20], field['speed'], \
					yesno(power['async']), \
					yesno(power['control']), \
					yesno(power['persist']), \
					yesno(power['runtime_enabled']), \
					yesno(power['runtime_status']), \
					power['runtime_usage'], \
					power['autosuspend'], \
					ms2nice(power['runtime_active_time']), \
					ms2nice(power['runtime_suspended_time']), \
					ms2nice(power['active_duration']), \
					ms2nice(power['connected_duration'])))
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
	"""Determine the supported power modes on this system.

	Reads sysvals.powerfile locally, or via adb on android.
	Output:
		A string list of the available modes
	"""
	global sysvals
	modes = ''
	if(not sysvals.android):
		if(os.path.exists(sysvals.powerfile)):
			# str.split() with no arguments is identical to the
			# python2-only string.split() the original relied on;
			# the with-block also guarantees the file gets closed
			with open(sysvals.powerfile, 'r') as fp:
				modes = fp.read().split()
	else:
		line = os.popen(sysvals.adb+' shell cat '+\
			sysvals.powerfile).read().strip()
		modes = line.split()
	return modes
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
	"""Read the ACPI bios tables and pull out FPDT, the firmware data.

	Arguments:
		output: True to output the info to stdout, False otherwise
	Output:
		[suspend_time_ns, resume_time_ns] on success, False on failure
	"""
	global sysvals
	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'
	rootCheck()
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.mempath, False)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.mempath, False)
		return False
	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()
	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes', False)
		return False
	# ACPI table header: signature, length, revision, checksum, oem info
	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		print('')
		print('Firmware Performance Data Table (%s)' % table[0])
		print('                  Signature : %s' % table[0])
		print('               Table Length : %u' % table[1])
		print('                   Revision : %u' % table[2])
		print('                   Checksum : 0x%x' % table[3])
		print('                     OEM ID : %s' % table[4])
		print('               OEM Table ID : %s' % table[5])
		print('               OEM Revision : %u' % table[6])
		print('                 Creator ID : %s' % table[7])
		print('           Creator Revision : 0x%x' % table[8])
		print('')
	if(table[0] != 'FPDT'):
		if(output):
			# BUGFIX: doError requires a 'help' argument; the original
			# one-arg call raised TypeError instead of reporting the error
			doError('Invalid FPDT table', False)
		return False
	if(len(buf) <= 36):
		return False
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	fp = open(sysvals.mempath, 'rb')
	while(i < len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		if(header[0] not in rectype):
			# BUGFIX: was 'continue', which never advanced i and spun
			# forever on an unknown record type; stop parsing instead
			break
		if(header[1] != 16):
			# BUGFIX: same infinite-loop hazard as above
			break
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except Exception:
			# doError exits the process, so no fallthrough occurs
			doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == 'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata)
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
				print('                  Reset END : %u ns' % record[4])
				print('  OS Loader LoadImage Start : %u ns' % record[5])
				print(' OS Loader StartImage Start : %u ns' % record[6])
				print('     ExitBootServices Entry : %u ns' % record[7])
				print('      ExitBootServices Exit : %u ns' % record[8])
		elif(rechead[0] == 'S3PT'):
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
			j = 0
			while(j < len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					# BUGFIX: was 'continue' with no j advance
					# (infinite loop); stop parsing the subtable
					break
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               Resume Count : %u' % \
							record[1])
						print('                 FullResume : %u ns' % \
							record[2])
						print('              AverageResume : %u ns' % \
							record[3])
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               SuspendStart : %u ns' % \
							record[0])
						print('                 SuspendEnd : %u ns' % \
							record[1])
						print('                SuspendTime : %u ns' % \
							fwData[0])
				j += prechead[1]
		if(output):
			print('')
		i += header[1]
	fp.close()
	return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck():
	"""Verify that the requested command and options will work, and
	print the results to the terminal.
	Checks: adb connectivity (android), root access, sysfs mount,
	validity of the chosen suspend mode, screen-unlock ability (android),
	ftrace support, trace-event availability, and rtcwake support.
	Output:
		True if the test will work, False if not
	"""
	global sysvals
	status = True
	if(sysvals.android):
		print('Checking the android system ...')
	else:
		print('Checking this system (%s)...' % platform.node())
	# check if adb is connected to a device
	if(sysvals.android):
		res = 'NO'
		out = os.popen(sysvals.adb+' get-state').read().strip()
		if(out == 'device'):
			res = 'YES'
		print('    is android device connected: %s' % res)
		if(res != 'YES'):
			print('    Please connect the device before using this tool')
			return False
	# check we have root access
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell id').read().strip()
		if('root' in out):
			res = 'YES'
	else:
		if(os.environ['USER'] == 'root'):
			res = 'YES'
	print('    have root access: %s' % res)
	if(res != 'YES'):
		if(sysvals.android):
			print('    Try running "adb root" to restart the daemon as root')
		else:
			print('    Try running this script with sudo')
		return False
	# check sysfs is mounted
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell ls '+\
			sysvals.powerfile).read().strip()
		if(out == sysvals.powerfile):
			res = 'YES'
	else:
		if(os.path.exists(sysvals.powerfile)):
			res = 'YES'
	print('    is sysfs mounted: %s' % res)
	if(res != 'YES'):
		return False
	# check target mode is a valid mode
	res = 'NO'
	modes = getModes()
	if(sysvals.suspendmode in modes):
		res = 'YES'
	else:
		status = False
	print('    is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
	if(res == 'NO'):
		print('      valid power modes are: %s' % modes)
		print('      please choose one with -m')
	# check if the tool can unlock the device
	if(sysvals.android):
		res = 'YES'
		out1 = os.popen(sysvals.adb+\
			' shell dumpsys power | grep mScreenOn').read().strip()
		out2 = os.popen(sysvals.adb+\
			' shell input').read().strip()
		if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
			res = 'NO (wake the android device up before running the test)'
		print('    can I unlock the screen: %s' % res)
	# check if ftrace is available
	res = 'NO'
	ftgood = verifyFtrace()
	if(ftgood):
		res = 'YES'
	elif(sysvals.usecallgraph):
		# ftrace is only fatal when callgraph output was requested
		status = False
	print('    is ftrace supported: %s' % res)
	# what data source are we using
	res = 'DMESG'
	if(ftgood):
		# assume full trace-event support, then downgrade as probes fail
		sysvals.usetraceeventsonly = True
		sysvals.usetraceevents = False
		for e in sysvals.traceevents:
			check = False
			if(sysvals.android):
				out = os.popen(sysvals.adb+' shell ls -d '+\
					sysvals.epath+e).read().strip()
				if(out == sysvals.epath+e):
					check = True
			else:
				if(os.path.exists(sysvals.epath+e)):
					check = True
			if(not check):
				sysvals.usetraceeventsonly = False
			if(e == 'suspend_resume' and check):
				sysvals.usetraceevents = True
		if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
			res = 'FTRACE (all trace events found)'
		elif(sysvals.usetraceevents):
			res = 'DMESG and FTRACE (suspend_resume trace event found)'
	print('    timeline data source: %s' % res)
	# check if rtcwake
	res = 'NO'
	if(sysvals.rtcpath != ''):
		res = 'YES'
	elif(sysvals.rtcwake):
		status = False
	print('    is rtcwake supported: %s' % res)
	return status
# Function: doError
# Description:
# generic error function for catastrphic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help):
	"""Print an error message, optionally the help text, and exit.

	Arguments:
		msg: the error message to print
		help: True if printHelp should be called after, False otherwise
	"""
	if(help == True):
		printHelp()
	# BUGFIX: the %-formatting now happens inside the call; the original
	# 'print(...) % msg' only worked with the python2 print statement and
	# raises TypeError under print-function semantics
	print('ERROR: %s\n' % msg)
	sys.exit()
# Function: doWarning
# Description:
# generic warning function for non-catastrophic anomalies
# Arguments:
# msg: the warning message to print
# file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
	"""Print a warning for non-catastrophic anomalies.

	Arguments:
		msg: the warning message to print
		file: if not empty, a filename to request be sent to the owner for debug
	"""
	# BUGFIX: formatting moved inside the call; 'print(...) % msg' only
	# worked with the python2 print statement
	print('/* %s */' % msg)
	if(file):
		print('/* For a fix, please send this'+\
			' %s file to <todd.e.brandt@intel.com> */' % file)
# Function: rootCheck
# Description:
# quick check to see if we have root access
def rootCheck():
	"""Exit with an error unless the script is running as root."""
	# use .get() so a missing USER environment variable (e.g. under cron
	# or a stripped env) reports the root error instead of a KeyError
	if(os.environ.get('USER') != 'root'):
		doError('This script must be run as root', False)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max):
	"""Pull out an integer argument from the command line with checks.

	Arguments:
		name: option name, used in error messages
		args: the argument iterator to pull the value from
		min/max: inclusive bounds for the accepted value
	Output:
		The validated integer value (doError exits on any failure)
	"""
	try:
		# next(args) works on python 2.6+ and python 3;
		# the original args.next() is python2-only
		arg = next(args)
	except StopIteration:
		doError(name+': no argument supplied', True)
	try:
		val = int(arg)
	except (ValueError, TypeError):
		doError(name+': non-integer value given', True)
	if(val < min or val > max):
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
	"""Generate an output from an existing set of ftrace/dmesg logs.
	Uses sysvals.ftracefile/dmesgfile as inputs and writes the html
	timeline to sysvals.htmlfile without running a new suspend test.
	"""
	global sysvals
	if(sysvals.ftracefile != ''):
		doesTraceLogHaveTraceEvents()
	# older kernels keep the timeline data in dmesg, so it is mandatory
	# unless the ftrace log alone carries all the trace events
	if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
		doError('recreating this html output '+\
			'requires a dmesg file', False)
	sysvals.setOutputFile()
	vprint('Output file: %s' % sysvals.htmlfile)
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		testruns = parseTraceLog()
	else:
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile != ''):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir):
	"""Execute a suspend/resume, gather the logs, and generate the output.

	Arguments:
		subdir: directory in which to create the test output files
	"""
	global sysvals
	# prepare for the test
	if(not sysvals.android):
		initFtrace()
	else:
		initFtraceAndroid()
	sysvals.initTestOutput(subdir)
	vprint('Output files:\n    %s' % sysvals.dmesgfile)
	if(sysvals.usecallgraph or
		sysvals.usetraceevents or
		sysvals.usetraceeventsonly):
		vprint('    %s' % sysvals.ftracefile)
	vprint('    %s' % sysvals.htmlfile)
	# execute the test
	if(not sysvals.android):
		executeSuspend()
	else:
		executeAndroidSuspend()
	# analyze the data and create the html output
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# data for kernels 3.15 or newer is entirely in ftrace
		testruns = parseTraceLog()
	else:
		# data for kernels older than 3.15 is primarily in dmesg
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, output):
	"""Create a summary of tests in a sub-directory.
	Scans subdir for *_ftrace.txt logs, re-parses each test, and writes
	a combined summary.html linking to each per-test page.
	Arguments:
		subdir: directory to scan for test results
		output: True to print progress info to stdout
	"""
	global sysvals
	# get a list of ftrace output files
	files = []
	for dirname, dirnames, filenames in os.walk(subdir):
		for filename in filenames:
			if(re.match('.*_ftrace.txt', filename)):
				files.append("%s/%s" % (dirname, filename))
	# process the files in order and get an array of data objects
	testruns = []
	for file in sorted(files):
		if output:
			print("Test found in %s" % os.path.dirname(file))
		sysvals.ftracefile = file
		sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
		doesTraceLogHaveTraceEvents()
		sysvals.usecallgraph = False
		if not sysvals.usetraceeventsonly:
			# older-kernel data: the matching dmesg log is required
			if(not os.path.exists(sysvals.dmesgfile)):
				print("Skipping %s: not a valid test input" % file)
				continue
			else:
				if output:
					f = os.path.basename(sysvals.ftracefile)
					d = os.path.basename(sysvals.dmesgfile)
					print("\tInput files: %s and %s" % (f, d))
				testdata = loadKernelLog()
				data = testdata[0]
				parseKernelLog(data)
				testdata = [data]
				appendIncompleteTraceLog(testdata)
		else:
			if output:
				print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
			testdata = parseTraceLog()
			data = testdata[0]
		# align the timeline so t=0 is the suspend point
		data.normalizeTime(data.tSuspended)
		link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
		data.outfile = link
		testruns.append(data)
	createHTMLSummarySimple(testruns, subdir+'/summary.html')
# Function: printHelp
# Description:
# print out the help text
def printHelp():
	"""Print out the help text.

	Output:
		True (always), so callers can chain it into error paths
	"""
	global sysvals
	modes = getModes()
	print('')
	print('AnalyzeSuspend v%.1f' % sysvals.version)
	print('Usage: sudo analyze_suspend.py <options>')
	print('')
	print('Description:')
	print('  This tool is designed to assist kernel and OS developers in optimizing')
	print('  their linux stack\'s suspend/resume time. Using a kernel image built')
	print('  with a few extra options enabled, the tool will execute a suspend and')
	print('  capture dmesg and ftrace data until resume is complete. This data is')
	print('  transformed into a device timeline and an optional callgraph to give')
	print('  a detailed view of which devices/subsystems are taking the most')
	print('  time in suspend/resume.')
	print('')
	print('  Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
	print('   HTML output:                    <hostname>_<mode>.html')
	print('   raw dmesg output:               <hostname>_<mode>_dmesg.txt')
	print('   raw ftrace output:              <hostname>_<mode>_ftrace.txt')
	print('')
	print('Options:')
	print('  [general]')
	print('    -h          Print this help text')
	print('    -v          Print the current tool version')
	print('    -verbose    Print extra information during execution and analysis')
	print('    -status     Test to see if the system is enabled to run this tool')
	print('    -modes      List available suspend modes')
	# BUGFIX: the %-formatting now happens inside the call; the original
	# 'print(...) % (modes, ...)' only worked as a python2 print statement
	print('    -m mode     Mode to initiate for suspend %s (default: %s)' % (modes, sysvals.suspendmode))
	print('    -rtcwake t  Use rtcwake to autoresume after <t> seconds (default: disabled)')
	print('  [advanced]')
	print('    -f          Use ftrace to create device callgraphs (default: disabled)')
	print('    -filter "d1 d2 ..." Filter out all but this list of dev names')
	print('    -x2         Run two suspend/resumes back to back (default: disabled)')
	print('    -x2delay t  Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
	print('    -postres t  Time after resume completion to wait for post-resume events (default: 0 S)')
	print('    -multi n d  Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
	print('                be created in a new subdirectory with a summary page.')
	print('  [utilities]')
	print('    -fpdt       Print out the contents of the ACPI Firmware Performance Data Table')
	print('    -usbtopo    Print out the current USB topology with power info')
	print('    -usbauto    Enable autosuspend for all connected USB devices')
	print('  [android testing]')
	print('    -adb binary Use the given adb binary to run the test on an android device.')
	print('                The device should already be connected and with root access.')
	print('                Commands will be executed on the device using "adb shell"')
	print('  [re-analyze data from previous runs]')
	print('    -ftrace ftracefile  Create HTML output using ftrace input')
	print('    -dmesg dmesgfile    Create HTML output using dmesg (not needed for kernel >= 3.15)')
	print('    -summary directory  Create a summary of all test in this dir')
	print('')
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	cmd = ''
	cmdarg = ''
	multitest = {'run': False, 'count': 0, 'delay': 0}
	# loop through the command line arguments
	# NOTE: args.next() and the bare 'print modes' below are python2-only
	args = iter(sys.argv[1:])
	for arg in args:
		if(arg == '-m'):
			try:
				val = args.next()
			except:
				doError('No mode supplied', True)
			sysvals.suspendmode = val
		elif(arg == '-adb'):
			try:
				val = args.next()
			except:
				doError('No adb binary supplied', True)
			if(not os.path.exists(val)):
				doError('file doesnt exist: %s' % val, False)
			if(not os.access(val, os.X_OK)):
				doError('file isnt executable: %s' % val, False)
			try:
				check = os.popen(val+' version').read().strip()
			except:
				doError('adb version failed to execute', False)
			if(not re.match('Android Debug Bridge .*', check)):
				doError('adb version failed to execute', False)
			sysvals.adb = val
			sysvals.android = True
		elif(arg == '-x2'):
			if(sysvals.postresumetime > 0):
				doError('-x2 is not compatible with -postres', False)
			sysvals.execcount = 2
		elif(arg == '-x2delay'):
			sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
		elif(arg == '-postres'):
			if(sysvals.execcount != 1):
				doError('-x2 is not compatible with -postres', False)
			sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
		elif(arg == '-f'):
			sysvals.usecallgraph = True
		elif(arg == '-modes'):
			cmd = 'modes'
		elif(arg == '-fpdt'):
			cmd = 'fpdt'
		elif(arg == '-usbtopo'):
			cmd = 'usbtopo'
		elif(arg == '-usbauto'):
			cmd = 'usbauto'
		elif(arg == '-status'):
			cmd = 'status'
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg == '-v'):
			print("Version %.1f" % sysvals.version)
			sys.exit()
		elif(arg == '-rtcwake'):
			sysvals.rtcwake = True
			sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
		elif(arg == '-multi'):
			multitest['run'] = True
			multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
			multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
		elif(arg == '-dmesg'):
			try:
				val = args.next()
			except:
				doError('No dmesg file supplied', True)
			sysvals.notestrun = True
			sysvals.dmesgfile = val
			if(os.path.exists(sysvals.dmesgfile) == False):
				doError('%s doesnt exist' % sysvals.dmesgfile, False)
		elif(arg == '-ftrace'):
			try:
				val = args.next()
			except:
				doError('No ftrace file supplied', True)
			sysvals.notestrun = True
			sysvals.usecallgraph = True
			sysvals.ftracefile = val
			if(os.path.exists(sysvals.ftracefile) == False):
				doError('%s doesnt exist' % sysvals.ftracefile, False)
		elif(arg == '-summary'):
			try:
				val = args.next()
			except:
				doError('No directory supplied', True)
			cmd = 'summary'
			cmdarg = val
			sysvals.notestrun = True
			if(os.path.isdir(val) == False):
				doError('%s isnt accesible' % val, False)
		elif(arg == '-filter'):
			try:
				val = args.next()
			except:
				doError('No devnames supplied', True)
			sysvals.setDeviceFilter(val)
		elif(arg == '-h'):
			printHelp()
			sys.exit()
		else:
			doError('Invalid argument: '+arg, True)
	# just run a utility command and exit
	if(cmd != ''):
		if(cmd == 'status'):
			statusCheck()
		elif(cmd == 'fpdt'):
			if(sysvals.android):
				doError('cannot read FPDT on android device', False)
			getFPDT(True)
		elif(cmd == 'usbtopo'):
			if(sysvals.android):
				doError('cannot read USB topology '+\
					'on an android device', False)
			detectUSB(True)
		elif(cmd == 'modes'):
			modes = getModes()
			print modes
		elif(cmd == 'usbauto'):
			setUSBDevicesAuto()
		elif(cmd == 'summary'):
			print("Generating a summary of folder \"%s\"" % cmdarg)
			runSummary(cmdarg, True)
		sys.exit()
	# run test on android device
	if(sysvals.android):
		if(sysvals.usecallgraph):
			doError('ftrace (-f) is not yet supported '+\
				'in the android kernel', False)
		if(sysvals.notestrun):
			doError('cannot analyze test files on the '+\
				'android device', False)
	# if instructed, re-analyze existing data files
	if(sysvals.notestrun):
		rerunTest()
		sys.exit()
	# verify that we can run a test
	if(not statusCheck()):
		print('Check FAILED, aborting the test run!')
		sys.exit()
	if multitest['run']:
		# run multiple tests in a separte subdirectory
		s = 'x%d' % multitest['count']
		subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
		os.mkdir(subdir)
		for i in range(multitest['count']):
			if(i != 0):
				print('Waiting %d seconds...' % (multitest['delay']))
				time.sleep(multitest['delay'])
			print('TEST (%d/%d) START' % (i+1, multitest['count']))
			runTest(subdir)
			print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
		runSummary(subdir, False)
	else:
		# run the test in the current directory
		runTest(".")
|
gpl-2.0
|
huchoi/edx-platform
|
lms/djangoapps/django_comment_client/permissions.py
|
19
|
4384
|
"""
Module for checking permissions with the comment_client backend
"""
import logging
from types import NoneType
from django.core import cache
from opaque_keys.edx.keys import CourseKey
CACHE = cache.get_cache('default')
CACHE_LIFESPAN = 60
def cached_has_permission(user, permission, course_id=None):
    """
    Look up has_permission through the cache. A change in a user's role or
    a role's permissions will only become effective after CACHE_LIFESPAN seconds.
    """
    assert isinstance(course_id, (NoneType, CourseKey))
    cache_key = u"permission_{user_id:d}_{course_id}_{permission}".format(
        user_id=user.id, course_id=course_id, permission=permission)
    cached = CACHE.get(cache_key, None)
    if cached in (True, False):
        return cached
    result = has_permission(user, permission, course_id=course_id)
    CACHE.set(cache_key, result, CACHE_LIFESPAN)
    return result
def has_permission(user, permission, course_id=None):
    """
    Return True if any of the user's roles for this course grants permission.
    """
    assert isinstance(course_id, (NoneType, CourseKey))
    matching_roles = user.roles.filter(course_id=course_id)
    return any(role.has_permission(permission) for role in matching_roles)
# Permission names that are evaluated against the content payload rather
# than against the user's roles (see _check_condition).
CONDITIONS = ['is_open', 'is_author']
def _check_condition(user, condition, course_id, data):
def check_open(user, condition, course_id, data):
try:
return data and not data['content']['closed']
except KeyError:
return False
def check_author(user, condition, course_id, data):
try:
return data and data['content']['user_id'] == str(user.id)
except KeyError:
return False
handlers = {
'is_open': check_open,
'is_author': check_author,
}
return handlers[condition](user, condition, course_id, data)
def _check_conditions_permissions(user, permissions, course_id, **kwargs):
    """
    Accepts a list of permissions and proceed if any of the permission is valid.
    Note that ["can_view", "can_edit"] will proceed if the user has either
    "can_view" or "can_edit" permission. To use AND operator in between, wrap them in
    a list.
    """
    # NOTE: 'basestring' is python2-only; a string item is either a content
    # condition (CONDITIONS) or a role permission name
    def test(user, per, operator="or"):
        if isinstance(per, basestring):
            if per in CONDITIONS:
                return _check_condition(user, per, course_id, kwargs)
            return cached_has_permission(user, per, course_id=course_id)
        elif isinstance(per, list) and operator in ["and", "or"]:
            # nesting flips the operator: items of a nested list are AND'ed
            results = [test(user, x, operator="and") for x in per]
            if operator == "or":
                return True in results
            elif operator == "and":
                return not False in results
    return test(user, permissions, operator="or")
# Maps each discussion view name to its permission spec. Top-level items are
# OR'ed together; a nested list is AND'ed (see _check_conditions_permissions).
VIEW_PERMISSIONS = {
    'update_thread': ['edit_content', ['update_thread', 'is_open', 'is_author']],
    'create_comment': [["create_comment", "is_open"]],
    'delete_thread': ['delete_thread', ['update_thread', 'is_author']],
    'update_comment': ['edit_content', ['update_comment', 'is_open', 'is_author']],
    'endorse_comment': ['endorse_comment'],
    'openclose_thread': ['openclose_thread'],
    'create_sub_comment': [['create_sub_comment', 'is_open']],
    'delete_comment': ['delete_comment', ['update_comment', 'is_open', 'is_author']],
    'vote_for_comment': [['vote', 'is_open']],
    'undo_vote_for_comment': [['unvote', 'is_open']],
    'vote_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_thread': [['vote', 'is_open']],
    'un_flag_abuse_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_comment': [['vote', 'is_open']],
    'un_flag_abuse_for_comment': [['vote', 'is_open']],
    'undo_vote_for_thread': [['unvote', 'is_open']],
    'pin_thread': ['openclose_thread'],
    'un_pin_thread': ['openclose_thread'],
    'follow_thread': ['follow_thread'],
    'follow_commentable': ['follow_commentable'],
    'follow_user': ['follow_user'],
    'unfollow_thread': ['unfollow_thread'],
    'unfollow_commentable': ['unfollow_commentable'],
    'unfollow_user': ['unfollow_user'],
    'create_thread': ['create_thread'],
}
def check_permissions_by_view(user, course_id, content, name):
    """
    Check whether the user may perform the named view's action on content.

    Returns False for unknown view names (after logging a warning).
    """
    assert isinstance(course_id, CourseKey)
    try:
        p = VIEW_PERMISSIONS[name]
    except KeyError:
        # BUGFIX: the original fell through and crashed with an unbound
        # local 'p'; deny access for unknown view names instead.
        # Lazy %-args avoid formatting when the warning is filtered out.
        logging.warning("Permission for view named %s does not exist in permissions.py", name)
        return False
    return _check_conditions_permissions(user, p, course_id, content=content)
|
agpl-3.0
|
jordanemedlock/psychtruths
|
temboo/core/Library/Zillow/GetZestimate.py
|
5
|
3437
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetZestimate
# Retrieve estimate information for a specified property.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetZestimate(Choreography):
    # Choreo wrapper for the Zillow GetZestimate API call; the factory
    # methods below are invoked by the Temboo framework, not by users.
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetZestimate Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetZestimate, self).__init__(temboo_session, '/Library/Zillow/GetZestimate')
    def new_input_set(self):
        # Factory for the container used to specify this choreo's inputs.
        return GetZestimateInputSet()
    def _make_result_set(self, result, path):
        # Internal factory: wrap a raw execution result.
        return GetZestimateResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal factory: handle for an in-flight execution.
        return GetZestimateChoreographyExecution(session, exec_id, path)
class GetZestimateInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetZestimate
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter stores its value under the API's expected input name.
    def set_RentEstimate(self, value):
        """
        Set the value of the RentEstimate input for this Choreo. ((optional, boolean) Set to 1 (true) to enable. Defaults to 0 (false).)
        """
        super(GetZestimateInputSet, self)._set_input('RentEstimate', value)
    def set_ZPID(self, value):
        """
        Set the value of the ZPID input for this Choreo. ((required, integer) Enter a Zillow Property ID for the property being queried.)
        """
        super(GetZestimateInputSet, self)._set_input('ZPID', value)
    def set_ZWSID(self, value):
        """
        Set the value of the ZWSID input for this Choreo. ((required, string) Enter a Zillow Web Service Identifier (ZWS ID).)
        """
        super(GetZestimateInputSet, self)._set_input('ZWSID', value)
class GetZestimateResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetZestimate Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into python objects.
        # NOTE: the parameter name shadows the builtin 'str'; kept as-is
        # for API compatibility with keyword callers.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Zillow.)
        """
        return self._output.get('Response', None)
class GetZestimateChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronous GetZestimate run; only overrides
    # the result-set factory so results come back with the typed accessors.
    def _make_result_set(self, response, path):
        return GetZestimateResultSet(response, path)
|
apache-2.0
|
nagyistoce/edx-platform
|
lms/djangoapps/dashboard/support.py
|
50
|
4528
|
"""
Views for support dashboard
"""
import logging
from django.contrib.auth.models import User
from django.views.generic.edit import FormView
from django.views.generic.base import TemplateView
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.contrib import messages
from django import forms
from student.models import CourseEnrollment
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class RefundForm(forms.Form):
    """
    Form for manual refunds
    """
    user = forms.EmailField(label=_("Email Address"), required=True)
    course_id = forms.CharField(label=_("Course ID"), required=True)
    # hidden flag driving the two-step confirm flow (see is_valid below)
    confirmed = forms.CharField(widget=forms.HiddenInput, required=False)
    def clean_user(self):
        """
        validate user field
        """
        user_email = self.cleaned_data['user']
        try:
            user = User.objects.get(email=user_email)
        except User.DoesNotExist:
            raise forms.ValidationError(_("User not found"))
        return user
    def clean_course_id(self):
        """
        validate course id field
        """
        course_id = self.cleaned_data['course_id']
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            # fall back to the legacy org/course/run key format
            try:
                course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
            except InvalidKeyError:
                raise forms.ValidationError(_("Invalid course id"))
        return course_key
    def clean(self):
        """
        clean form
        """
        user, course_id = self.cleaned_data.get('user'), self.cleaned_data.get('course_id')
        if user and course_id:
            self.cleaned_data['enrollment'] = enrollment = CourseEnrollment.get_or_create_enrollment(user, course_id)
            # manual support refunds are only for enrollments that can no
            # longer be refunded automatically; if refundable() is still
            # True the learner should use the normal self-service path
            if enrollment.refundable():
                raise forms.ValidationError(_("Course {course_id} not past the refund window.").format(course_id=course_id))
            try:
                self.cleaned_data['cert'] = enrollment.certificateitem_set.filter(mode='verified', status='purchased')[0]
            except IndexError:
                raise forms.ValidationError(_("No order found for {user} in course {course_id}").format(user=user, course_id=course_id))
        return self.cleaned_data
    def is_valid(self):
        """
        returns whether form is valid
        """
        is_valid = super(RefundForm, self).is_valid()
        if is_valid and self.cleaned_data.get('confirmed') != 'true':
            # this is a two-step form: first look up the data, then issue the refund.
            # first time through, set the hidden "confirmed" field to true and then redisplay the form
            # second time through, do the unenrollment/refund.
            data = dict(self.data.items())
            self.cleaned_data['confirmed'] = data['confirmed'] = 'true'
            self.data = data
            is_valid = False
        return is_valid
class SupportDash(TemplateView):
    """
    Support dashboard view
    """
    # static landing page; the interactive refund flow lives in Refund below
    template_name = 'dashboard/support.html'
class Refund(FormView):
    """
    Refund form view
    """
    template_name = 'dashboard/_dashboard_refund.html'
    form_class = RefundForm
    success_url = '/support/'
    def get_context_data(self, **kwargs):
        """
        extra context data to add to page
        """
        form = getattr(kwargs['form'], 'cleaned_data', {})
        # on the confirmation pass, surface the looked-up cert/enrollment
        # so the template can display what is about to be refunded
        if form.get('confirmed') == 'true':
            kwargs['cert'] = form.get('cert')
            kwargs['enrollment'] = form.get('enrollment')
        return kwargs
    def form_valid(self, form):
        """
        unenrolls student, issues refund
        """
        user = form.cleaned_data['user']
        course_id = form.cleaned_data['course_id']
        enrollment = form.cleaned_data['enrollment']
        cert = form.cleaned_data['cert']
        # force-allow the refund even though the window has passed,
        # then deactivate the enrollment (which triggers the refund)
        enrollment.can_refund = True
        enrollment.update_enrollment(is_active=False)
        log.info(u"%s manually refunded %s %s", self.request.user, user, course_id)
        messages.success(self.request, _("Unenrolled {user} from {course_id}").format(user=user, course_id=course_id))
        messages.success(self.request, _("Refunded {cost} for order id {order_id}").format(cost=cert.unit_cost, order_id=cert.order.id))
        return HttpResponseRedirect('/support/refund/')
|
agpl-3.0
|
ruslanloman/nova
|
nova/network/ldapdns.py
|
68
|
13225
|
# Copyright 2012 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
import ldap
except ImportError:
# This module needs to be importable despite ldap not being a requirement
ldap = None
import time
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LW
from nova.network import dns_driver
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Configuration options for the LDAP-backed DNS driver: connection URL and
# credentials, the base DN under which zones live, and the timing fields
# serialized into each zone's SOA record.
ldap_dns_opts = [
    cfg.StrOpt('ldap_dns_url',
               default='ldap://ldap.example.com:389',
               help='URL for LDAP server which will store DNS entries'),
    cfg.StrOpt('ldap_dns_user',
               default='uid=admin,ou=people,dc=example,dc=org',
               help='User for LDAP DNS'),
    cfg.StrOpt('ldap_dns_password',
               default='password',
               help='Password for LDAP DNS',
               secret=True),
    cfg.StrOpt('ldap_dns_soa_hostmaster',
               default='hostmaster@example.org',
               help='Hostmaster for LDAP DNS driver Statement of Authority'),
    cfg.MultiStrOpt('ldap_dns_servers',
                    default=['dns.example.org'],
                    help='DNS Servers for LDAP DNS driver'),
    cfg.StrOpt('ldap_dns_base_dn',
               default='ou=hosts,dc=example,dc=org',
               help='Base DN for DNS entries in LDAP'),
    cfg.StrOpt('ldap_dns_soa_refresh',
               default='1800',
               help='Refresh interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_retry',
               default='3600',
               help='Retry interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_expiry',
               default='86400',
               help='Expiry interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_minimum',
               default='7200',
               help='Minimum interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
]

CONF.register_opts(ldap_dns_opts)
# Importing ldap.modlist breaks the tests for some reason,
# so this is an abbreviated version of a function from
# there.
def create_modlist(newattrs):
    """Build an ldap add_s modlist from a dict of attribute -> value list.

    Values are UTF-8 encoded, and the input dict is updated with the encoded
    values (matching the historical behaviour of ldap.modlist).
    """
    modlist = []
    for attrtype, values in newattrs.items():
        encoded = [utils.utf8(val) for val in values]
        newattrs[attrtype] = encoded
        modlist.append((attrtype, encoded))
    return modlist
class DNSEntry(object):
    """Base wrapper for an LDAP entry holding DNS data.

    Subclasses populate self.ldap_tuple (a (dn, attrs) pair as returned by
    search_s) and self.qualified_domain before using the helpers below.
    """

    def __init__(self, ldap_object):
        """ldap_object is an instance of ldap.LDAPObject.
        It should already be initialized and bound before
        getting passed in here.
        """
        self.lobj = ldap_object
        self.ldap_tuple = None
        self.qualified_domain = None

    @classmethod
    def _get_tuple_for_domain(cls, lobj, domain):
        """Return the first (dn, attrs) tuple matching *domain*, or None."""
        entry = lobj.search_s(CONF.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
                              '(associatedDomain=%s)' % utils.utf8(domain))
        if not entry:
            return None
        if len(entry) > 1:
            # Fix: the format string uses named placeholders, so the
            # arguments must be supplied as a mapping. They were previously
            # passed positionally, which breaks string interpolation when
            # this warning is actually emitted (compare _dequalify below,
            # which already passes a dict).
            LOG.warning(_LW("Found multiple matches for domain "
                            "%(domain)s.\n%(entry)s"),
                        {'domain': domain, 'entry': entry})
        return entry[0]

    @classmethod
    def _get_all_domains(cls, lobj):
        """List all zone names (entries bearing an SOA record) under the
        configured base DN."""
        entries = lobj.search_s(CONF.ldap_dns_base_dn,
                                ldap.SCOPE_SUBTREE, '(sOARecord=*)')
        domains = []
        for entry in entries:
            domain = entry[1].get('associatedDomain')
            if domain:
                domains.append(domain[0])
        return domains

    def _set_tuple(self, tuple):
        self.ldap_tuple = tuple

    def _qualify(self, name):
        """Append the zone suffix: 'host' -> 'host.<qualified_domain>'."""
        return '%s.%s' % (name, self.qualified_domain)

    def _dequalify(self, name):
        """Strip the zone suffix; warn and return None if *name* is not in
        this zone."""
        z = ".%s" % self.qualified_domain
        if name.endswith(z):
            dequalified = name[0:name.rfind(z)]
        else:
            LOG.warning(_LW("Unable to dequalify. %(name)s is not in "
                            "%(domain)s.\n"),
                        {'name': name,
                         'domain': self.qualified_domain})
            dequalified = None
        return dequalified

    def _dn(self):
        return self.ldap_tuple[0]
    dn = property(_dn)

    def _rdn(self):
        # Leftmost component of the DN, e.g. 'dc=host' for 'dc=host,ou=...'.
        return self.dn.partition(',')[0]
    rdn = property(_rdn)
class DomainEntry(DNSEntry):
    """A DNS zone entry (the one carrying the SOA record) stored in LDAP."""

    @classmethod
    def _soa(cls):
        """Build the SOA record string from config; the serial is the
        current timestamp so every update bumps it."""
        date = time.strftime('%Y%m%d%H%M%S')
        soa = '%s %s %s %s %s %s %s' % (
            CONF.ldap_dns_servers[0],
            CONF.ldap_dns_soa_hostmaster,
            date,
            CONF.ldap_dns_soa_refresh,
            CONF.ldap_dns_soa_retry,
            CONF.ldap_dns_soa_expiry,
            CONF.ldap_dns_soa_minimum)
        return utils.utf8(soa)

    @classmethod
    def create_domain(cls, lobj, domain):
        """Create a new domain entry, and return an object that wraps it."""
        entry = cls._get_tuple_for_domain(lobj, domain)
        if entry:
            raise exception.FloatingIpDNSExists(name=domain, domain='')

        newdn = 'dc=%s,%s' % (domain, CONF.ldap_dns_base_dn)
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'sOARecord': [cls._soa()],
                 'associatedDomain': [domain],
                 'dc': [domain]}
        lobj.add_s(newdn, create_modlist(attrs))
        return DomainEntry(lobj, domain)

    def __init__(self, ldap_object, domain):
        """Look up an existing zone entry; raises NotFound if absent."""
        super(DomainEntry, self).__init__(ldap_object)
        entry = self._get_tuple_for_domain(self.lobj, domain)
        if not entry:
            raise exception.NotFound()
        self._set_tuple(entry)
        # NOTE(review): assert is stripped under `python -O`; if this
        # invariant matters in production it should raise instead.
        assert(entry[1]['associatedDomain'][0] == domain)
        self.qualified_domain = domain

    def delete(self):
        """Delete the domain that this entry refers to."""
        # Remove all host (aRecord) children first, then the zone itself.
        entries = self.lobj.search_s(self.dn,
                                     ldap.SCOPE_SUBTREE,
                                     '(aRecord=*)')
        for entry in entries:
            self.lobj.delete_s(entry[0])
        self.lobj.delete_s(self.dn)

    def update_soa(self):
        """Rewrite the SOA record, bumping its serial to now."""
        mlist = [(ldap.MOD_REPLACE, 'sOARecord', self._soa())]
        self.lobj.modify_s(self.dn, mlist)

    def subentry_with_name(self, name):
        """Return the HostEntry for *name* in this zone, or None."""
        entry = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                   '(associatedDomain=%s.%s)' %
                                   (utils.utf8(name),
                                    utils.utf8(self.qualified_domain)))
        if entry:
            return HostEntry(self, entry[0])
        else:
            return None

    def subentries_with_ip(self, ip):
        """Return all HostEntry objects in this zone whose A record is *ip*."""
        entries = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                     '(aRecord=%s)' % utils.utf8(ip))
        objs = []
        for entry in entries:
            if 'associatedDomain' in entry[1]:
                objs.append(HostEntry(self, entry))
        return objs

    def add_entry(self, name, address):
        """Map *name* to *address*, reusing any existing entry that already
        holds this address (multiple names may share one A record entry)."""
        if self.subentry_with_name(name):
            raise exception.FloatingIpDNSExists(name=name,
                                                domain=self.qualified_domain)

        entries = self.subentries_with_ip(address)
        if entries:
            # We already have an ldap entry for this IP, so we just
            # need to add the new name.
            existingdn = entries[0].dn
            self.lobj.modify_s(existingdn, [(ldap.MOD_ADD,
                                             'associatedDomain',
                                             utils.utf8(self._qualify(name)))])
            return self.subentry_with_name(name)
        else:
            # We need to create an entirely new entry.
            newdn = 'dc=%s,%s' % (name, self.dn)
            attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                     'domain', 'dcobject', 'top'],
                     'aRecord': [address],
                     'associatedDomain': [self._qualify(name)],
                     'dc': [name]}
            self.lobj.add_s(newdn, create_modlist(attrs))
            return self.subentry_with_name(name)

    def remove_entry(self, name):
        """Remove *name* from this zone and bump the SOA serial."""
        entry = self.subentry_with_name(name)
        if not entry:
            raise exception.NotFound()
        entry.remove_name(name)
        self.update_soa()
class HostEntry(DNSEntry):
    """A host (A record) entry beneath a DomainEntry zone."""

    def __init__(self, parent, tuple):
        super(HostEntry, self).__init__(parent.lobj)
        self.parent_entry = parent
        self._set_tuple(tuple)
        self.qualified_domain = parent.qualified_domain

    def remove_name(self, name):
        """Remove *name* from this entry; delete the entry outright when it
        was the last associated domain."""
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) > 1:
            # We just have to remove the requested domain.
            self.lobj.modify_s(self.dn, [(ldap.MOD_DELETE, 'associatedDomain',
                                          self._qualify(utils.utf8(name)))])
            # Fix: rdn is a string like 'dc=<name>', so the old check
            # `self.rdn[1] == name` compared a single character against the
            # whole name and the rename branch was effectively unreachable.
            # Compare the RDN's value instead.
            if (self.rdn.partition('=')[2] == name):
                # We just removed the rdn, so we need to move this entry.
                names.remove(self._qualify(name))
                newrdn = 'dc=%s' % self._dequalify(names[0])
                self.lobj.modrdn_s(self.dn, [newrdn])
        else:
            # We should delete the entire record.
            self.lobj.delete_s(self.dn)

    def modify_address(self, name, address):
        """Point *name* at *address*. If other names share this entry, split
        *name* off into its own entry instead of touching the shared one."""
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) == 1:
            self.lobj.modify_s(self.dn, [(ldap.MOD_REPLACE, 'aRecord',
                                          [utils.utf8(address)])])
        else:
            self.remove_name(name)
            self.parent.add_entry(name, address)

    def _names(self):
        """All associated names with the zone suffix stripped."""
        names = []
        for domain in self.ldap_tuple[1]['associatedDomain']:
            names.append(self._dequalify(domain))
        return names
    names = property(_names)

    def _ip(self):
        """The entry's (single) A record address."""
        ip = self.ldap_tuple[1]['aRecord'][0]
        return ip
    ip = property(_ip)

    def _parent(self):
        return self.parent_entry
    parent = property(_parent)
class LdapDNS(dns_driver.DNSDriver):
    """Driver for PowerDNS using ldap as a back end.

    This driver assumes ldap-method=strict, with all domains
    in the top-level, aRecords only.
    """

    def __init__(self):
        if not ldap:
            raise ImportError(_('ldap not installed'))

        self.lobj = ldap.initialize(CONF.ldap_dns_url)
        self.lobj.simple_bind_s(CONF.ldap_dns_user,
                                CONF.ldap_dns_password)

    def get_domains(self):
        """Return the names of all zones stored in LDAP."""
        return DomainEntry._get_all_domains(self.lobj)

    def create_entry(self, name, address, type, domain):
        # NOTE: `type` shadows the builtin but is part of the DNSDriver
        # interface signature, so the name is kept.
        if type.lower() != 'a':
            raise exception.InvalidInput(_("This driver only supports "
                                           "type 'a' entries."))

        dEntry = DomainEntry(self.lobj, domain)
        dEntry.add_entry(name, address)

    def delete_entry(self, name, domain):
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.remove_entry(name)

    def get_entries_by_address(self, address, domain):
        """Return every name in *domain* that maps to *address* ([] if none)."""
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        entries = dEntry.subentries_with_ip(address)
        names = []
        for entry in entries:
            names.extend(entry.names)
        return names

    def get_entries_by_name(self, name, domain):
        """Return the address of *name* in *domain* as a list ([] if none)."""
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        nEntry = dEntry.subentry_with_name(name)
        if nEntry:
            return [nEntry.ip]
        # Fix: previously fell through and returned None when the name was
        # missing; return an empty list for consistency with
        # get_entries_by_address so callers can always iterate the result.
        return []

    def modify_address(self, name, address, domain):
        dEntry = DomainEntry(self.lobj, domain)
        nEntry = dEntry.subentry_with_name(name)
        nEntry.modify_address(name, address)

    def create_domain(self, domain):
        DomainEntry.create_domain(self.lobj, domain)

    def delete_domain(self, domain):
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.delete()

    def delete_dns_file(self):
        LOG.warning(_LW("This shouldn't be getting called except during "
                        "testing."))
|
apache-2.0
|
hronoses/vispy
|
vispy/geometry/tests/test_generation.py
|
4
|
1541
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.testing import run_tests_if_main
from vispy.geometry import (create_box, create_cube, create_cylinder,
create_sphere, create_plane)
def test_box():
    """Box mesh: filled and outline indices cover every vertex exactly."""
    vertices, filled, outline = create_box()
    expected = np.arange(len(vertices))
    assert_array_equal(expected, np.unique(filled))
    assert_array_equal(expected, np.unique(outline))
def test_cube():
    """Cube mesh: filled and outline indices cover every vertex exactly."""
    vertices, filled, outline = create_cube()
    expected = np.arange(len(vertices))
    assert_array_equal(expected, np.unique(filled))
    assert_array_equal(expected, np.unique(outline))
def test_sphere():
    """Sphere mesh: every vertex lies at the requested radius."""
    mesh = create_sphere(10, 20, radius=10)
    distances = np.linalg.norm(mesh.get_vertices(), axis=1)
    assert_allclose(distances, np.full_like(distances, 10))
def test_cylinder():
    """Cylinder mesh: every vertex sits at the requested xy-plane radius."""
    mesh = create_cylinder(10, 20, radius=[10, 10])
    distances = np.linalg.norm(mesh.get_vertices()[:, :2], axis=1)
    assert_allclose(distances, np.full_like(distances, 10))
def test_plane():
    """Plane mesh: filled and outline indices cover every vertex exactly."""
    vertices, filled, outline = create_plane()
    expected = np.arange(len(vertices))
    assert_array_equal(expected, np.unique(filled))
    assert_array_equal(expected, np.unique(outline))
run_tests_if_main()
|
bsd-3-clause
|
stephane-caron/rss-2015
|
lib/pymanoid_sage/ik.py
|
1
|
7773
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid.
#
# pymanoid is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
import cvxopt
import cvxopt.solvers
import numpy
import pymanoid_sage
import time
import vector
from numpy import array, dot, eye, hstack, vstack, zeros
cvxopt.solvers.options['show_progress'] = False # disable cvxopt output
CONV_THRES = 1e-2
DEBUG = False
DOF_SCALE = 0.8 # additional scaling to avoid joint-limit saturation
GAIN = 1.
DOF_LIM_GAIN = 0.05 # NB: this can act as velocity limiter if not set properly
MAX_ITER = 150
def full_to_active(x, active_dofs):
    """Extract the active-DOF coordinates from a full configuration vector.

    Returns a float array ordered like active_dofs, picking x[dof.index]
    for each DOF.
    """
    return numpy.array([x[dof.index] for dof in active_dofs], dtype=float)
class IKError(Exception):
    """IK failure; *q* is the configuration reached when the solver gave up."""

    def __init__(self, msg=None, q=None):
        self.msg, self.q = msg, q

    def __str__(self):
        return self.msg
class SelfCollides(Exception):
    """Raised when a candidate configuration *q* puts the robot in
    self-collision; the message is the robot's last collision report."""

    def __init__(self, hrp, q):
        self.msg, self.q = str(hrp.last_collision), q

    def __str__(self):
        return self.msg
class KinematicTask(object):
    """Pair of callables: residual f() and its Jacobian J()."""

    def __init__(self, f, J):
        self.f, self.J = f, J
class LinkFrameTask(KinematicTask):
    """
    Enforce a given pose for a frame attached to the link. The origin of the
    frame is taken as the point of coordinates local_origin in the link's
    reference frame (RF). The orientation is the same as that of the RF.
    """

    def __init__(self, robot, active_dofs, link, local_origin, target_pose):
        active_indexes = [dof.index for dof in active_dofs]
        index = link.GetIndex()

        def f():
            # Residual between the frame's current pose and target_pose.
            T = link.GetTransform()
            pose = link.GetTransformPose()
            # Shift the translation part by local_origin expressed in the
            # world frame.
            # NOTE(review): pose appears to be [rotation(4), translation(3)]
            # with the quaternion first (OpenRAVE GetTransformPose
            # convention) -- confirm against the OpenRAVE API.
            pose[4:] += dot(T[0:3, 0:3], local_origin)
            return pose - target_pose

        def J():
            T = link.GetTransform()
            pose = link.GetTransformPose()
            pose[4:] += dot(T[0:3, 0:3], local_origin)
            rot, pos = pose[:4], pose[4:]
            J_trans = robot.rave.CalculateJacobian(index, pos)
            J_rot = robot.rave.CalculateRotationJacobian(index, rot)
            J_full = numpy.vstack([J_rot, J_trans])
            # NB: vstack has same order as GetTransformPose()
            return J_full[:, active_indexes]

        super(LinkFrameTask, self).__init__(f, J)
class COMTask(KinematicTask):
    """Drive the robot's center of mass toward a fixed 3D target."""

    def __init__(self, robot, active_dofs, target_com):
        assert target_com.shape == (3,)
        columns = [dof.index for dof in active_dofs]

        def residual():
            return robot.compute_com() - target_com

        def jacobian():
            return robot.compute_com_jacobian()[:, columns]

        super(COMTask, self).__init__(residual, jacobian)
        self.target = target_com
class PrioritizedKinematics(object):
    """Prioritized (hierarchical) IK solver over a list of kinematic tasks.

    Tasks are solved in list order: each task's QP minimizes its residual
    subject to joint-velocity box limits while freezing (via equality
    constraints) the velocity components achieved by all earlier tasks.
    """

    def __init__(self, robot, active_dofs):
        self.active_dofs = active_dofs
        self.active_indexes = [dof.index for dof in self.active_dofs]
        self.nb_active_dof = len(active_dofs)
        self.robot = robot
        self.tasks = []

    def append_link_frame_task(self, link, local_origin, target_pose):
        # Appended tasks have lower priority than earlier ones.
        new_task = LinkFrameTask(
            self.robot, self.active_dofs, link, local_origin, target_pose)
        return self.tasks.append(new_task)

    def append_com_task(self, target_com):
        new_task = COMTask(self.robot, self.active_dofs, target_com)
        return self.tasks.append(new_task)

    def show_debug_info(self, itnum):
        # Print per-task residual norms and display COM/target markers.
        conv_vect = array([vector.norm(task.f()) for task in self.tasks])
        conv_str = ["%10.8f" % x for x in conv_vect]
        print " %4d: %s" % (itnum, ' '.join(conv_str))
        for task in self.tasks:
            if type(task) is COMTask:
                com = self.robot.compute_com(self.robot.rave.GetDOFValues())
                pymanoid_sage.rave.display_box(
                    self.robot.env, com, box_id="COM", color='g',
                    thickness=0.01)
                pymanoid_sage.rave.display_box(
                    self.robot.env, task.target, box_id='Target', color='b',
                    thickness=0.01)

    def get_active_dof_limits(self):
        # Shrink limits by DOF_SCALE around mid-range to stay away from
        # joint-limit saturation.
        q_max = array([dof.ulim for dof in self.active_dofs])
        q_min = array([dof.llim for dof in self.active_dofs])
        q_avg = .5 * (q_max + q_min)
        q_dev = .5 * (q_max - q_min)
        q_max = q_avg + DOF_SCALE * q_dev
        q_min = q_avg - DOF_SCALE * q_dev
        return q_max, q_min

    @property
    def converged(self):
        # Converged when every task residual norm is below CONV_THRES.
        conv_norms = (vector.norm(task.f()) for task in self.tasks)
        return max(conv_norms) < CONV_THRES

    def solve_in_place(self, q_start):
        """Iterate the prioritized QP cascade from q_start, mutating the
        robot's DOF values; returns the full configuration vector."""
        self.robot.rave.SetDOFValues(q_start)
        self.robot.rave.SetActiveDOFs(self.active_indexes)
        q = full_to_active(q_start, self.active_dofs)
        q_max, q_min = self.get_active_dof_limits()
        I = eye(self.nb_active_dof)
        for itnum in xrange(MAX_ITER):
            if self.converged:
                break
            if DEBUG:
                self.show_debug_info(itnum)
                time.sleep(0.1)
            dq = zeros(self.nb_active_dof)
            # Velocity box derived from remaining range to the joint limits.
            dq_max = DOF_LIM_GAIN * (q_max - q)
            dq_min = DOF_LIM_GAIN * (q_min - q)
            Jstack, bstack = None, None
            for i, task in enumerate(self.tasks):
                Ji, bi = task.J(), -GAIN * task.f()
                # min. || Ji * dq - bi ||
                # s.t. dq_min <= dq <= dq_max
                # and (Jj * dq) stays the same for all j < i
                qp_P = cvxopt.matrix(dot(Ji.T, Ji))
                qp_q = cvxopt.matrix(dot(-bi.T, Ji))
                qp_G = cvxopt.matrix(vstack([+I, -I]))
                qp_h = cvxopt.matrix(hstack([dq_max, -dq_min]))
                qp_args = [qp_P, qp_q, qp_G, qp_h]
                if Jstack is not None:
                    # Equality constraints pin the contributions of all
                    # higher-priority tasks.
                    qp_A = cvxopt.matrix(Jstack)
                    qp_b = cvxopt.matrix(bstack)
                    qp_args.extend([qp_A, qp_b])
                qp_x = cvxopt.solvers.qp(*qp_args)['x']
                dq = array(qp_x).reshape((I.shape[0],))
                Js, bs = Ji, dot(Ji, dq)
                if type(task) is LinkFrameTask:
                    # removing one angular coord since cvxopt does not support
                    # redundancy in its equality constraints
                    Js, bs = Js[1:], bs[1:]
                Jstack = Js if Jstack is None else vstack([Jstack, Js])
                bstack = bs if bstack is None else hstack([bstack, bs])
            q += dq
            assert all(dq <= dq_max)
            assert all(dq_min <= dq)
            self.robot.rave.SetActiveDOFValues(q)
        return self.robot.rave.GetDOFValues()

    def solve(self, q_start):
        """Solve IK from q_start; restores robot state (unless DEBUG) and
        raises SelfCollides or IKError on failure."""
        if DEBUG:
            q = self.solve_in_place(q_start)
        else:
            # Context manager restores the robot's state afterwards.
            with self.robot.rave:
                q = self.solve_in_place(q_start)
        if self.robot.self_collides(q):
            raise SelfCollides(self.robot, q)
        elif not self.converged:
            raise IKError("did not converge", q)
        return q
|
gpl-3.0
|
Harmon758/Harmonbot
|
Discord/utilities/logging.py
|
1
|
5836
|
import asyncio
import datetime
import logging
import logging.handlers
import sys
from aiohttp.web_log import AccessLogger
from utilities.database import create_database_connection
sys.path.insert(0, "..")
from units.files import create_folder
sys.path.pop(0)
class ConsoleLogger(object):
    '''Tee-like stdout replacement that mirrors writes into a logger'''

    def __init__(self, log, prefix = ""):
        self.log = log
        self.prefix = prefix
        # Keep a handle on the real console so output still reaches it
        self.console = sys.__stdout__
        self.console.reconfigure(encoding = "UTF-8")

    def write(self, message):
        self.console.write(message)
        # Skip whitespace-only writes (e.g. the newline print() emits)
        if message.isspace():
            return
        self.log(self.prefix + message)

    def flush(self):
        # File-like API compliance; nothing buffered here
        pass
def initialize_logging(data_path):
    """Set up all file-based logging under <data_path>/logs/.

    Configures: a console tee (stdout mirrored into console.log), error
    loggers with an uncaught-exception hook, a midnight-rotating discord.py
    log, and aiohttp client/server/web logs.
    """
    path = data_path + "/logs/"
    # Create log folders
    create_folder(path + "aiohttp")
    create_folder(path + "discord")
    # Console log
    console_logger = logging.getLogger("console")
    console_logger.setLevel(logging.DEBUG)
    console_logger_handler = logging.FileHandler(filename = path + "console.log", encoding = "UTF-8", mode = 'a')
    console_logger_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    console_logger.addHandler(console_logger_handler)
    # Replace stdout so everything printed also lands in console.log
    sys.stdout = ConsoleLogger(console_logger.info)
    ## sys.stderr = ConsoleLogger(errors_logger.error, "Error")
    ## sys.stderr = ConsoleLogger(console_logger.error)
    # Errors/Exceptions logs
    # TODO: rename to exceptions?
    errors_logger = logging.getLogger("errors")
    errors_logger.setLevel(logging.DEBUG)
    errors_logger_handler_1 = logging.FileHandler(filename = path + "errors.log", encoding = "UTF-8", mode = 'a')
    errors_logger_handler_1.setFormatter(logging.Formatter("\n\n%(asctime)s\n%(message)s"))
    errors_logger_handler_2 = logging.FileHandler(filename = path + "unresolved_errors.log", encoding = "UTF-8", mode = 'a')
    errors_logger_handler_2.setFormatter(logging.Formatter("\n\n%(asctime)s\n%(message)s"))
    errors_logger.addHandler(errors_logger_handler_1)
    errors_logger.addHandler(errors_logger_handler_2)
    def log_exception(exc_type, exc_value, exc_traceback):
        # Keep the default traceback printing, then record to the error logs
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        errors_logger.error("Uncaught exception\n", exc_info = (exc_type, exc_value, exc_traceback))
    sys.excepthook = log_exception
    # discord.py log
    discord_logger = logging.getLogger("discord")
    discord_logger.setLevel(logging.INFO)
    # Rotates at midnight; huge backupCount effectively keeps all history
    discord_handler = logging.handlers.TimedRotatingFileHandler(filename = path + "discord/discord.log", when = "midnight", backupCount = 3650000, encoding = "UTF-8")
    discord_handler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s"))
    discord_logger.addHandler(discord_handler)
    # handler to output to console
    ## console_handler = logging.StreamHandler(sys.stdout)
    # not used by aiohttp server log
    # aiohttp logs
    # aiohttp server access log
    # replaced by AiohttpAccessLogger logging to database
    ## TODO: Rotate
    ## aiohttp_access_logger = logging.getLogger("aiohttp.access")
    ## aiohttp_access_logger.setLevel(logging.DEBUG)
    ## aiohttp_access_logger_handler = logging.FileHandler(filename = path + "aiohttp/access.log",
    ##                                                     encoding = "UTF-8", mode = 'a')
    ## aiohttp_access_logger_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    ## aiohttp_access_logger.addHandler(aiohttp_access_logger_handler)
    # aiohttp client log
    aiohttp_client_logger = logging.getLogger("aiohttp.client")
    aiohttp_client_logger_handler = logging.FileHandler(filename = path + "aiohttp/client.log", encoding = "UTF-8", mode = 'a')
    aiohttp_client_logger_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    aiohttp_client_logger.addHandler(aiohttp_client_logger_handler)
    # aiohttp server log
    aiohttp_server_logger = logging.getLogger("aiohttp.server")
    aiohttp_server_logger.setLevel(logging.DEBUG)
    aiohttp_server_logger_handler = logging.FileHandler(filename = path + "aiohttp/server.log", encoding = "UTF-8", mode = 'a')
    aiohttp_server_logger_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    aiohttp_server_logger.addHandler(aiohttp_server_logger_handler)
    ## aiohttp_server_logger.addHandler(console_handler)
    # aiohttp web log
    aiohttp_web_logger = logging.getLogger("aiohttp.web")
    aiohttp_web_logger.setLevel(logging.DEBUG) # Necessary?
    aiohttp_web_logger_handler = logging.FileHandler(filename = path + "aiohttp/web.log", encoding = "UTF-8", mode = 'a')
    aiohttp_web_logger_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    aiohttp_web_logger.addHandler(aiohttp_web_logger_handler)
class AiohttpAccessLogger(AccessLogger):
    """aiohttp access logger that records each request in the database
    instead of writing to the standard access log."""

    def log(self, request, response, time):
        # super().log(request, response, time)
        # Fire-and-forget: the insert runs as its own task so the response
        # is not delayed by the database write.
        asyncio.create_task(self.log_to_database(request, response, time), name = "Log aiohttp access to database")

    async def log_to_database(self, request, response, time):
        """Insert one access-log row.

        *time* is the request duration in seconds; the request start
        timestamp is reconstructed by subtracting it from now.
        """
        async with create_database_connection() as connection:
            await connection.execute(
                """
                INSERT INTO aiohttp.access_log
                VALUES ($1, $2, $3, $4, $5, $6, $7)
                """,
                datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(seconds = time),
                self._format_a(request, response, time),
                self._format_r(request, response, time),
                response.status, response.body_length,
                self._format_i("Referer", request, response, time),
                # backslashreplace round-trip guards against user-agent
                # strings that are not valid UTF-8
                self._format_i("User-Agent", request, response, time).encode("UTF-8", "backslashreplace").decode("UTF-8")
            )
async def initialize_aiohttp_access_logging(database):
    """Create the aiohttp schema and access_log table if missing.

    Idempotent; run at startup before AiohttpAccessLogger starts
    inserting rows.
    """
    await database.execute("CREATE SCHEMA IF NOT EXISTS aiohttp")
    await database.execute(
        """
        CREATE TABLE IF NOT EXISTS aiohttp.access_log (
            request_start_timestamp TIMESTAMPTZ PRIMARY KEY,
            remote_ip_address TEXT,
            request_first_line TEXT,
            response_status_code INT,
            response_bytes_size INT,
            request_referer TEXT,
            request_user_agent TEXT
        )
        """
    )
|
mit
|
procangroup/edx-platform
|
openedx/core/djangoapps/programs/management/commands/backpopulate_program_credentials.py
|
12
|
4490
|
"""Management command for backpopulating missing program credentials."""
import logging
from collections import namedtuple
from django.contrib.sites.models import Site
from django.core.management import BaseCommand
from django.db.models import Q
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate # pylint: disable=import-error
from course_modes.models import CourseMode
from openedx.core.djangoapps.catalog.utils import get_programs
from openedx.core.djangoapps.programs.tasks.v1.tasks import award_program_certificates
# TODO: Log to console, even with debug mode disabled?
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
CourseRun = namedtuple('CourseRun', ['key', 'type'])
class Command(BaseCommand):
    """Management command for backpopulating missing program credentials.

    The command's goal is to pass a narrow subset of usernames to an idempotent
    Celery task for further (parallelized) processing.
    """
    help = 'Backpopulate missing program credentials.'
    # Populated by handle(): set of CourseRun tuples and candidate usernames.
    course_runs = None
    usernames = None

    def add_arguments(self, parser):
        """Register the -c/--commit flag; without it the run is a dry run."""
        parser.add_argument(
            '-c', '--commit',
            action='store_true',
            dest='commit',
            default=False,
            help='Submit tasks for processing.'
        )

    def handle(self, *args, **options):
        """Find candidates and (with --commit) enqueue one task per user."""
        logger.info('Loading programs from the catalog.')
        self._load_course_runs()

        logger.info('Looking for users who may be eligible for a program certificate.')
        self._load_usernames()

        if options.get('commit'):
            logger.info('Enqueuing program certification tasks for %d candidates.', len(self.usernames))
        else:
            logger.info(
                'Found %d candidates. To enqueue program certification tasks, pass the -c or --commit flags.',
                len(self.usernames)
            )
            return

        succeeded, failed = 0, 0
        for username in self.usernames:
            try:
                award_program_certificates.delay(username)
            except:  # pylint: disable=bare-except
                # Keep going: one bad enqueue shouldn't abort the backfill.
                failed += 1
                logger.exception('Failed to enqueue task for user [%s]', username)
            else:
                succeeded += 1
                logger.debug('Successfully enqueued task for user [%s]', username)

        logger.info(
            'Done. Successfully enqueued tasks for %d candidates. '
            'Failed to enqueue tasks for %d candidates.',
            succeeded,
            failed
        )

    def _load_course_runs(self):
        """Find all course runs which are part of a program."""
        programs = []
        for site in Site.objects.all():
            logger.info('Loading programs from the catalog for site %s.', site.domain)
            programs.extend(get_programs(site))

        self.course_runs = self._flatten(programs)

    def _flatten(self, programs):
        """Flatten programs into a set of course runs."""
        course_runs = set()
        for program in programs:
            for course in program['courses']:
                for course_run in course['course_runs']:
                    key = CourseKey.from_string(course_run['key'])
                    course_runs.add(
                        CourseRun(key, course_run['type'])
                    )

        return course_runs

    def _load_usernames(self):
        """Identify a subset of users who may be eligible for a program certificate.

        This is done by finding users who have earned a qualifying certificate in
        at least one program course's course run.
        """
        # Fix: `reduce` is not a builtin on Python 3; import it explicitly so
        # the OR-fold over course-run Q objects works on both Python 2 and 3.
        from functools import reduce

        status_query = Q(status__in=CertificateStatuses.PASSED_STATUSES)
        course_run_query = reduce(
            lambda x, y: x | y,
            [Q(course_id=course_run.key, mode=course_run.type) for course_run in self.course_runs]
        )

        # Account for the fact that no-id-professional and professional are equivalent
        for course_run in self.course_runs:
            if course_run.type == CourseMode.PROFESSIONAL:
                course_run_query |= Q(course_id=course_run.key, mode=CourseMode.NO_ID_PROFESSIONAL_MODE)

        query = status_query & course_run_query

        username_dicts = GeneratedCertificate.eligible_certificates.filter(query).values('user__username').distinct()
        self.usernames = [d['user__username'] for d in username_dicts]
|
agpl-3.0
|
raildo/nova
|
nova/tests/unit/cells/test_cells_state_manager.py
|
17
|
11725
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellStateManager
"""
import time
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
import six
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova import test
from nova import utils
FAKE_COMPUTES = [
('host1', 1024, 100, 0, 0),
('host2', 1024, 100, -1, -1),
('host3', 1024, 100, 1024, 100),
('host4', 1024, 100, 300, 30),
]
FAKE_COMPUTES_N_TO_ONE = [
('host1', 1024, 100, 0, 0),
('host1', 1024, 100, -1, -1),
('host2', 1024, 100, 1024, 100),
('host2', 1024, 100, 300, 30),
]
# NOTE(alaski): It's important to have multiple types that end up having the
# same memory and disk requirements. So two types need the same first value,
# and two need the second and third values to add up to the same thing.
FAKE_ITYPES = [
(0, 0, 0),
(50, 12, 13),
(50, 2, 4),
(10, 20, 5),
]
def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk):
    """Build a ComputeNode object from one FAKE_COMPUTES-style tuple."""
    fields = {
        'host': host,
        'memory_mb': total_mem,
        'local_gb': total_disk,
        'free_ram_mb': free_mem,
        'free_disk_gb': free_disk,
    }
    return objects.ComputeNode(**fields)
# NOTE(review): decorated with @classmethod at module level so it can be
# installed directly onto objects.ServiceList as a bound classmethod via
# stubs.Set — confirm against the setUp stubbing below.
@classmethod
def _fake_service_get_all_by_binary(cls, context, binary):
    # One enabled Service per FAKE_COMPUTES row; only the host matters here,
    # the resource numbers in each tuple are ignored.
    def _node(host, total_mem, total_disk, free_mem, free_disk):
        return objects.Service(host=host, disabled=False)
    return [_node(*fake) for fake in FAKE_COMPUTES]
# Stub for objects.ComputeNodeList.get_all: one compute node per host.
@classmethod
def _fake_compute_node_get_all(cls, context):
    return [_create_fake_node(*fake) for fake in FAKE_COMPUTES]
# Stub variant where multiple compute nodes share each host (N-to-one).
@classmethod
def _fake_compute_node_n_to_one_get_all(cls, context):
    return [_create_fake_node(*fake) for fake in FAKE_COMPUTES_N_TO_ONE]
def _fake_cell_get_all(context):
return []
def _fake_instance_type_all(context):
    """Stub for db.flavor_get_all, built from the FAKE_ITYPES tuples."""
    return [{'memory_mb': mem, 'root_gb': root, 'ephemeral_gb': eph}
            for mem, root, eph in FAKE_ITYPES]
class TestCellsStateManager(test.NoDBTestCase):
    def setUp(self):
        """Stub compute/service/flavor/cell lookups with the static fakes."""
        super(TestCellsStateManager, self).setUp()
        self.stubs.Set(objects.ComputeNodeList, 'get_all',
                       _fake_compute_node_get_all)
        self.stubs.Set(objects.ServiceList, 'get_by_binary',
                       _fake_service_get_all_by_binary)
        self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all)
        self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
    def test_cells_config_not_found(self):
        """A configured but missing cells_config file must raise
        ConfigFilesNotFoundError naming the bad file."""
        self.flags(cells_config='no_such_file_exists.conf', group='cells')
        e = self.assertRaises(cfg.ConfigFilesNotFoundError,
                              state.CellStateManager)
        self.assertEqual(['no_such_file_exists.conf'], e.config_files)
    @mock.patch.object(cfg.ConfigOpts, 'find_file')
    @mock.patch.object(utils, 'read_cached_file')
    def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
        """With a cells_config file present, the factory returns the
        file-backed manager, which rejects all update operations."""
        mock_find_file.return_value = "/etc/nova/cells.json"
        # NOTE(review): six.StringIO({}) passes a dict where io.StringIO
        # expects a string — presumably meant as a minimal '{}' payload;
        # confirm against utils.read_cached_file's contract.
        mock_read_cached_file.return_value = (False, six.StringIO({}))
        self.flags(cells_config='cells.json', group='cells')
        manager = state.CellStateManager()
        self.assertIsInstance(manager,
                              state.CellStateManagerFile)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_create, None, None)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_update, None, None, None)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_delete, None, None)
    def test_dbmanager_returned(self):
        """Without a cells_config file, the DB-backed manager is returned."""
        self.assertIsInstance(state.CellStateManager(),
                              state.CellStateManagerDB)
def test_capacity_no_reserve(self):
# utilize entire cell
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = cell_free_ram / 50
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 5 # 4 on host 3, 1 on host4
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_full_reserve(self):
# reserve the entire cell. (utilize zero percent)
cap = self._capacity(100.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_part_reserve(self):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = 10 # 10 from host 3
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 2 # 2 on host 3
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def _get_state_manager(self, reserve_percent=0.0):
self.flags(reserve_percent=reserve_percent, group='cells')
return state.CellStateManager()
def _capacity(self, reserve_percent):
state_manager = self._get_state_manager(reserve_percent)
my_state = state_manager.get_my_state()
return my_state.capacities
class TestCellsStateManagerNToOne(TestCellsStateManager):
    """Same capacity checks when several services map to one compute node."""

    def setUp(self):
        super(TestCellsStateManagerNToOne, self).setUp()
        # Swap in the N-services-to-one-compute fixture data.
        self.stubs.Set(objects.ComputeNodeList, 'get_all',
                       _fake_compute_node_n_to_one_get_all)

    def test_capacity_part_reserve(self):
        # utilize half the cell's free capacity
        cap = self._capacity(50.0)
        cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES_N_TO_ONE)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
        cell_free_disk = (1024 *
                          sum(compute[4] for compute in FAKE_COMPUTES_N_TO_ONE))
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
        units = 6 # 6 from host 2
        self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
        sz = 25 * 1024
        units = 1 # 1 on host 2
        self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
class TestCellStateManagerException(test.NoDBTestCase):
    """DB errors during the initial cell-data sync are retried, not fatal."""

    @mock.patch.object(time, 'sleep')
    def test_init_db_error(self, mock_sleep):
        class TestCellStateManagerDB(state.CellStateManagerDB):
            def __init__(self):
                # First sync raises DBError, second returns cleanly; the
                # base __init__ should sleep 30s between the two attempts.
                self._cell_data_sync = mock.Mock()
                self._cell_data_sync.side_effect = [db_exc.DBError(), []]
                super(TestCellStateManagerDB, self).__init__()
        test = TestCellStateManagerDB()
        mock_sleep.assert_called_once_with(30)
        self.assertEqual(2, test._cell_data_sync.call_count)
class TestCellsGetCapacity(TestCellsStateManager):
    """get_capacities() aggregation across this cell and its children."""

    def setUp(self):
        super(TestCellsGetCapacity, self).setUp()
        self.capacities = {"ram_free": 1234}
        self.state_manager = self._get_state_manager()
        # Two child cells, each reporting the same canned capacities.
        cell = models.Cell(name="cell_name")
        other_cell = models.Cell(name="other_cell_name")
        cell.capacities = self.capacities
        other_cell.capacities = self.capacities
        self.stubs.Set(self.state_manager, 'child_cells',
                       {"cell_name": cell,
                        "other_cell_name": other_cell})

    def test_get_cell_capacity_for_all_cells(self):
        # No cell name -> sum of my cell plus both children (3 * 1234).
        self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
                       self.capacities)
        capacities = self.state_manager.get_capacities()
        self.assertEqual({"ram_free": 3702}, capacities)

    def test_get_cell_capacity_for_the_parent_cell(self):
        # Naming the parent cell also aggregates the children into it.
        self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
                       self.capacities)
        capacities = self.state_manager.\
            get_capacities(self.state_manager.my_cell_state.name)
        self.assertEqual({"ram_free": 3702}, capacities)

    def test_get_cell_capacity_for_a_cell(self):
        # Naming a single child returns just that child's capacities.
        self.assertEqual(self.capacities,
                self.state_manager.get_capacities(cell_name="cell_name"))

    def test_get_cell_capacity_for_non_existing_cell(self):
        self.assertRaises(exception.CellNotFound,
                          self.state_manager.get_capacities,
                          cell_name="invalid_cell_name")
class FakeCellStateManager(object):
    """Records _cell_data_sync invocations instead of doing real work."""

    def __init__(self):
        # Each entry is a ('_cell_data_sync', force) tuple, in call order.
        self.called = []

    def _cell_data_sync(self, force=False):
        """Log the sync request; a real manager refreshes cell state here."""
        self.called.append(('_cell_data_sync', force))
class TestSyncDecorators(test.NoDBTestCase):
    """sync_before/sync_after must wrap calls with a cell data sync."""

    def test_sync_before(self):
        manager = FakeCellStateManager()

        def test(inst, *args, **kwargs):
            # The wrapped function must receive the manager and the
            # original positional/keyword arguments untouched.
            self.assertEqual(manager, inst)
            self.assertEqual((1, 2, 3), args)
            self.assertEqual(dict(a=4, b=5, c=6), kwargs)
            return 'result'
        wrapper = state.sync_before(test)
        result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
        self.assertEqual('result', result)
        # sync_before triggers a non-forced sync ahead of the call.
        self.assertEqual([('_cell_data_sync', False)], manager.called)

    def test_sync_after(self):
        manager = FakeCellStateManager()

        def test(inst, *args, **kwargs):
            self.assertEqual(manager, inst)
            self.assertEqual((1, 2, 3), args)
            self.assertEqual(dict(a=4, b=5, c=6), kwargs)
            return 'result'
        wrapper = state.sync_after(test)
        result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
        self.assertEqual('result', result)
        # sync_after forces a sync once the call has completed.
        self.assertEqual([('_cell_data_sync', True)], manager.called)
|
apache-2.0
|
azaghal/ansible
|
test/units/module_utils/basic/test_run_command.py
|
5
|
10520
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import errno
from itertools import product
from io import BytesIO
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY2
from ansible.module_utils.compat import selectors
class OpenBytesIO(BytesIO):
    """A BytesIO whose close() is a no-op.

    Lets a test read the buffer back even after the code under test has
    called close() on the stream.
    """

    def close(self):
        # Intentionally keep the buffer open for post-mortem inspection.
        pass
@pytest.fixture
def mock_os(mocker):
    """Patch ansible.module_utils.basic.os with deterministic behavior.

    chdir raises EPERM for '/inaccessible'; abspath resolves relative
    paths against the mocked getcwd() ('/home/foo' unless a test
    overrides it).
    """
    def mock_os_chdir(path):
        # Simulate a directory the process is not allowed to enter.
        if path == '/inaccessible':
            raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")

    def mock_os_abspath(path):
        if path.startswith('/'):
            return path
        else:
            # Resolve against whatever getcwd is currently mocked to return.
            return os.getcwd.return_value + '/' + path

    os = mocker.patch('ansible.module_utils.basic.os')
    os.path.expandvars.side_effect = lambda x: x
    os.path.expanduser.side_effect = lambda x: x
    os.environ = {'PATH': '/bin'}
    os.getcwd.return_value = '/home/foo'
    os.path.isdir.return_value = True
    os.chdir.side_effect = mock_os_chdir
    os.path.abspath.side_effect = mock_os_abspath

    yield os
class DummyFileObj():
    """Minimal stand-in exposing just a .fileobj attribute."""

    def __init__(self, fileobj):
        self.fileobj = fileobj
class SpecialBytesIO(BytesIO):
    """BytesIO that reports a fixed file-descriptor handle via fileno().

    Equality treats two instances as equal when they wrap the same
    descriptor.  Tests swap in replacement stdout/stderr buffers after
    subprocess.Popen has already handed out its streams; matching on the
    descriptor lets the swapped-in buffer compare equal to the original.
    """

    def __init__(self, *args, **kwargs):
        handle = kwargs.pop('fh', None)
        super(SpecialBytesIO, self).__init__(*args, **kwargs)
        self.fh = handle

    def fileno(self):
        return self.fh

    def __eq__(self, other):
        # Identity first (cheap), then descriptor equality.
        return self is other or self.fh == other.fileno()
class DummyKey:
    """Selector-key lookalike carrying only the registered file object."""

    def __init__(self, fileobj):
        self.fileobj = fileobj
@pytest.fixture
def mock_subprocess(mocker):
    """Patch ansible.module_utils.basic.subprocess with canned streams.

    Installs MockSelector as selectors.DefaultSelector so run_command's
    select loop reads from the SpecialBytesIO buffers stored in
    subprocess._output, keyed by the stdout/stderr sentinels.
    """
    class MockSelector(selectors.BaseSelector):
        def __init__(self):
            super(MockSelector, self).__init__()
            self._file_objs = []

        def register(self, fileobj, events, data=None):
            self._file_objs.append(fileobj)

        def unregister(self, fileobj):
            self._file_objs.remove(fileobj)

        def select(self, timeout=None):
            # Report every registered stream as readable, handing back
            # whichever buffer is *currently* stored for its descriptor
            # (tests may have swapped it since registration).
            ready = []
            for file_obj in self._file_objs:
                ready.append((DummyKey(subprocess._output[file_obj.fileno()]), selectors.EVENT_READ))
            return ready

        def get_map(self):
            return self._file_objs

        def close(self):
            super(MockSelector, self).close()
            self._file_objs = []

    selectors.DefaultSelector = MockSelector

    subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
    # Tests overwrite these buffers to control what the fake process
    # emits on stdout/stderr.
    subprocess._output = {mocker.sentinel.stdout: SpecialBytesIO(b'', fh=mocker.sentinel.stdout),
                          mocker.sentinel.stderr: SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}

    cmd = mocker.MagicMock()
    cmd.returncode = 0
    cmd.stdin = OpenBytesIO()
    cmd.stdout = subprocess._output[mocker.sentinel.stdout]
    cmd.stderr = subprocess._output[mocker.sentinel.stderr]
    subprocess.Popen.return_value = cmd

    yield subprocess
@pytest.fixture()
def rc_am(mocker, am, mock_os, mock_subprocess):
    """AnsibleModule fixture wired to the mocked os/subprocess layers."""
    # fail_json normally exits the process; SystemExit lets tests catch it.
    am.fail_json = mocker.MagicMock(side_effect=SystemExit)
    am._os = mock_os
    am._subprocess = mock_subprocess
    yield am
class TestRunCommandArgs:
    """Argument handling: list vs. shell-string commands."""

    # Format is command as passed to run_command, command to Popen as list, command to Popen as string
    ARGS_DATA = (
        (['/bin/ls', 'a', 'b', 'c'], [b'/bin/ls', b'a', b'b', b'c'], b'/bin/ls a b c'),
        ('/bin/ls a " b" "c "', [b'/bin/ls', b'a', b' b', b'c '], b'/bin/ls a " b" "c "'),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('cmd, expected, shell, stdin',
                             ((arg, cmd_str if sh else cmd_lst, sh, {})
                              for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
                             indirect=['stdin'])
    def test_args(self, cmd, expected, shell, rc_am):
        # Popen must get the string form under a shell, the list otherwise.
        rc_am.run_command(cmd, use_unsafe_shell=shell)
        assert rc_am._subprocess.Popen.called
        args, kwargs = rc_am._subprocess.Popen.call_args
        assert args == (expected, )
        assert kwargs['shell'] == shell

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_tuple_as_args(self, rc_am):
        # Tuples are rejected: run_command fails via fail_json (SystemExit).
        with pytest.raises(SystemExit):
            rc_am.run_command(('ls', '/'))
        assert rc_am.fail_json.called
class TestRunCommandCwd:
    """cwd handling: chdir in, restore afterwards, ignore bad directories."""

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_cwd(self, mocker, rc_am):
        rc_am._os.getcwd.return_value = '/old'
        rc_am.run_command('/bin/ls', cwd='/new')
        # chdir to the requested dir (as bytes), then back to the original.
        assert rc_am._os.chdir.mock_calls == [mocker.call(b'/new'), mocker.call('/old'), ]

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_cwd_relative_path(self, mocker, rc_am):
        rc_am._os.getcwd.return_value = '/old'
        rc_am.run_command('/bin/ls', cwd='sub-dir')
        # A relative cwd is resolved against the current directory.
        assert rc_am._os.chdir.mock_calls == [mocker.call(b'/old/sub-dir'), mocker.call('/old'), ]

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_cwd_not_a_dir(self, mocker, rc_am):
        rc_am._os.getcwd.return_value = '/old'
        rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
        rc_am.run_command('/bin/ls', cwd='/not-a-dir')
        # A non-directory cwd is skipped; only the restoring chdir happens.
        assert rc_am._os.chdir.mock_calls == [mocker.call('/old'), ]
class TestRunCommandPrompt:
    """prompt_regex handling: bad patterns, no match, match without data."""

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_prompt_bad_regex(self, rc_am):
        # An unparsable regex is reported through fail_json.
        with pytest.raises(SystemExit):
            rc_am.run_command('foo', prompt_regex='[pP)assword:')
        assert rc_am.fail_json.called

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_prompt_no_match(self, mocker, rc_am):
        # NOTE(review): this assigns into _cmd_out on the os MagicMock,
        # unlike the other tests which replace _subprocess._output --
        # the MagicMock absorbs it silently; confirm this is intentional.
        rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
        (rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
        assert rc == 0

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_prompt_match_wo_data(self, mocker, rc_am):
        # Stdout ends with a password prompt and no data was supplied, so
        # run_command aborts with rc 257.
        rc_am._subprocess._output = {mocker.sentinel.stdout:
                                     SpecialBytesIO(b'Authentication required!\nEnter password: ',
                                                    fh=mocker.sentinel.stdout),
                                     mocker.sentinel.stderr:
                                     SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
        (rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
        assert rc == 257
class TestRunCommandRc:
    """check_rc behavior for non-zero return codes."""

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_check_rc_false(self, rc_am):
        # check_rc=False: the non-zero rc is simply returned to the caller.
        rc_am._subprocess.Popen.return_value.returncode = 1
        (rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
        assert rc == 1

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_check_rc_true(self, rc_am):
        # check_rc=True: a non-zero rc triggers fail_json with rc included.
        rc_am._subprocess.Popen.return_value.returncode = 1
        with pytest.raises(SystemExit):
            rc_am.run_command('/bin/false', check_rc=True)
        assert rc_am.fail_json.called
        args, kwargs = rc_am.fail_json.call_args
        assert kwargs['rc'] == 1
class TestRunCommandOutput:
    """Encoding/decoding of stdin, stdout and stderr through run_command."""

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_text_stdin(self, rc_am):
        # Text data is encoded and newline-terminated before being written.
        (rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
        assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_ascii_stdout(self, mocker, rc_am):
        rc_am._subprocess._output = {mocker.sentinel.stdout:
                                     SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout),
                                     mocker.sentinel.stderr:
                                     SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
        (rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
        assert rc == 0
        # module_utils function. On py3 it returns text and py2 it returns
        # bytes because it's returning native strings
        assert stdout == 'hello'

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_utf8_output(self, mocker, rc_am):
        # Non-ASCII output on both streams must round-trip through UTF-8.
        rc_am._subprocess._output = {mocker.sentinel.stdout:
                                     SpecialBytesIO(u'Žarn§'.encode('utf-8'),
                                                    fh=mocker.sentinel.stdout),
                                     mocker.sentinel.stderr:
                                     SpecialBytesIO(u'لرئيسية'.encode('utf-8'),
                                                    fh=mocker.sentinel.stderr)}
        (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
        assert rc == 0
        # module_utils function. On py3 it returns text and py2 it returns
        # bytes because it's returning native strings
        assert stdout == to_native(u'Žarn§')
        assert stderr == to_native(u'لرئيسية')
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_run_command_fds(mocker, rc_am):
    """pass_fds is forwarded on py3 and mapped to close_fds=False on py2."""
    subprocess_mock = mocker.patch('ansible.module_utils.basic.subprocess')
    # Popen blows up immediately; only the recorded call args matter here.
    subprocess_mock.Popen.side_effect = AssertionError

    try:
        rc_am.run_command('synchronize', pass_fds=(101, 42))
    except SystemExit:
        pass

    if PY2:
        # py2 subprocess has no pass_fds; run_command falls back to
        # leaving descriptors open via close_fds=False.
        assert subprocess_mock.Popen.call_args[1]['close_fds'] is False
        assert 'pass_fds' not in subprocess_mock.Popen.call_args[1]

    else:
        assert subprocess_mock.Popen.call_args[1]['pass_fds'] == (101, 42)
        assert subprocess_mock.Popen.call_args[1]['close_fds'] is True
|
gpl-3.0
|
ageron/tensorflow
|
tensorflow/python/training/adagrad_test.py
|
22
|
15078
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
  """Functional tests for adagrad.AdagradOptimizer.

  The hard-coded expected values are the result of running 3 Adagrad
  steps (learning rate 3.0, initial accumulator 0.1 unless noted) on
  small, fixed variables.

  Fix: replaced the deprecated ``assertEquals`` alias (removed in
  Python 3.12) with ``assertEqual`` in testSharing.
  """

  def doTestBasic(self,
                  use_locking=False,
                  use_resource=False,
                  use_callable_params=False):
    """Shared body for the Basic* tests, covering all supported dtypes."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      if use_resource:
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
      else:
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)

      learning_rate = lambda: 3.0
      if not use_callable_params:
        learning_rate = learning_rate()

      ada_opt = adagrad.AdagradOptimizer(
          learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)

      if not context.executing_eagerly():
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([3.0, 4.0], v1_val)

      # Run 3 steps of adagrad
      for _ in range(3):
        if not context.executing_eagerly():
          self.evaluate(ada_update)
        else:
          ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

      # Validate updated params
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType(
          np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
      self.assertAllCloseAccordingToType(
          np.array([2.715679168701172, 3.715679168701172]), v1_val)

  def testBasic(self):
    self.doTestBasic(use_locking=False)

  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testBasicResource(self):
    self.doTestBasic(use_locking=False, use_resource=True)

  def testBasicCallableParams(self):
    with context.eager_mode():
      self.doTestBasic(
          use_locking=False, use_resource=True, use_callable_params=True)

  def testBasicLocked(self):
    self.doTestBasic(use_locking=True)

  @test_util.run_deprecated_v1
  def testMinimizeSparseResourceVariable(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        loss = pred * pred
        sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
                                           self.evaluate(var0))
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params; only the looked-up row moves.
        self.assertAllCloseAccordingToType([[0, 1], [3, 4]],
                                           self.evaluate(var0),
                                           atol=0.01)

  @test_util.run_deprecated_v1
  def testTensorLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        # A learning rate supplied as a Tensor must behave like a float.
        ada_opt = adagrad.AdagradOptimizer(
            constant_op.constant(3.0), initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]),
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]),
            self.evaluate(var1))

  @test_util.run_deprecated_v1
  def testSparseBasic(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant(
                [0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
        self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
        # Run 3 step of sgd
        for _ in range(3):
          ada_update.run()
        # Validate updated params: only the indexed rows change.
        self.assertAllCloseAccordingToType(
            np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([[3.0], [3.715679168701172]]), self.evaluate(var1))

  @test_util.run_deprecated_v1
  def testSparseRepeatedIndices(self):
    # Two gradients on the same index must accumulate exactly like one
    # pre-aggregated gradient of the summed value.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        self.evaluate(variables.global_variables_initializer())
        self.assertAllClose(aggregated_update_var.eval(),
                            self.evaluate(repeated_index_update_var))
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              self.evaluate(repeated_index_update_var))

  @test_util.run_deprecated_v1
  def testSparseRepeatedIndicesResourceVariable(self):
    # Same repeated-vs-aggregated check, but driven through minimize()
    # on resource variables.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var_repeated = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        loss_repeated = math_ops.reduce_sum(
            embedding_ops.embedding_lookup(var_repeated, [0, 0]))
        var_aggregated = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        loss_aggregated = 2 * math_ops.reduce_sum(
            embedding_ops.embedding_lookup(var_aggregated, [0]))
        update_op_repeated = adagrad.AdagradOptimizer(
            2.0).minimize(loss_repeated)
        update_op_aggregated = adagrad.AdagradOptimizer(
            2.0).minimize(loss_aggregated)
        self.evaluate(variables.global_variables_initializer())
        self.assertAllCloseAccordingToType(
            self.evaluate(var_repeated), self.evaluate(var_aggregated))
        for _ in range(3):
          update_op_repeated.run()
          update_op_aggregated.run()
          self.assertAllCloseAccordingToType(
              self.evaluate(var_repeated), self.evaluate(var_aggregated))

  @test_util.run_deprecated_v1
  def testSparseStability(self):
    # Tiny gradients repeated many times must not destabilize the
    # accumulator (historical regression test).
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        shape = [1, 6]
        var0 = variables.Variable(
            [[
                0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
                -0.0105945
            ]],
            dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [[
                    -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
                    -8.4877e-05, -9.48906e-05
                ]],
                shape=shape,
                dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant(shape))
        ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        init = variables.global_variables_initializer()
        # Re-initializing before every step keeps the variable fixed, so
        # each iteration exercises a single update from the same state.
        for _ in range(100):
          init.run()
          ada_update.run()
          self.assertAllCloseAccordingToType(
              np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0))
          self.assertAllCloseAccordingToType(
              np.array([[
                  0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
                  -0.01029443
              ]]), self.evaluate(var0))

  @test_util.run_deprecated_v1
  def testSharing(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.evaluate(variables.global_variables_initializer())

        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]),
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]),
            self.evaluate(var1))

  @test_util.run_v1_only("b/120545219")
  def testDynamicShapeVariable_Ok(self):
    with self.cached_session():
      v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
                                      validate_shape=False)
      self.assertFalse(v.shape.is_fully_defined())
      # Creating optimizer should cause no exception.
      adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)

  @test_util.run_v1_only("b/120545219")
  def testDynamicShapeVariableWithCallableInit(self):
    var0 = variable_scope.get_variable("var0",
                                       initializer=constant_op.constant(1.),
                                       validate_shape=False)
    self.assertFalse(var0.shape.is_fully_defined())

    grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
    learning_rate = lambda: 3.0

    ada_opt = adagrad.AdagradOptimizer(
        learning_rate, initial_accumulator_value=0.1, use_locking=True)

    if not context.executing_eagerly():
      ada_update = ada_opt.apply_gradients(
          zip([grads0], [var0]))
      self.evaluate(variables.global_variables_initializer())

    # Fetch params to validate initial values
    v0_val = self.evaluate([var0])
    self.assertAllClose([1.0], v0_val)

    # Run 3 steps of adagrad
    for _ in range(3):
      if not context.executing_eagerly():
        self.evaluate(ada_update)
      else:
        ada_opt.apply_gradients(zip([grads0], [var0]))

    # Validate updated params
    v0_val = self.evaluate([var0])
    self.assertAllCloseAccordingToType(
        np.array([-1.6026098728179932]), v0_val)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
beeftornado/sentry
|
tests/sentry/integrations/msteams/test_client.py
|
1
|
2323
|
from __future__ import absolute_import
import responses
from six.moves.urllib.parse import urlencode
from sentry.models import Integration
from sentry.integrations.msteams.client import MsTeamsClient
from sentry.testutils import TestCase
from sentry.utils.compat.mock import patch
class MsTeamsClientTest(TestCase):
    """Token caching/refresh behavior of MsTeamsClient.access_token."""

    def setUp(self):
        self.expires_at = 1594768808
        self.integration = Integration.objects.create(
            provider="msteams",
            name="my_team",
            metadata={"access_token": "my_token", "expires_at": self.expires_at},
        )
        # token mock
        access_json = {"expires_in": 86399, "access_token": "my_new_token"}
        responses.add(
            responses.POST,
            u"https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token",
            json=access_json,
        )
        self.client = MsTeamsClient(self.integration)

    @responses.activate
    def test_token_refreshes(self):
        # At (or past) the expiry time the token must be re-fetched.
        with patch("time.time") as mock_time:
            mock_time.return_value = self.expires_at
            # accessing the property should refresh the token
            self.client.access_token
            body = responses.calls[0].request.body
            assert body == urlencode(
                {
                    "client_id": "msteams-client-id",
                    "client_secret": "msteams-client-secret",
                    "grant_type": "client_credentials",
                    "scope": "https://api.botframework.com/.default",
                }
            )
            integration = Integration.objects.get(provider="msteams")
            # New token is stored with a 5-minute safety margin on expiry.
            assert integration.metadata == {
                "access_token": "my_new_token",
                "expires_at": self.expires_at + 86399 - 60 * 5,
            }

    @responses.activate
    def test_no_token_refresh(self):
        # Before expiry the cached token is reused and no HTTP call is made.
        with patch("time.time") as mock_time:
            mock_time.return_value = self.expires_at - 100
            # accessing the property should NOT refresh the still-valid token
            self.client.access_token

            assert not responses.calls

            integration = Integration.objects.get(provider="msteams")
            assert integration.metadata == {
                "access_token": "my_token",
                "expires_at": self.expires_at,
            }
|
bsd-3-clause
|
Opentrons/labware
|
api/tests/opentrons/hardware_control/test_calibration_functions.py
|
2
|
2931
|
import numpy as np
from opentrons import config
from opentrons.calibration_storage import file_operators as io
from opentrons.hardware_control import robot_calibration
from opentrons.util.helpers import utc_now
from opentrons.types import Mount
def test_migrate_affine_xy_to_attitude():
    """4x4 affine collapses to 3x3 attitude: XY block kept, Z row reset."""
    affine_4x4 = [
        [1.0, 2.0, 3.0, 4.0],
        [5.0, 6.0, 7.0, 8.0],
        [9.0, 10.0, 11.0, 12.0],
        [13.0, 14.0, 15.0, 16.0],
    ]
    attitude = robot_calibration.migrate_affine_xy_to_attitude(affine_4x4)
    assert attitude == [
        [1.0, 2.0, 3.0],
        [5.0, 6.0, 7.0],
        [0.0, 0.0, 1.0],
    ]
def test_save_calibration(ot_config_tempdir):
    """save_attitude_matrix writes the expected deck_calibration.json."""
    pathway = config.get_opentrons_path(
        'robot_calibration_dir') / 'deck_calibration.json'
    pip_id = 'fakePip'
    lw_hash = 'fakeHash'
    # e = expected (ideal) points, a = actual measured points.
    e = ((1, 1, 3), (2, 2, 2), (1, 2, 1))
    a = ((1.1, 3.1, 1.1), (2.1, 2.1, 2.2), (1.1, 2.1, 1.1))
    transform = [[0.975, 0.05, 0.0], [-1.025, 1.05, 0.0], [0.0, 0.0, 1.0]]
    expected = {
        'attitude': transform,
        'pipette_calibrated_with': pip_id,
        'last_modified': None,
        'tiprack': lw_hash
    }
    robot_calibration.save_attitude_matrix(e, a, pip_id, lw_hash)
    data = io.read_cal_file(pathway)
    # The timestamp is nondeterministic; null it out before comparing.
    data['last_modified'] = None
    assert data == expected
def test_load_calibration(ot_config_tempdir):
    """A saved attitude matrix round-trips through load_attitude_matrix."""
    pathway = config.get_opentrons_path(
        'robot_calibration_dir') / 'deck_calibration.json'
    data = {
        'attitude': [[1, 0, 1], [0, 1, -.5], [0, 0, 1]],
        'pipette_calibrated_with': 'fake',
        'last_modified': utc_now(),
        'tiprack': 'hash'
    }
    io.save_to_file(pathway, data)
    obj = robot_calibration.load_attitude_matrix()
    transform = [[1, 0, 1], [0, 1, -.5], [0, 0, 1]]
    assert np.allclose(obj.attitude, transform)
def test_load_malformed_calibration(ot_config_tempdir):
    """Malformed calibration data falls back to the identity matrix."""
    pathway = config.get_opentrons_path(
        'robot_calibration_dir') / 'deck_calibration.json'
    # Deliberately misspelled keys ('atsadasitude', 'statu') so the loader
    # cannot find a valid attitude entry.
    data = {
        'atsadasitude': [[1, 0, 1], [0, 1, -.5], [0, 0, 1]],
        'last_modified': utc_now(),
        'tiprack': 'hash',
        'statu': [1, 2, 3],
    }
    io.save_to_file(pathway, data)
    obj = robot_calibration.load_attitude_matrix()
    assert np.allclose(obj.attitude, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_load_pipette_offset(ot_config_tempdir):
    """Pipette offset calibration is read back from the per-mount file."""
    pip_id = 'fakePip'
    mount = Mount.LEFT
    # Offset files live under <pipette_calibration_dir>/<mount>/<pip_id>.json.
    pip_dir = config.get_opentrons_path(
        'pipette_calibration_dir') / 'left'
    pip_dir.mkdir(parents=True, exist_ok=True)
    pathway = pip_dir/'fakePip.json'
    data = {
        'offset': [1, 2, 3],
        'tiprack': 'hash',
        'uri': 'opentrons/opentrons_96_tiprack_10ul/1',
        'last_modified': utc_now()
    }
    io.save_to_file(pathway, data)
    obj = robot_calibration.load_pipette_offset(pip_id, mount)
    offset = [1, 2, 3]
    assert np.allclose(obj.offset, offset)
|
apache-2.0
|
beeftornado/sentry
|
src/sentry/migrations/0039_delete_incidentsuspectcommit.py
|
1
|
1478
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-03 22:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """State-only migration: remove ``IncidentSuspectCommit`` from Django's
    model state while leaving the database untouched
    (``database_operations`` is empty, so no SQL is executed).
    """
    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False
    # This flag is used to decide whether to run this migration in a transaction or not.
    # By default we prefer to run in a transaction, but for migrations where you want
    # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
    # want to create an index concurrently when adding one to an existing table.
    atomic = True
    dependencies = [("sentry", "0038_auto_20200213_1904")]
    operations = [
        migrations.SeparateDatabaseAndState(
            state_operations=[migrations.DeleteModel(name="IncidentSuspectCommit")],
            database_operations=[],
        )
    ]
|
bsd-3-clause
|
firebitsbr/pwn_plug_sources
|
src/metagoofil/extractors/metadataPDF.py
|
9
|
1554
|
#!/usr/bin/env python
#
# metadataPDF.py - dump pdf metadata
#
# Copy of Yusuke's dumppdf to add dumpmeta
import sys, re
from pdfminer.psparser import PSKeyword, PSLiteral
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdftypes import PDFStream, PDFObjRef, resolve1, stream_value
# dumpmeta
class metapdf:
    """Extract document metadata (author, company, producing software)
    from a PDF file via pdfminer's trailer ``Info`` dictionary.

    Usage: call getData() first; on success ("ok") the getters return
    the accumulated values.
    """

    def __init__(self, fname, password=''):
        self.fname = fname
        self.password = password
        self.metadata = ''    # Info dict after a successful getData()
        self.users = []
        self.software = []
        self.paths = []       # never populated here; kept for interface parity
        self.raw = ""
        self.company = []

    def getData(self):
        """Parse the PDF and cache its Info metadata dictionary.

        Returns:
            "ok" on success, "error" on any parsing failure.
        """
        try:
            doc = PDFDocument()
            # open() instead of the Python-2-only file() builtin.
            fp = open(self.fname, 'rb')
            parser = PDFParser(fp)
            parser.set_document(doc)
            doc.set_parser(parser)
            doc.initialize(self.password)
            # Raises (and is caught below) when the catalog has no Metadata
            # entry -- keep this lookup even though the value is unused.
            metadata = resolve1(doc.catalog['Metadata'])
            parser.close()
            fp.close()
            for xref in doc.xrefs:
                info_ref = xref.trailer.get('Info')
                if info_ref:
                    info = resolve1(info_ref)
                    self.metadata = info
                    self.raw = info
            return "ok"
        except Exception:
            # Best-effort parsing: callers only check for the "error" marker.
            return "error"

    def getUsers(self):
        if 'Author' in self.metadata:
            self.users.append(self.metadata['Author'])
        return self.users

    def getCompany(self):
        # BUG FIX: this previously appended to self.users, so the company
        # list returned here was always empty and the value leaked into
        # getUsers() results instead.
        if 'Company' in self.metadata:
            self.company.append(self.metadata['Company'])
        return self.company

    def getSoftware(self):
        if 'Producer' in self.metadata:
            self.software.append(self.metadata['Producer'])
        if 'Creator' in self.metadata:
            self.software.append(self.metadata['Creator'])
        return self.software

    def getPaths(self):
        return self.paths

    def getRaw(self):
        return self.raw
|
gpl-3.0
|
Dandandan/wikiprogramming
|
jsrepl/build/extern/python/closured/lib/python2.7/email/mime/message.py
|
573
|
1286
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing message/* MIME documents."""
__all__ = ['MIMEMessage']
from email import message
from email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
    """MIME document of type message/* (e.g. an embedded rfc822 message)."""

    def __init__(self, _msg, _subtype='rfc822'):
        """Wrap *_msg* in a message/<_subtype> MIME document.

        _msg must be an instance of message.Message (or a subclass);
        anything else raises TypeError.  The default subtype "rfc822" is
        what the MIME standard specifies, even though the term "rfc822"
        itself is technically outdated by RFC 2822.
        """
        MIMENonMultipart.__init__(self, 'message', _subtype)
        if isinstance(_msg, message.Message):
            # Use the base Message.attach directly: the attach() inherited
            # from MIMENonMultipart deliberately raises for non-multiparts.
            message.Message.attach(self, _msg)
        else:
            raise TypeError('Argument is not an instance of Message')
        # And be sure our default type is set correctly
        self.set_default_type('message/rfc822')
|
mit
|
huguesmayolle/famille
|
famille/tests/test_templates.py
|
1
|
2004
|
from django.test import TestCase
from famille import models
from famille.templatetags import helpers, users
__all__ = ["TemplateTagsTestCase", ]
class TemplateTagsTestCase(TestCase):
    """Unit tests for the famille template tag helpers.

    Consistency fix: ``assertEquals`` is a deprecated alias of
    ``assertEqual``; the class previously mixed both, now it uses
    ``assertEqual`` throughout.
    """

    def test_get_class_name(self):
        obj = models.Prestataire()
        self.assertEqual(helpers.get_class_name(obj), "Prestataire")
        obj = models.Famille()
        self.assertEqual(helpers.get_class_name(obj), "Famille")

    def test_get_range(self):
        self.assertEqual(helpers.get_range(""), [])
        self.assertEqual(helpers.get_range("2"), [0, 1])

    def test_subtract(self):
        # NOTE(review): the helper itself is spelled "substract" (sic) in
        # the templatetags module; renaming it would break templates.
        self.assertEqual(helpers.substract("5", ""), 5)
        self.assertEqual(helpers.substract("5", "2"), 3)

    def test_get_multi_display(self):
        self.assertEqual(users.get_multi_display(None, "language"), "--")
        self.assertEqual(users.get_multi_display("0", "language"), "Anglais")
        self.assertEqual(users.get_multi_display("0,2", "language"), "Anglais, Chinois")
        # Unknown ids are silently skipped.
        self.assertEqual(users.get_multi_display("0,829162,2", "language"), "Anglais, Chinois")

    def test_contains(self):
        self.assertTrue(helpers.contains("toto", None))
        self.assertFalse(helpers.contains(None, "toto"))
        self.assertFalse(helpers.contains("zjioze", "t"))
        self.assertTrue(helpers.contains("zjioze", "z"))

    def test_get_badge_icon_garde(self):
        obj = models.Prestataire()
        self.assertEqual(users.get_badge_icon_garde(obj, "1"), "img/badges/no-1.png")
        obj.type_garde = "1"
        self.assertEqual(users.get_badge_icon_garde(obj, "1"), "img/badges/1.png")
        obj.type_garde = "1,3,6"
        self.assertEqual(users.get_badge_icon_garde(obj, "3"), "img/badges/3.png")

    def test_get_languages_html(self):
        self.assertEqual(users.get_languages_html(None), "")
        self.assertIn(users.FLAG_FOLDER % "26", users.get_languages_html("26,27"))
        self.assertIn(users.FLAG_FOLDER % "27", users.get_languages_html("26,27"))
|
apache-2.0
|
chrisndodge/edx-platform
|
lms/djangoapps/dashboard/management/commands/tests/test_git_add_course.py
|
17
|
8904
|
"""
Provide tests for git_add_course management command.
"""
import logging
import os
import shutil
import StringIO
import subprocess
import unittest
from uuid import uuid4
from nose.plugins.attrib import attr
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import dashboard.git_import as git_import
from dashboard.git_import import (
GitImportError,
GitImportErrorNoDir,
GitImportErrorUrlBad,
GitImportErrorCannotPull,
GitImportErrorBadRepo,
GitImportErrorRemoteBranchMissing,
)
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
# Mongo connection settings for a throwaway test database; injected as the
# MONGODB_LOG setting via the override_settings decorator below.
TEST_MONGODB_LOG = {
    'host': MONGO_HOST,
    'port': MONGO_PORT_NUM,
    'user': '',
    'password': '',
    'db': 'test_xlog',
}
# Copy of the feature flags with certificate-based auth switched on.
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
@attr(shard=3)
@override_settings(
    MONGODB_LOG=TEST_MONGODB_LOG,
    GIT_REPO_DIR=settings.TEST_ROOT / "course_repos_{}".format(uuid4().hex)
)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
                     "ENABLE_SYSADMIN_DASHBOARD not set")
class TestGitAddCourse(SharedModuleStoreTestCase):
    """
    Tests the git_add_course management command for proper functions.
    """
    # NOTE(review): TEST_REPO points at a live GitHub repository, so most of
    # these tests require network access -- confirm CI allows outbound git.
    TEST_REPO = 'https://github.com/mitocw/edx4edx_lite.git'
    TEST_COURSE = 'MITx/edx4edx/edx4edx'
    # Remote branch that must keep existing for the branching tests below.
    TEST_BRANCH = 'testing_do_not_delete'
    TEST_BRANCH_COURSE = SlashSeparatedCourseKey('MITx', 'edx4edx_branch', 'edx4edx')
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
    def setUp(self):
        super(TestGitAddCourse, self).setUp()
        # GIT_REPO_DIR is unique per test run (see override_settings above).
        self.git_repo_dir = settings.GIT_REPO_DIR
    def assertCommandFailureRegexp(self, regex, *args):
        """
        Convenience function for testing command failures
        """
        with self.assertRaisesRegexp(CommandError, regex):
            call_command('git_add_course', *args, stderr=StringIO.StringIO())
    def test_command_args(self):
        """
        Validate argument checking
        """
        # No argument given.
        self.assertCommandFailureRegexp('Error: too few arguments')
        # Extra/Un-named arguments given.
        self.assertCommandFailureRegexp(
            'Error: unrecognized arguments: blah blah blah',
            'blah', 'blah', 'blah', 'blah')
        # Not a valid path.
        self.assertCommandFailureRegexp(
            'Path {0} doesn\'t exist, please create it,'.format(self.git_repo_dir),
            'blah')
        # Test successful import from command
        if not os.path.isdir(self.git_repo_dir):
            os.mkdir(self.git_repo_dir)
        self.addCleanup(shutil.rmtree, self.git_repo_dir)
        # Make a course dir that will be replaced with a symlink
        # while we are at it.
        if not os.path.isdir(self.git_repo_dir / 'edx4edx'):
            os.mkdir(self.git_repo_dir / 'edx4edx')
        call_command('git_add_course', self.TEST_REPO,
                     directory_path=self.git_repo_dir / 'edx4edx_lite')
        # Test with all three args (branch)
        call_command('git_add_course', self.TEST_REPO,
                     directory_path=self.git_repo_dir / 'edx4edx_lite',
                     repository_branch=self.TEST_BRANCH)
    def test_add_repo(self):
        """
        Various exit path tests for test_add_repo
        """
        with self.assertRaises(GitImportErrorNoDir):
            git_import.add_repo(self.TEST_REPO, None, None)
        os.mkdir(self.git_repo_dir)
        self.addCleanup(shutil.rmtree, self.git_repo_dir)
        with self.assertRaises(GitImportErrorUrlBad):
            git_import.add_repo('foo', None, None)
        with self.assertRaises(GitImportErrorCannotPull):
            git_import.add_repo('file:///foobar.git', None, None)
        # Test git repo that exists, but is "broken"
        bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
        os.mkdir(bare_repo)
        self.addCleanup(shutil.rmtree, bare_repo)
        subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
                                cwd=bare_repo)
        with self.assertRaises(GitImportErrorBadRepo):
            git_import.add_repo('file://{0}'.format(bare_repo), None, None)
    def test_detached_repo(self):
        """
        Test repo that is in detached head state.
        """
        repo_dir = self.git_repo_dir
        # Test successful import from command
        try:
            os.mkdir(repo_dir)
        except OSError:
            pass
        self.addCleanup(shutil.rmtree, repo_dir)
        git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)
        # Detach HEAD so that the subsequent pull must fail.
        subprocess.check_output(['git', 'checkout', 'HEAD~2', ],
                                stderr=subprocess.STDOUT,
                                cwd=repo_dir / 'edx4edx_lite')
        with self.assertRaises(GitImportErrorCannotPull):
            git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)
    def test_branching(self):
        """
        Exercise branching code of import
        """
        repo_dir = self.git_repo_dir
        # Test successful import from command
        if not os.path.isdir(repo_dir):
            os.mkdir(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
        # Checkout non existent branch
        with self.assertRaises(GitImportErrorRemoteBranchMissing):
            git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')
        # Checkout new branch
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            self.TEST_BRANCH)
        def_ms = modulestore()
        # Validate that it is different than master
        self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        # Attempt to check out the same branch again to validate branch choosing
        # works
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            self.TEST_BRANCH)
        # Delete to test branching back to master
        def_ms.delete_course(self.TEST_BRANCH_COURSE, ModuleStoreEnum.UserID.test)
        self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            'master')
        self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))
    def test_branch_exceptions(self):
        """
        This will create conditions to exercise bad paths in the switch_branch function.
        """
        # create bare repo that we can mess with and attempt an import
        bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
        os.mkdir(bare_repo)
        self.addCleanup(shutil.rmtree, bare_repo)
        subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
                                cwd=bare_repo)
        # Build repo dir
        repo_dir = self.git_repo_dir
        if not os.path.isdir(repo_dir):
            os.mkdir(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
        rdir = '{0}/bare'.format(repo_dir)
        with self.assertRaises(GitImportErrorBadRepo):
            git_import.add_repo('file://{0}'.format(bare_repo), None, None)
        # Get logger for checking strings in logs
        output = StringIO.StringIO()
        test_log_handler = logging.StreamHandler(output)
        test_log_handler.setLevel(logging.DEBUG)
        glog = git_import.log
        glog.addHandler(test_log_handler)
        # Move remote so fetch fails
        shutil.move(bare_repo, '{0}/not_bare.git'.format(settings.TEST_ROOT))
        try:
            git_import.switch_branch('master', rdir)
        except GitImportError:
            self.assertIn('Unable to fetch remote', output.getvalue())
        # Restore the remote and reset the captured log buffer.
        shutil.move('{0}/not_bare.git'.format(settings.TEST_ROOT), bare_repo)
        output.truncate(0)
        # Replace origin with a different remote
        subprocess.check_output(
            ['git', 'remote', 'rename', 'origin', 'blah', ],
            stderr=subprocess.STDOUT, cwd=rdir
        )
        with self.assertRaises(GitImportError):
            git_import.switch_branch('master', rdir)
        self.assertIn('Getting a list of remote branches failed', output.getvalue())
|
agpl-3.0
|
monkeysecurity/npyscreen
|
npyscreen/wgannotatetextbox.py
|
15
|
3116
|
from . import wgwidget
from .wgtextbox import Textfield
class AnnotateTextboxBase(wgwidget.Widget):
    """Base widget that draws a short annotation beside a text area.

    Intended for customization: subclasses should override
    getAnnotationAndColor() (and, if needed, annotationColor() /
    annotationNoColor()) to control what is drawn in the annotation gutter.
    """
    # Number of screen columns reserved for the annotation gutter.
    ANNOTATE_WIDTH = 5

    def __init__(self, screen, value=False, annotation_color='CONTROL', **keywords):
        self.value = value
        self.annotation_color = annotation_color
        super(AnnotateTextboxBase, self).__init__(screen, **keywords)
        self._init_text_area(screen)
        # A subclass may supply a custom display_value; forward it.
        if hasattr(self, 'display_value'):
            self.text_area.display_value = self.display_value
        self.show_bold = False
        self.highlight = False
        self.important = False
        self.hide = False

    def _init_text_area(self, screen):
        # The text area begins just after the annotation gutter.
        self.text_area = Textfield(screen,
                                   rely=self.rely,
                                   relx=self.relx + self.ANNOTATE_WIDTH,
                                   width=self.width - self.ANNOTATE_WIDTH,
                                   value=self.name)

    def _display_annotation_at(self):
        """Screen position (y, x) at which the annotation is drawn."""
        return (self.rely, self.relx)

    def getAnnotationAndColor(self):
        """Override in subclasses: return (annotation_text, color_name)."""
        return ('xxx', 'CONTROL')

    def annotationColor(self):
        displayy, displayx = self._display_annotation_at()
        annotation, color = self.getAnnotationAndColor()
        self.parent.curses_pad.addnstr(
            displayy, displayx, annotation, self.ANNOTATE_WIDTH,
            self.parent.theme_manager.findPair(self, color))

    def annotationNoColor(self):
        displayy, displayx = self._display_annotation_at()
        annotation, _color = self.getAnnotationAndColor()
        self.parent.curses_pad.addnstr(
            displayy, displayx, annotation, self.ANNOTATE_WIDTH)

    def update(self, clear=True):
        if clear:
            self.clear()
        if self.hidden:
            self.clear()
            return False
        if self.hide:
            return True
        self.text_area.value = self.value
        # Draw the annotation, colored only when the terminal supports it.
        if self.do_colors():
            self.annotationColor()
        else:
            self.annotationNoColor()
        # Mirror this widget's display flags onto the inner text area.
        # Note: `highlight` is applied after `editing`, so it wins.
        self.text_area.highlight = bool(self.editing)
        self.text_area.show_bold = bool(self.show_bold)
        self.text_area.important = bool(self.important)
        self.text_area.highlight = bool(self.highlight)
        self.text_area.update(clear=clear)

    def calculate_area_needed(self):
        # One row tall; the 0 width presumably means "expand" per the
        # Widget sizing convention -- TODO(review): confirm.
        return 1, 0
class AnnotateTextboxBaseRight(AnnotateTextboxBase):
    """Variant that places the annotation gutter to the right of the text."""

    def _init_text_area(self, screen):
        # Text starts at the widget origin; the gutter sits at the far right.
        self.text_area = Textfield(screen,
                                   rely=self.rely,
                                   relx=self.relx,
                                   width=self.width - self.ANNOTATE_WIDTH,
                                   value=self.name)

    def _display_annotation_at(self):
        return (self.rely, self.relx + self.width - self.ANNOTATE_WIDTH)
|
bsd-2-clause
|
a-doumoulakis/tensorflow
|
tensorflow/python/debug/cli/debugger_cli_common.py
|
68
|
38908
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import sre_constants
import traceback
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import gfile
HELP_INDENT = " "
EXPLICIT_USER_EXIT = "explicit_user_exit"
REGEX_MATCH_LINES_KEY = "regex_match_lines"
INIT_SCROLL_POS_KEY = "init_scroll_pos"
MAIN_MENU_KEY = "mm:"
class CommandLineExit(Exception):
  """Raised by a command handler to request that the CLI exit.

  An optional, arbitrary exit token is carried along for whoever
  catches the exception.
  """

  def __init__(self, exit_token=None):
    Exception.__init__(self)
    self._exit_token = exit_token

  @property
  def exit_token(self):
    """The value supplied at construction time (may be None)."""
    return self._exit_token
class RichLine(object):
  """Rich single-line text.

  Attributes:
    text: A plain string, the raw text represented by this object. Should not
      contain newlines.
    font_attr_segs: A list of (start, end, font attribute) triples, describing
      font attributes applied to substrings of text.
  """

  def __init__(self, text="", font_attr=None):
    """Create a RichLine with no attributes or one attribute over all of it.

    Args:
      text: Raw text string
      font_attr: If specified, a single font attribute applied to the entire
        text. Concatenation (via +) allows building text with varying
        attributes.
    """
    # TODO(ebreck) Make .text and .font_attr protected members when we no
    # longer need public access.
    self.text = text
    self.font_attr_segs = [(0, len(text), font_attr)] if font_attr else []

  def __add__(self, other):
    """Concatenate with a str or another RichLine; returns a new RichLine.

    Does not modify self.  A plain str is appended with no attributes; a
    RichLine is appended with its attributes shifted past this line's text.
    """
    joined = RichLine()
    if isinstance(other, six.string_types):
      joined.text = self.text + other
      joined.font_attr_segs = self.font_attr_segs[:]
    elif isinstance(other, RichLine):
      joined.text = self.text + other.text
      joined.font_attr_segs = self.font_attr_segs[:]
      offset = len(self.text)
      joined.font_attr_segs.extend(
          (offset + begin, offset + end, attr)
          for begin, end, attr in other.font_attr_segs)
    else:
      raise TypeError("%r cannot be concatenated with a RichLine" % other)
    return joined

  def __len__(self):
    return len(self.text)
def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):
  """Convert a list of RichLine objects or strings to a RichTextLines object.

  Args:
    rich_text_list: a list of RichLine objects or strings
    annotations: annotations for the resultant RichTextLines object.

  Returns:
    A corresponding RichTextLines object.
  """
  lines = []
  font_attr_segs = {}
  for index, item in enumerate(rich_text_list):
    if not isinstance(item, RichLine):
      # Plain strings carry no font attributes.
      lines.append(item)
      continue
    lines.append(item.text)
    if item.font_attr_segs:
      font_attr_segs[index] = item.font_attr_segs
  return RichTextLines(lines, font_attr_segs, annotations=annotations)
class RichTextLines(object):
  """Rich multi-line text.
  Line-by-line text output, with font attributes (e.g., color) and annotations
  (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI
  commands. Can be rendered on terminal environments such as curses.
  This is not to be confused with Rich Text Format (RTF). This class is for text
  lines only.
  """

  def __init__(self, lines, font_attr_segs=None, annotations=None):
    """Constructor of RichTextLines.
    Args:
      lines: A list of str or a single str, representing text output to
        screen. The latter case is for convenience when the text output is
        single-line.
      font_attr_segs: A map from 0-based row index to a list of 3-tuples.
        It lists segments in each row that have special font attributes, such
        as colors, that are not the default attribute. For example:
        {1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]}
        In each tuple, the 1st element is the start index of the segment. The
        2nd element is the end index, in an "open interval" fashion. The 3rd
        element is an object or a list of objects that represents the font
        attribute. Colors are represented as strings as in the examples above.
      annotations: A map from 0-based row index to any object for annotating
        the row. A typical use example is annotating rows of the output as
        indices in a multi-dimensional tensor. For example, consider the
        following text representation of a 3x2x2 tensor:
        [[[0, 0], [0, 0]],
         [[0, 0], [0, 0]],
         [[0, 0], [0, 0]]]
        The annotation can indicate the indices of the first element shown in
        each row, i.e.,
        {0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}
        This information can make display of tensors on screen clearer and can
        help the user navigate (scroll) to the desired location in a large
        tensor.
    Raises:
      ValueError: If lines is of invalid type.
    """
    if isinstance(lines, list):
      self._lines = lines
    elif isinstance(lines, six.string_types):
      self._lines = [lines]
    else:
      raise ValueError("Unexpected type in lines: %s" % type(lines))
    self._font_attr_segs = font_attr_segs
    if not self._font_attr_segs:
      self._font_attr_segs = {}
    # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
    self._annotations = annotations
    if not self._annotations:
      self._annotations = {}
    # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.

  @property
  def lines(self):
    return self._lines

  @property
  def font_attr_segs(self):
    return self._font_attr_segs

  @property
  def annotations(self):
    return self._annotations

  def num_lines(self):
    return len(self._lines)

  def slice(self, begin, end):
    """Slice a RichTextLines object.
    The object itself is not changed. A sliced instance is returned.
    Args:
      begin: (int) Beginning line index (inclusive). Must be >= 0.
      end: (int) Ending line index (exclusive). Must be >= 0.
    Returns:
      (RichTextLines) Sliced output instance of RichTextLines.
    Raises:
      ValueError: If begin or end is negative.
    """
    if begin < 0 or end < 0:
      raise ValueError("Encountered negative index.")
    # Copy lines.
    lines = self.lines[begin:end]
    # Slice font attribute segments.
    font_attr_segs = {}
    for key in self.font_attr_segs:
      if key >= begin and key < end:
        # Row indices are shifted so the slice is 0-based.
        font_attr_segs[key - begin] = self.font_attr_segs[key]
    # Slice annotations.
    annotations = {}
    for key in self.annotations:
      if not isinstance(key, int):
        # Annotations can contain keys that are not line numbers.
        annotations[key] = self.annotations[key]
      elif key >= begin and key < end:
        annotations[key - begin] = self.annotations[key]
    return RichTextLines(
        lines, font_attr_segs=font_attr_segs, annotations=annotations)

  def extend(self, other):
    """Extend this instance of RichTextLines with another instance.
    The extension takes effect on the text lines, the font attribute segments,
    as well as the annotations. The line indices in the font attribute
    segments and the annotations are adjusted to account for the existing
    lines. If there are duplicate, non-line-index fields in the annotations,
    the value from the input argument "other" will override that in this
    instance.
    Args:
      other: (RichTextLines) The other RichTextLines instance to be appended at
        the end of this instance.
    """
    orig_num_lines = self.num_lines()  # Record original number of lines.
    # Merge the lines.
    self._lines.extend(other.lines)
    # Merge the font_attr_segs.
    for line_index in other.font_attr_segs:
      self._font_attr_segs[orig_num_lines + line_index] = (
          other.font_attr_segs[line_index])
    # Merge the annotations.
    for key in other.annotations:
      if isinstance(key, int):
        self._annotations[orig_num_lines + key] = (other.annotations[key])
      else:
        self._annotations[key] = other.annotations[key]

  def _extend_before(self, other):
    """Add another RichTextLines object to the front.
    Args:
      other: (RichTextLines) The other object to add to the front to this
        object.
    """
    other_num_lines = other.num_lines()  # Record original number of lines.
    # Merge the lines.
    self._lines = other.lines + self._lines
    # Merge the font_attr_segs.
    new_font_attr_segs = {}
    for line_index in self.font_attr_segs:
      new_font_attr_segs[other_num_lines + line_index] = (
          self.font_attr_segs[line_index])
    new_font_attr_segs.update(other.font_attr_segs)
    self._font_attr_segs = new_font_attr_segs
    # Merge the annotations.
    new_annotations = {}
    for key in self._annotations:
      if isinstance(key, int):
        new_annotations[other_num_lines + key] = (self.annotations[key])
      else:
        new_annotations[key] = other.annotations[key]
    # NOTE: the final update means entries from `other` win on key collision.
    new_annotations.update(other.annotations)
    self._annotations = new_annotations

  def append(self, line, font_attr_segs=None):
    """Append a single line of text.
    Args:
      line: (str) The text to be added to the end.
      font_attr_segs: (list of tuples) Font attribute segments of the appended
        line.
    """
    self._lines.append(line)
    if font_attr_segs:
      self._font_attr_segs[len(self._lines) - 1] = font_attr_segs

  def append_rich_line(self, rich_line):
    # Convenience wrapper: splits a RichLine into the (text, segs) pair
    # expected by append().
    self.append(rich_line.text, rich_line.font_attr_segs)

  def prepend(self, line, font_attr_segs=None):
    """Prepend (i.e., add to the front) a single line of text.
    Args:
      line: (str) The text to be added to the front.
      font_attr_segs: (list of tuples) Font attribute segments of the appended
        line.
    """
    other = RichTextLines(line)
    if font_attr_segs:
      other.font_attr_segs[0] = font_attr_segs
    self._extend_before(other)

  def write_to_file(self, file_path):
    """Write the object itself to file, in a plain format.
    The font_attr_segs and annotations are ignored.
    Args:
      file_path: (str) path of the file to write to.
    """
    with gfile.Open(file_path, "w") as f:
      for line in self._lines:
        f.write(line + "\n")
  # TODO(cais): Add a method to allow appending to a line in RichTextLines with
  # both text and font_attr_segs.
def regex_find(orig_screen_output, regex, font_attr):
  """Perform regex match in rich text lines.

  Produces a new RichTextLines object whose font_attr_segs highlight the
  regex matches.  Example use cases:
    1) search for specific items in a large list of items, and
    2) search for specific numerical values in a large tensor.

  Args:
    orig_screen_output: The original RichTextLines, in which the regex find
      is to be performed.
    regex: The regex used for matching.
    font_attr: Font attribute used for highlighting the found result.

  Returns:
    A modified copy of orig_screen_output.

  Raises:
    ValueError: If input str regex is not a valid regular expression.
  """
  new_screen_output = RichTextLines(
      orig_screen_output.lines,
      font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),
      annotations=orig_screen_output.annotations)
  try:
    re_prog = re.compile(regex)
  except sre_constants.error:
    raise ValueError("Invalid regular expression: \"%s\"" % regex)
  regex_match_lines = []
  for line_index, line in enumerate(new_screen_output.lines):
    match_segs = [(match.start(), match.end(), font_attr)
                  for match in re_prog.finditer(line)]
    if not match_segs:
      continue
    segs_by_row = new_screen_output.font_attr_segs
    if line_index in segs_by_row:
      # Merge with pre-existing segments and keep them ordered by start.
      segs_by_row[line_index].extend(match_segs)
      segs_by_row[line_index] = sorted(
          segs_by_row[line_index], key=lambda seg: seg[0])
    else:
      segs_by_row[line_index] = match_segs
    regex_match_lines.append(line_index)
  new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
  return new_screen_output
def wrap_rich_text_lines(inp, cols):
  """Wrap RichTextLines according to maximum number of columns.
  Produces a new RichTextLines object with the text lines, font_attr_segs and
  annotations properly wrapped. This ought to be used sparingly, as in most
  cases, command handlers producing RichTextLines outputs should know the
  screen/panel width via the screen_info kwarg and should produce properly
  length-limited lines in the output accordingly.
  Args:
    inp: Input RichTextLines object.
    cols: Number of columns, as an int.
  Returns:
    1) A new instance of RichTextLines, with line lengths limited to cols.
    2) A list of new (wrapped) line index. For example, if the original input
      consists of three lines and only the second line is wrapped, and it's
      wrapped into two lines, this return value will be: [0, 1, 3].
  Raises:
    ValueError: If inputs have invalid types.
  """
  new_line_indices = []
  if not isinstance(inp, RichTextLines):
    raise ValueError("Invalid type of input screen_output")
  if not isinstance(cols, int):
    raise ValueError("Invalid type of input cols")
  out = RichTextLines([])
  row_counter = 0  # Counter for new row index
  for i in xrange(len(inp.lines)):
    new_line_indices.append(out.num_lines())
    line = inp.lines[i]
    if i in inp.annotations:
      # The annotation attaches to the FIRST wrapped row of the line.
      out.annotations[row_counter] = inp.annotations[i]
    if len(line) <= cols:
      # No wrapping.
      out.lines.append(line)
      if i in inp.font_attr_segs:
        out.font_attr_segs[row_counter] = inp.font_attr_segs[i]
      row_counter += 1
    else:
      # Wrap.
      wlines = []  # Wrapped lines.
      osegs = []
      if i in inp.font_attr_segs:
        osegs = inp.font_attr_segs[i]
      idx = 0
      while idx < len(line):
        if idx + cols > len(line):
          rlim = len(line)
        else:
          rlim = idx + cols
        wlines.append(line[idx:rlim])
        # Re-clip each original segment to the [idx, rlim) window of this
        # wrapped row; a segment may contribute to several wrapped rows.
        for seg in osegs:
          if (seg[0] < rlim) and (seg[1] >= idx):
            # Calculate left bound within wrapped line.
            if seg[0] >= idx:
              lb = seg[0] - idx
            else:
              lb = 0
            # Calculate right bound within wrapped line.
            if seg[1] < rlim:
              rb = seg[1] - idx
            else:
              rb = rlim - idx
            if rb > lb:  # Omit zero-length segments.
              wseg = (lb, rb, seg[2])
              if row_counter not in out.font_attr_segs:
                out.font_attr_segs[row_counter] = [wseg]
              else:
                out.font_attr_segs[row_counter].append(wseg)
        idx += cols
        row_counter += 1
      out.lines.extend(wlines)
  # Copy over keys of annotation that are not row indices.
  for key in inp.annotations:
    if not isinstance(key, int):
      out.annotations[key] = inp.annotations[key]
  return out, new_line_indices
class CommandHandlerRegistry(object):
  """Registry of command handlers for CLI.

  Handler methods (callables) for user commands can be registered with this
  class, which then is able to dispatch commands to the correct handlers and
  retrieve the RichTextLines output.

  For example, suppose you have the following handler defined:
    def echo(argv, screen_info=None):
      return RichTextLines(["arguments = %s" % " ".join(argv),
                            "screen_info = " + repr(screen_info)])

  you can register the handler with the command prefix "echo" and alias "e":
    registry = CommandHandlerRegistry()
    registry.register_command_handler("echo", echo,
        "Echo arguments, along with screen info", prefix_aliases=["e"])

  then to invoke this command handler with some arguments and screen_info, do:
    registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80})

  or with the prefix alias:
    registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80})

  The call will return a RichTextLines object which can be rendered by a CLI.
  """
  # Name and aliases of the built-in help command that __init__ registers.
  HELP_COMMAND = "help"
  HELP_COMMAND_ALIASES = ["h"]
  def __init__(self):
    # A dictionary from command prefix to handler.
    self._handlers = {}
    # A dictionary from prefix alias to prefix.
    self._alias_to_prefix = {}
    # A dictionary from prefix to aliases.
    self._prefix_to_aliases = {}
    # A dictionary from command prefix to help string.
    self._prefix_to_help = {}
    # Introductory text to help information.
    self._help_intro = None
    # Register a default handler for the command "help".
    self.register_command_handler(
        self.HELP_COMMAND,
        self._help_handler,
        "Print this help message.",
        prefix_aliases=self.HELP_COMMAND_ALIASES)
  def register_command_handler(self,
                               prefix,
                               handler,
                               help_info,
                               prefix_aliases=None):
    """Register a callable as a command handler.

    Args:
      prefix: Command prefix, i.e., the first word in a command, e.g.,
        "print" as in "print tensor_1".
      handler: A callable of the following signature:
          foo_handler(argv, screen_info=None),
        where argv is the argument vector (excluding the command prefix) and
        screen_info is a dictionary containing information about the screen,
        such as number of columns, e.g., {"cols": 100}.
        The callable should return:
          1) a RichTextLines object representing the screen output.
        The callable can also raise an exception of the type CommandLineExit,
        which if caught by the command-line interface, will lead to its exit.
        The exception can optionally carry an exit token of arbitrary type.
      help_info: A help string.
      prefix_aliases: Aliases for the command prefix, as a list of str. E.g.,
        shorthands for the command prefix: ["p", "pr"]

    Raises:
      ValueError: If
        1) the prefix is empty, or
        2) handler is not callable, or
        3) a handler is already registered for the prefix, or
        4) elements in prefix_aliases clash with existing aliases.
        5) help_info is not a str.
    """
    if not prefix:
      raise ValueError("Empty command prefix")
    if prefix in self._handlers:
      raise ValueError(
          "A handler is already registered for command prefix \"%s\"" % prefix)
    # Make sure handler is callable.
    if not callable(handler):
      raise ValueError("handler is not callable")
    # Make sure that help info is a string.
    if not isinstance(help_info, six.string_types):
      raise ValueError("help_info is not a str")
    # Process prefix aliases.
    if prefix_aliases:
      for alias in prefix_aliases:
        # An alias may not shadow any existing prefix or alias.
        if self._resolve_prefix(alias):
          raise ValueError(
              "The prefix alias \"%s\" clashes with existing prefixes or "
              "aliases." % alias)
        self._alias_to_prefix[alias] = prefix
      self._prefix_to_aliases[prefix] = prefix_aliases
    # Store handler.
    self._handlers[prefix] = handler
    # Store help info.
    self._prefix_to_help[prefix] = help_info
  def dispatch_command(self, prefix, argv, screen_info=None):
    """Handles a command by dispatching it to a registered command handler.

    Args:
      prefix: Command prefix, as a str, e.g., "print".
      argv: Command argument vector, excluding the command prefix, represented
        as a list of str, e.g.,
        ["tensor_1"]
      screen_info: A dictionary containing screen info, e.g., {"cols": 100}.

    Returns:
      An instance of RichTextLines or None. If any exception is caught during
      the invocation of the command handler, the RichTextLines will wrap the
      error type and message.

    Raises:
      ValueError: If
        1) prefix is empty, or
        2) no command handler is registered for the command prefix, or
        3) the handler is found for the prefix, but it fails to return a
          RichTextLines or raise any exception.
      CommandLineExit:
        If the command handler raises this type of exception, this method will
        simply pass it along.
    """
    if not prefix:
      raise ValueError("Prefix is empty")
    resolved_prefix = self._resolve_prefix(prefix)
    if not resolved_prefix:
      raise ValueError("No handler is registered for command prefix \"%s\"" %
                       prefix)
    handler = self._handlers[resolved_prefix]
    try:
      output = handler(argv, screen_info=screen_info)
    except CommandLineExit as e:
      # Deliberate exit requests propagate to the CLI main loop.
      raise e
    except SystemExit as e:
      # Special case for syntax errors caught by argparse.
      # (argparse calls sys.exit() on a parse failure.)
      lines = ["Syntax error for command: %s" % prefix,
               "For help, do \"help %s\"" % prefix]
      output = RichTextLines(lines)
    except BaseException as e:  # pylint: disable=broad-except
      # Any other failure in a handler is rendered as screen output rather
      # than crashing the CLI.
      lines = ["Error occurred during handling of command: %s %s:" %
               (resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))]
      # Include traceback of the exception.
      lines.append("")
      lines.extend(traceback.format_exc().split("\n"))
      output = RichTextLines(lines)
    if not isinstance(output, RichTextLines) and output is not None:
      raise ValueError(
          "Return value from command handler %s is not None or a RichTextLines "
          "instance" % str(handler))
    return output
  def is_registered(self, prefix):
    """Test if a command prefix or its alias is has a registered handler.

    Args:
      prefix: A prefix or its alias, as a str.

    Returns:
      True iff a handler is registered for prefix.
    """
    return self._resolve_prefix(prefix) is not None
  def get_help(self, cmd_prefix=None):
    """Compile help information into a RichTextLines object.

    Args:
      cmd_prefix: Optional command prefix. As the prefix itself or one of its
        aliases.

    Returns:
      A RichTextLines object containing the help information. If cmd_prefix
      is None, the return value will be the full command-line help. Otherwise,
      it will be the help information for the specified command.
    """
    if not cmd_prefix:
      # Print full help information, in sorted order of the command prefixes.
      help_info = RichTextLines([])
      if self._help_intro:
        # If help intro is available, show it at the beginning.
        help_info.extend(self._help_intro)
      sorted_prefixes = sorted(self._handlers)
      for cmd_prefix in sorted_prefixes:
        lines = self._get_help_for_command_prefix(cmd_prefix)
        lines.append("")
        lines.append("")
        help_info.extend(RichTextLines(lines))
      return help_info
    else:
      return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
  def set_help_intro(self, help_intro):
    """Set an introductory message to help output.

    Args:
      help_intro: (RichTextLines) Rich text lines appended to the
        beginning of the output of the command "help", as introductory
        information.
    """
    self._help_intro = help_intro
  def _help_handler(self, args, screen_info=None):
    """Command handler for "help".

    "help" is a common command that merits built-in support from this class.

    Args:
      args: Command line arguments to "help" (not including "help" itself).
      screen_info: (dict) Information regarding the screen, e.g., the screen
        width in characters: {"cols": 80}

    Returns:
      (RichTextLines) Screen text output.
    """
    _ = screen_info  # Unused currently.
    if not args:
      return self.get_help()
    elif len(args) == 1:
      return self.get_help(args[0])
    else:
      return RichTextLines(["ERROR: help takes only 0 or 1 input argument."])
  def _resolve_prefix(self, token):
    """Resolve command prefix from the prefix itself or its alias.

    Args:
      token: a str to be resolved.

    Returns:
      If resolvable, the resolved command prefix.
      If not resolvable, None.
    """
    # Exact prefixes take precedence over aliases.
    if token in self._handlers:
      return token
    elif token in self._alias_to_prefix:
      return self._alias_to_prefix[token]
    else:
      return None
  def _get_help_for_command_prefix(self, cmd_prefix):
    """Compile the help information for a given command prefix.

    Args:
      cmd_prefix: Command prefix, as the prefix itself or one of its
        aliases.

    Returns:
      A list of str as the help information fo cmd_prefix. If the cmd_prefix
      does not exist, the returned list of str will indicate that.
    """
    lines = []
    resolved_prefix = self._resolve_prefix(cmd_prefix)
    if not resolved_prefix:
      lines.append("Invalid command prefix: \"%s\"" % cmd_prefix)
      return lines
    lines.append(resolved_prefix)
    if resolved_prefix in self._prefix_to_aliases:
      lines.append(HELP_INDENT + "Aliases: " + ", ".join(
          self._prefix_to_aliases[resolved_prefix]))
    lines.append("")
    # Indent every line of the registered help string.
    help_lines = self._prefix_to_help[resolved_prefix].split("\n")
    for line in help_lines:
      lines.append(HELP_INDENT + line)
    return lines
class TabCompletionRegistry(object):
  """Registry for tab completion responses."""
  def __init__(self):
    # Maps a context word (str) to a sorted list of completion items.
    # Context words registered together share the same list object.
    self._comp_dict = {}
  # TODO(cais): Rename method names with "comp" to "*completion*" to avoid
  # confusion.
  def register_tab_comp_context(self, context_words, comp_items):
    """Register a tab-completion context.

    Register that, for each word in context_words, the potential tab-completions
    are the words in comp_items.

    A context word is a pre-existing, completed word in the command line that
    determines how tab-completion works for another, incomplete word in the same
    command line.
    Completion items consist of potential candidates for the incomplete word.

    To give a general example, a context word can be "drink", and the completion
    items can be ["coffee", "tea", "water"]

    Note: A context word can be empty, in which case the context is for the
    top-level commands.

    Args:
      context_words: A list of context words belonging to the context being
        registered. It is a list of str, instead of a single string, to support
        synonym words triggering the same tab-completion context, e.g.,
        both "drink" and the short-hand "dr" can trigger the same context.
      comp_items: A list of completion items, as a list of str.

    Raises:
      TypeError: if the input arguments are not all of the correct types.
    """
    if not isinstance(context_words, list):
      raise TypeError("Incorrect type in context_list: Expected list, got %s" %
                      type(context_words))
    if not isinstance(comp_items, list):
      raise TypeError("Incorrect type in comp_items: Expected list, got %s" %
                      type(comp_items))
    # Sort the completion items on registration, so that later during
    # get_completions calls, no sorting will be necessary.
    sorted_comp_items = sorted(comp_items)
    for context_word in context_words:
      self._comp_dict[context_word] = sorted_comp_items
  def deregister_context(self, context_words):
    """Deregister a list of context words.

    Args:
      context_words: A list of context words to deregister, as a list of str.

    Raises:
      KeyError: if there are word(s) in context_words that do not correspond
        to any registered contexts.
    """
    # Validate all words first so that the operation is all-or-nothing.
    for context_word in context_words:
      if context_word not in self._comp_dict:
        raise KeyError("Cannot deregister unregistered context word \"%s\"" %
                       context_word)
    for context_word in context_words:
      del self._comp_dict[context_word]
  def extend_comp_items(self, context_word, new_comp_items):
    """Add a list of completion items to a completion context.

    Args:
      context_word: A single completion word as a string. The extension will
        also apply to all other context words of the same context.
      new_comp_items: (list of str) New completion items to add.

    Raises:
      KeyError: if the context word has not been registered.
    """
    if context_word not in self._comp_dict:
      raise KeyError("Context word \"%s\" has not been registered" %
                     context_word)
    self._comp_dict[context_word].extend(new_comp_items)
    # Re-establish the sorted-list invariant after extension. Mutating the
    # shared list in place keeps synonym context words in sync; the final
    # assignment preserves the original rebinding behavior.
    self._comp_dict[context_word] = sorted(self._comp_dict[context_word])
  def remove_comp_items(self, context_word, comp_items):
    """Remove a list of completion items from a completion context.

    Args:
      context_word: A single completion word as a string. The removal will
        also apply to all other context words of the same context.
      comp_items: Completion items to remove.

    Raises:
      KeyError: if the context word has not been registered.
    """
    if context_word not in self._comp_dict:
      raise KeyError("Context word \"%s\" has not been registered" %
                     context_word)
    for item in comp_items:
      self._comp_dict[context_word].remove(item)
  def get_completions(self, context_word, prefix):
    """Get the tab completions given a context word and a prefix.

    Args:
      context_word: The context word.
      prefix: The prefix of the incomplete word.

    Returns:
      (1) None if no registered context matches the context_word.
        A list of str for the matching completion items. Can be an empty list
        of a matching context exists, but no completion item matches the
        prefix.
      (2) Common prefix of all the words in the first return value. If the
        first return value is None, this return value will be None, too. If
        the first return value is not None, i.e., a list, this return value
        will be a str, which can be an empty str if there is no common
        prefix among the items of the list.
    """
    if context_word not in self._comp_dict:
      return None, None
    comp_items = self._comp_dict[context_word]
    # BUG FIX: the registered list is already sorted (see
    # register_tab_comp_context / extend_comp_items) and filtering preserves
    # order, so the previous re-sort on every completion call was redundant.
    comp_items = [item for item in comp_items if item.startswith(prefix)]
    return comp_items, self._common_prefix(comp_items)
  def _common_prefix(self, m):
    """Given a list of str, returns the longest common prefix.

    Args:
      m: (list of str) A list of strings.

    Returns:
      (str) The longest common prefix.
    """
    if not m:
      return ""
    # The lexicographic min and max differ at most where any pair differs,
    # so only those two need to be compared.
    s1 = min(m)
    s2 = max(m)
    for i, c in enumerate(s1):
      if c != s2[i]:
        return s1[:i]
    return s1
class CommandHistory(object):
  """Keeps command history and supports lookup."""
  # File in the user's home directory where history is persisted.
  _HISTORY_FILE_NAME = ".tfdbg_history"
  def __init__(self, limit=100, history_file_path=None):
    """CommandHistory constructor.

    Args:
      limit: Maximum number of the most recent commands that this instance
        keeps track of, as an int.
      history_file_path: (str) Manually specified path to history file. Used in
        testing.
    """
    self._commands = []
    self._limit = limit
    if history_file_path:
      self._history_file_path = history_file_path
    else:
      self._history_file_path = self._get_default_history_file_path()
    self._load_history_from_file()
  def _load_history_from_file(self):
    """Populate the in-memory history from the on-disk history file."""
    if not os.path.isfile(self._history_file_path):
      return
    try:
      with open(self._history_file_path, "rt") as history_file:
        raw_lines = history_file.readlines()
      stripped = (line.strip() for line in raw_lines)
      self._commands = [line for line in stripped if line]
      # Limit the size of the history file: keep only the most recent
      # commands and rewrite the file accordingly.
      if len(self._commands) > self._limit:
        self._commands = self._commands[-self._limit:]
        with open(self._history_file_path, "wt") as history_file:
          history_file.write("\n".join(self._commands) + "\n")
    except IOError:
      print("WARNING: writing history file failed.")
  def _add_command_to_history_file(self, command):
    # Best-effort append; failures (e.g., unwritable home dir) are ignored.
    try:
      with open(self._history_file_path, "at") as history_file:
        history_file.write(command + "\n")
    except IOError:
      pass
  @classmethod
  def _get_default_history_file_path(cls):
    """Path of the default history file (under the user's home directory)."""
    home_dir = os.path.expanduser("~")
    return os.path.join(home_dir, cls._HISTORY_FILE_NAME)
  def add_command(self, command):
    """Add a command to the command history.

    Args:
      command: The history command, as a str.

    Raises:
      TypeError: if command is not a str.
    """
    if self._commands and command == self._commands[-1]:
      # Consecutive duplicate commands are not recorded.
      return
    if not isinstance(command, six.string_types):
      raise TypeError("Attempt to enter non-str entry to command history")
    self._commands.append(command)
    if len(self._commands) > self._limit:
      self._commands = self._commands[len(self._commands) - self._limit:]
    self._add_command_to_history_file(command)
  def most_recent_n(self, n):
    """Look up the n most recent commands.

    Args:
      n: Number of most recent commands to look up.

    Returns:
      A list of n most recent commands, or all available most recent commands,
      if n exceeds size of the command history, in chronological order.
    """
    return self._commands[-n:]
  def lookup_prefix(self, prefix, n):
    """Look up the n most recent commands that starts with prefix.

    Args:
      prefix: The prefix to lookup.
      n: Number of most recent commands to look up.

    Returns:
      A list of n most recent commands that have the specified prefix, or all
      available most recent commands that have the prefix, if n exceeds the
      number of history commands with the prefix.
    """
    matches = [cmd for cmd in self._commands if cmd.startswith(prefix)]
    return matches[-n:]
  # TODO(cais): Lookup by regex.
class MenuItem(object):
  """A class for an item in a text-based menu."""
  def __init__(self, caption, content, enabled=True):
    """MenuItem constructor.

    TODO(cais): Nested menu is currently not supported. Support it.

    Args:
      caption: (str) caption of the menu item.
      content: Content of the menu item. For a menu item that triggers
        a command, for example, content is the command string.
      enabled: (bool) whether this menu item is enabled.
    """
    self._caption = caption
    self._content = content
    self._enabled = enabled
  # NOTE: a broken `type` property was removed here: it returned the
  # never-assigned attribute `self._node_type` and therefore could only
  # raise AttributeError. Accessing `.type` still raises AttributeError,
  # so no working caller is affected.
  @property
  def caption(self):
    """(str) Caption of this menu item."""
    return self._caption
  @property
  def content(self):
    """Content of this menu item (e.g., the command string it triggers)."""
    return self._content
  def is_enabled(self):
    """Whether this menu item is currently enabled."""
    return self._enabled
  def disable(self):
    """Mark this menu item as disabled."""
    self._enabled = False
  def enable(self):
    """Mark this menu item as enabled."""
    self._enabled = True
class Menu(object):
  """A class for text-based menu."""
  def __init__(self, name=None):
    """Menu constructor.

    Args:
      name: (str or None) name of this menu.
    """
    self._name = name
    self._items = []
  def append(self, item):
    """Append an item to the Menu.

    Args:
      item: (MenuItem) the item to be appended.
    """
    self._items.append(item)
  def insert(self, index, item):
    """Insert a MenuItem at the given position."""
    self._items.insert(index, item)
  def num_items(self):
    """Number of items currently in this menu."""
    return len(self._items)
  def captions(self):
    """Captions of all items, in menu order."""
    return [entry.caption for entry in self._items]
  def caption_to_item(self, caption):
    """Get a MenuItem from the caption.

    Args:
      caption: (str) The caption to look up.

    Returns:
      (MenuItem) The first-match menu item with the caption, if any.

    Raises:
      LookupError: If a menu item with the caption does not exist.
    """
    all_captions = self.captions()
    if caption not in all_captions:
      raise LookupError("There is no menu item with the caption \"%s\"" %
                        caption)
    return self._items[all_captions.index(caption)]
  def format_as_single_line(self,
                            prefix=None,
                            divider=" | ",
                            enabled_item_attrs=None,
                            disabled_item_attrs=None):
    """Format the menu as a single-line RichTextLines object.

    Args:
      prefix: (str) String added to the beginning of the line.
      divider: (str) The dividing string between the menu items.
      enabled_item_attrs: (list or str) Attributes applied to each enabled
        menu item, e.g., ["bold", "underline"].
      disabled_item_attrs: (list or str) Attributes applied to each
        disabled menu item, e.g., ["red"].

    Returns:
      (RichTextLines) A single-line output representing the menu, with
        font_attr_segs marking the individual menu items.
    """
    def _as_attr_list(attrs):
      # Normalize a single attribute into a one-element list; leave None
      # and lists untouched.
      if attrs is None or isinstance(attrs, list):
        return attrs
      return [attrs]
    enabled_item_attrs = _as_attr_list(enabled_item_attrs)
    disabled_item_attrs = _as_attr_list(disabled_item_attrs)
    line = "" if prefix is None else prefix
    segments = []
    for entry in self._items:
      begin = len(line)
      line += entry.caption
      end = len(line)
      if entry.is_enabled():
        # The MenuItem itself is attached as the leading attribute so the
        # renderer can map the segment back to the item.
        attrs = [entry] + (enabled_item_attrs or [])
        segments.append((begin, end, attrs))
      elif disabled_item_attrs:
        segments.append((begin, end, disabled_item_attrs))
      line += divider
    return RichTextLines(line, font_attr_segs={0: segments})
|
apache-2.0
|
himikof/aiohttp_debugtoolbar
|
aiohttp_debugtoolbar/tbtools/tbtools.py
|
2
|
14461
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import inspect
import traceback
import codecs
from tokenize import TokenError
from aiohttp.helpers import reify
from .console import Console
from ..tbtools import text_
from ..utils import render, STATIC_ROUTE_NAME, APP_KEY
from ..utils import escape
from ..utils import ROOT_ROUTE_NAME
from ..utils import EXC_ROUTE_NAME
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
FRAME_HTML = '''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = '<table class=source>%s</table>'
SOURCE_LINE_HTML = '''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def get_current_traceback(*, ignore_system_exceptions=False,
                          show_hidden_frames=False, skip=0, exc):
    """Get the current exception info as `Traceback` object.

    If ``ignore_system_exceptions`` is true and the current exception is a
    system exception (e.g. ``SystemExit``, ``KeyboardInterrupt``,
    ``GeneratorExit``), ``exc`` is re-raised instead of being wrapped.
    ``skip`` drops up to that many outermost traceback frames, and
    ``show_hidden_frames`` disables the paste-spec frame filtering.
    Must be called from inside an ``except`` block (relies on
    ``sys.exc_info()``).
    """
    info = sys.exc_info()
    return get_traceback(info,
                         ignore_system_exceptions=ignore_system_exceptions,
                         show_hidden_frames=show_hidden_frames, skip=skip,
                         exc=exc)
def get_traceback(info, *, ignore_system_exceptions=False,
                  show_hidden_frames=False, skip=0, exc):
    """Wrap an ``exc_info`` triple in a `Traceback` object.

    If ``ignore_system_exceptions`` is true and the exception type is a
    system exception, ``exc`` is re-raised instead. Up to ``skip`` outermost
    traceback frames are dropped, and hidden frames are filtered out unless
    ``show_hidden_frames`` is true.
    """
    exc_type, exc_value, tb = info
    if ignore_system_exceptions and exc_type in system_exceptions:
        raise exc
    # Advance past up to `skip` frames, stopping early at the innermost one.
    remaining = skip
    while remaining > 0 and tb.tb_next is not None:
        tb = tb.tb_next
        remaining -= 1
    wrapped = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        wrapped.filter_hidden_frames()
    return wrapped
class Line(object):
    """Helper for the source renderer: one displayed line of source code."""
    __slots__ = ('lineno', 'code', 'in_frame', 'current')

    def __init__(self, lineno, code):
        self.lineno = lineno
        self.code = code
        self.in_frame = False
        self.current = False

    @property
    def classes(self):
        """CSS classes for this line, reflecting its highlight state."""
        css = ['line']
        if self.in_frame:
            css.append('in-frame')
        if self.current:
            css.append('current')
        return css

    def render(self):
        """Render this line as an HTML table row."""
        context = {
            'classes': text_(' '.join(self.classes)),
            'lineno': self.lineno,
            'code': escape(self.code),
        }
        return SOURCE_LINE_HTML % context
class Traceback:
    """Wraps a traceback and renders it for the debug toolbar."""
    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        if not isinstance(exc_type, str):
            exception_type = exc_type.__name__
            if exc_type.__module__ not in ('__builtin__', 'exceptions'):
                exception_type = exc_type.__module__ + '.' + exception_type
        else:
            exception_type = exc_type
        self.exception_type = exception_type
        # we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
        self.frames = []
        while tb:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next
    def filter_hidden_frames(self):
        """Remove the frames according to the paste spec."""
        if not self.frames:
            return
        new_frames = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ('before', 'before_and_this'):
                # Discard everything collected so far.
                new_frames = []
                hidden = False
                if hide == 'before_and_this':
                    continue
            elif hide in ('reset', 'reset_and_this'):
                hidden = False
                if hide == 'reset_and_this':
                    continue
            elif hide in ('after', 'after_and_this'):
                hidden = True
                if hide == 'after_and_this':
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)
        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        if len(new_frames) == 1 and self.frames[0].module == 'codeop':
            del self.frames[:]
        # if the last frame is missing something went terrible wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames
    def is_syntax_error(self):
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)
    is_syntax_error = property(is_syntax_error)
    def exception(self):
        """String representation of the exception."""
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        return ''.join(buf).strip()
    exception = property(exception)
    def log(self, logfile=None):
        """Log the ASCII traceback into a file object."""
        if logfile is None:
            logfile = sys.stderr
        # BUG FIX: the old code encoded the traceback to bytes and then
        # concatenated the str '\n', which raises TypeError on Python 3.
        # Text streams such as sys.stderr take str directly.
        logfile.write(self.plaintext.rstrip() + '\n')
    def render_summary(self, include_title=True, request=None):
        """Render the traceback for the interactive console."""
        title = ''
        frames = []
        classes = ['traceback']
        if not self.frames:
            classes.append('noframe-traceback')
        if include_title:
            if self.is_syntax_error:
                title = text_('Syntax Error')
            else:
                title = text_('Traceback <small>(most recent call last)'
                              '</small>')
        for frame in self.frames:
            frames.append(
                text_('<li%s>%s') % (
                    frame.info and text_(' title="%s"' % escape(frame.info)) or
                    text_(''),
                    frame.render()
                ))
        if self.is_syntax_error:
            description_wrapper = text_('<pre class=syntaxerror>%s</pre>')
        else:
            description_wrapper = text_('<blockquote>%s</blockquote>')
        vars = {
            'classes': text_(' '.join(classes)),
            'title': title and text_('<h3 class="traceback">%s</h3>'
                                     % title) or text_(''),
            'frames': text_('\n'.join(frames)),
            'description': description_wrapper % escape(self.exception),
        }
        app = request.app
        return render('exception_summary.jinja2', app, vars, request=request)
    def render_full(self, request, lodgeit_url=None):
        """Render the Full HTML page with the traceback info."""
        static_path = request.app.router[STATIC_ROUTE_NAME].url(filename='')
        root_path = request.app.router[ROOT_ROUTE_NAME].url()
        exc = escape(self.exception)
        summary = self.render_summary(include_title=False, request=request)
        token = request.app[APP_KEY]['pdtb_token']
        qs = {'token': token, 'tb': str(self.id)}
        url = request.app.router[EXC_ROUTE_NAME].url(query=qs)
        evalex = request.app[APP_KEY]['exc_history'].eval_exc
        vars = {
            'evalex': evalex and 'true' or 'false',
            'console': 'false',
            'lodgeit_url': escape(lodgeit_url),
            'title': exc,
            'exception': exc,
            'exception_type': escape(self.exception_type),
            'summary': summary,
            'plaintext': self.plaintext,
            # Collapse runs of dashes so the plaintext is safe to embed.
            'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
            'traceback_id': self.id,
            'static_path': static_path,
            'token': token,
            'root_path': root_path,
            'url': url,
        }
        return render('exception.jinja2', request.app, vars, request=request)
    def generate_plaintext_traceback(self):
        """Like the plaintext attribute but returns a generator"""
        yield text_('Traceback (most recent call last):')
        for frame in self.frames:
            yield text_('  File "%s", line %s, in %s' % (
                frame.filename,
                frame.lineno,
                frame.function_name
            ))
            yield text_('    ' + frame.current_line.strip())
        yield text_(self.exception, 'utf-8')
    @reify
    def plaintext(self):
        # Cached ASCII rendering of the whole traceback.
        return text_('\n'.join(self.generate_plaintext_traceback()))
    # The object's identity doubles as a lookup key for the exception views.
    id = property(lambda x: id(x))
class Frame(object):
    """A single frame in a traceback."""
    def __init__(self, exc_type, exc_value, tb):
        self.lineno = tb.tb_lineno
        self.function_name = tb.tb_frame.f_code.co_name
        self.locals = tb.tb_frame.f_locals
        self.globals = tb.tb_frame.f_globals
        fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
        if fn[-4:] in ('.pyo', '.pyc'):
            fn = fn[:-1]
        # if it's a file on the file system resolve the real filename.
        if os.path.isfile(fn):
            fn = os.path.realpath(fn)
        self.filename = fn
        self.module = self.globals.get('__name__')
        self.loader = self.globals.get('__loader__')
        self.code = tb.tb_frame.f_code
        # support for paste's traceback extensions
        self.hide = self.locals.get('__traceback_hide__', False)
        info = self.locals.get('__traceback_info__')
        if info is not None:
            try:
                info = str(info)
            except UnicodeError:
                # BUG FIX: str objects have no .decode() on Python 3; fall
                # back to repr(), which cannot raise UnicodeError.
                info = repr(info)
        self.info = info
    def render(self):
        """Render a single frame in a traceback."""
        return FRAME_HTML % {
            'id': self.id,
            'filename': escape(self.filename),
            'lineno': self.lineno,
            'function_name': escape(self.function_name),
            'current_line': escape(self.current_line.strip())
        }
    def get_annotated_lines(self):
        """Helper function that returns lines with extra information."""
        lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
        # find function definition and mark lines
        if hasattr(self.code, 'co_firstlineno'):
            lineno = self.code.co_firstlineno - 1
            while lineno > 0:
                if _funcdef_re.match(lines[lineno].code):
                    break
                lineno -= 1
            try:
                offset = len(inspect.getblock([x.code + '\n' for x
                                               in lines[lineno:]]))
            except TokenError:
                offset = 0
            for line in lines[lineno:lineno + offset]:
                line.in_frame = True
        # mark current line
        try:
            lines[self.lineno - 1].current = True
        except IndexError:
            pass
        return lines
    def render_source(self):
        """Render the sourcecode."""
        return SOURCE_TABLE_HTML % text_('\n'.join(line.render() for line in
                                                   self.get_annotated_lines()))
    def eval(self, code, mode='single'):
        """Evaluate code in the context of the frame."""
        if isinstance(code, str):
            # BUG FIX: the old code had a duplicated isinstance guard and
            # prepended the str BOM to code.encode('utf-8') (bytes), raising
            # TypeError on Python 3. compile() accepts str source directly.
            code = compile(code, '<interactive>', mode)
        if mode != 'exec':
            return eval(code, self.globals, self.locals)
        exec(code, self.globals, self.locals)
    @reify
    def sourcelines(self):
        """The sourcecode of the file as list of unicode strings."""
        # get sourcecode from loader or file
        source = None
        if self.loader is not None:
            try:
                if hasattr(self.loader, 'get_source'):
                    source = self.loader.get_source(self.module)
                elif hasattr(self.loader, 'get_source_by_code'):
                    source = self.loader.get_source_by_code(self.code)
            except Exception:
                # we munch the exception so that we don't cause troubles
                # if the loader is broken.
                pass
        if source is None:
            try:
                f = open(self.filename)
            except IOError:
                return []
            try:
                source = f.read()
            finally:
                f.close()
        # already unicode? return right away
        if isinstance(source, str):
            return source.splitlines()
        # ``source`` is bytes from here on (raw loader data).
        charset = 'utf-8'
        # BUG FIX: compare against a bytes BOM (the old code compared bytes
        # with the str UTF8_COOKIE), and search the first lines with
        # _coding_re (the old code re-searched _line_re, which always matches
        # and yielded the whole line as the "charset").
        if source.startswith(codecs.BOM_UTF8):
            source = source[3:]
        else:
            text = source.decode('latin-1', 'replace')  # never fails
            for idx, match in enumerate(_line_re.finditer(text)):
                match = _coding_re.search(match.group())
                if match is not None:
                    charset = match.group(1)
                    break
                if idx > 1:
                    # PEP 263 coding declarations appear in the first two
                    # lines only.
                    break
        # on broken cookies we fall back to utf-8 too
        try:
            codecs.lookup(charset)
        except LookupError:
            charset = 'utf-8'
        return source.decode(charset, 'replace').splitlines()
    @property
    def current_line(self):
        # The line the frame points at, or '' when source is unavailable.
        try:
            return self.sourcelines[self.lineno - 1]
        except IndexError:
            return text_('')
    @reify
    def console(self):
        # Interactive console bound to this frame's namespaces.
        return Console(self.globals, self.locals)
    # The object's identity doubles as a lookup key for the exception views.
    id = property(lambda x: id(x))
|
apache-2.0
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/core/management/commands/showmigrations.py
|
440
|
4901
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
    # Shown by ``manage.py help showmigrations``.
    help = "Shows all available migrations for the current project"
    def add_arguments(self, parser):
        """Register CLI options: app labels, --database, and --list/--plan."""
        parser.add_argument('app_labels', nargs='*',
            help='App labels of applications to limit the output to.')
        parser.add_argument('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to synchronize. Defaults to the "default" database.')
        # --list and --plan are mutually exclusive output formats.
        formats = parser.add_mutually_exclusive_group()
        formats.add_argument('--list', '-l', action='store_const', dest='format', const='list',
            help='Shows a list of all migrations and which are applied.')
        formats.add_argument('--plan', '-p', action='store_const', dest='format', const='plan',
            help='Shows all migrations in the order they will be applied.')
        parser.set_defaults(format='list')
    def handle(self, *args, **options):
        """Dispatch to show_plan or show_list depending on the chosen format."""
        self.verbosity = options.get('verbosity')
        # Get the database we're operating from
        db = options.get('database')
        connection = connections[db]
        if options['format'] == "plan":
            return self.show_plan(connection)
        else:
            return self.show_list(connection, options['app_labels'])
    def show_list(self, connection, app_names=None):
        """
        Shows a list of all migrations on the system, or only those of
        some named apps.
        """
        # Load migrations from disk/DB
        loader = MigrationLoader(connection, ignore_no_migrations=True)
        graph = loader.graph
        # If we were passed a list of apps, validate it
        if app_names:
            invalid_apps = []
            for app_name in app_names:
                if app_name not in loader.migrated_apps:
                    invalid_apps.append(app_name)
            if invalid_apps:
                raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
        # Otherwise, show all apps in alphabetic order
        else:
            app_names = sorted(loader.migrated_apps)
        # For each app, print its migrations in order from oldest (roots) to
        # newest (leaves).
        for app_name in app_names:
            self.stdout.write(app_name, self.style.MIGRATE_LABEL)
            # Track what has been printed so shared ancestors are not
            # repeated across multiple leaf nodes of the same app.
            shown = set()
            for node in graph.leaf_nodes(app_name):
                for plan_node in graph.forwards_plan(node):
                    if plan_node not in shown and plan_node[0] == app_name:
                        # Give it a nice title if it's a squashed one
                        title = plan_node[1]
                        if graph.nodes[plan_node].replaces:
                            title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
                        # Mark it as applied/unapplied
                        if plan_node in loader.applied_migrations:
                            self.stdout.write(" [X] %s" % title)
                        else:
                            self.stdout.write(" [ ] %s" % title)
                        shown.add(plan_node)
            # If we didn't print anything, then a small message
            if not shown:
                self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)
    def show_plan(self, connection):
        """
        Shows all known migrations in the order they will be applied
        """
        # Load migrations from disk/DB
        loader = MigrationLoader(connection)
        graph = loader.graph
        targets = graph.leaf_nodes()
        plan = []
        seen = set()
        # Generate the plan
        for target in targets:
            for migration in graph.forwards_plan(target):
                if migration not in seen:
                    plan.append(graph.nodes[migration])
                    seen.add(migration)
        # Output
        def print_deps(migration):
            # Format a migration's dependencies, resolving "__first__" to the
            # dependency app's actual root migration when one exists.
            out = []
            for dep in migration.dependencies:
                if dep[1] == "__first__":
                    roots = graph.root_nodes(dep[0])
                    dep = roots[0] if roots else (dep[0], "__first__")
                out.append("%s.%s" % dep)
            if out:
                return " ... (%s)" % ", ".join(out)
            return ""
        for migration in plan:
            deps = ""
            # Dependencies are only shown at verbosity 2 and above.
            if self.verbosity >= 2:
                deps = print_deps(migration)
            if (migration.app_label, migration.name) in loader.applied_migrations:
                self.stdout.write("[X] %s%s" % (migration, deps))
            else:
                self.stdout.write("[ ] %s%s" % (migration, deps))
|
artistic-2.0
|
jms/FlyNi-API
|
docs/conf.py
|
1
|
7787
|
# -*- coding: utf-8 -*-
#
# FlyNi-API documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# No Sphinx extensions enabled for this project.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FlyNi-API'
copyright = u"2015, Oscar Cortez"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
# Output file base name for HTML help builder.
htmlhelp_basename = 'FlyNi-APIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'FlyNi-API.tex',
     u'FlyNi-API Documentation',
     u"Oscar Cortez", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'FlyNi-API', u'FlyNi-API Documentation',
     [u"Oscar Cortez"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'FlyNi-API', u'FlyNi-API Documentation',
     u"Oscar Cortez", 'FlyNi-API',
     'The FlyNi API', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
bsd-3-clause
|
HashAppsLabs/TiCairo
|
plugins/ti.alloy/plugin.py
|
229
|
4668
|
import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    Backport of Python 2.7's ``subprocess.check_output`` (pure Python in the
    stdlib), kept here so the plugin also works on Python 2.6.

    Raises ``subprocess.CalledProcessError`` when the command exits with a
    non-zero status; the captured output is attached to the exception's
    ``output`` attribute.
    """
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured, _unused_err = proc.communicate()
    status = proc.poll()
    if not status:
        return captured
    # Non-zero exit: mirror check_call's error reporting, plus the output.
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    failure = subprocess.CalledProcessError(status, command)
    failure.output = captured
    raise failure
def compile(config):
    """Titanium build-plugin hook: locate the alloy/node binaries and run
    the Alloy compiler over the project's app/ directory.

    config is the builder-supplied dict (keys used here: 'project_dir',
    'platform', and per-platform version/deploy-type entries).
    Exits the process on failure (exit 1: binary not found or compile
    error, exit 2: unexpected environment error).

    NOTE: this file is Python 2 (print statements).
    """
    paths = {}
    binaries = ["alloy","node"]
    for binary in binaries:
        try:
            # see if the environment variable is defined
            # (ALLOY_PATH for alloy, ALLOY_NODE_PATH for node)
            paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
        except KeyError as ex:
            # next try PATH, and then our guess paths
            if sys.platform == "darwin" or sys.platform.startswith('linux'):
                userPath = os.environ["HOME"]
                guessPaths = [
                    "/usr/local/bin/"+binary,
                    "/opt/local/bin/"+binary,
                    userPath+"/local/bin/"+binary,
                    "/opt/bin/"+binary,
                    "/usr/bin/"+binary,
                    "/usr/local/share/npm/bin/"+binary
                ]
                try:
                    # `which` succeeds when the binary is on PATH
                    binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
                    print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
                except:
                    print "[WARN] Couldn't find %s on your PATH:" % binary
                    print "[WARN] %s" % os.environ["PATH"]
                    print "[WARN]"
                    print "[WARN] Checking for %s in a few default locations:" % binary
                    for p in guessPaths:
                        sys.stdout.write("[WARN] %s -> " % p)
                        if os.path.exists(p):
                            binaryPath = p
                            print "FOUND"
                            break
                        else:
                            print "not found"
                            binaryPath = None
                if binaryPath is None:
                    print "[ERROR] Couldn't find %s" % binary
                    sys.exit(1)
                else:
                    paths[binary] = binaryPath
            # no guesses on windows, just use the PATH
            elif sys.platform == "win32":
                paths["alloy"] = "alloy.cmd"
    f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
    if os.path.exists(f):
        print "[INFO] alloy app found at %s" % f
        rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
        # Per-platform defaults, overridden below from the builder config.
        devicefamily = 'none'
        simtype = 'none'
        version = '0'
        deploytype = 'development'
        if config['platform']==u'ios':
            version = config['iphone_version']
            devicefamily = config['devicefamily']
            deploytype = config['deploytype']
        if config['platform']==u'android':
            builder = config['android_builder']
            version = builder.tool_api_level
            deploytype = config['deploy_type']
        if config['platform']==u'mobileweb':
            builder = config['mobileweb_builder']
            deploytype = config['deploytype']
        # Alloy's --config value: comma-separated key=value pairs.
        cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
        if sys.platform == "win32":
            # alloy.cmd is directly executable on Windows
            cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
        else:
            cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
        print "[INFO] Executing Alloy compile:"
        print "[INFO] %s" % " ".join(cmd)
        try:
            print check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            if hasattr(ex, 'output'):
                print ex.output
            print "[ERROR] Alloy compile failed"
            retcode = 1
            if hasattr(ex, 'returncode'):
                retcode = ex.returncode
            sys.exit(retcode)
        except EnvironmentError as ex:
            print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
            sys.exit(2)
|
apache-2.0
|
slashback/handsome-echo-backend
|
tests/feed.py
|
1
|
5157
|
from tornado import testing
from mainWebserver import make_app
import json
from echo.feeds.feed import Feed
HTTP_SUCCESS_CODE = 200
HTTP_NOT_FOUND_CODE = 404
class CustomFeedTest(testing.AsyncHTTPTestCase):
    """End-to-end CRUD test for the /api/custom_feed endpoints.

    Creates a feed from two IN predicates, fetches it (JSON and XML
    renderings), deletes it, and verifies a subsequent GET returns 404.
    """

    def get_app(self):
        # Tornado test harness hook: the application under test.
        return make_app()

    def test_crud_feed(self):
        payload = {
            'predicates': [
                {
                    'entity': 'contributors',
                    'value': 'http://echo.msk.ru/contributors/5013/',
                    'type': 'IN'
                },
                {
                    'entity': 'guests',
                    'value': 'http://echo.msk.ru/guests/293/',
                    'type': 'IN'
                },
            ]
        }
        # create
        response = self.fetch('/api/custom_feed', method="POST", body=json.dumps(payload))
        self.assertEqual(response.code, HTTP_SUCCESS_CODE)
        body = json.loads(response.body.decode('utf-8'))
        self.assertIn('feed_id', body)
        _id = body['feed_id']
        # get
        response = self.fetch('/api/custom_feed/{}'.format(_id), method="GET")
        self.assertEqual(response.code, HTTP_SUCCESS_CODE)
        # body = json.loads(response.body.decode('utf-8'))
        # self.assertGreater(len(body), 1)
        # get feed xml
        response = self.fetch('/api/feed/{}'.format(_id), method="GET")
        self.assertEqual(response.code, HTTP_SUCCESS_CODE)
        # delete
        response = self.fetch('/api/custom_feed/{}'.format(_id), method="DELETE")
        self.assertEqual(response.code, HTTP_SUCCESS_CODE)
        # get 404
        response = self.fetch('/api/custom_feed/{}'.format(_id), method="GET")
        self.assertEqual(response.code, HTTP_NOT_FOUND_CODE)
class CustomFeedBuilderTest(testing.AsyncTestCase):
    """Unit tests for Feed.build(): predicate list -> MongoDB query dict.

    IN predicates become $elemMatch clauses OR-ed together; NOT IN
    predicates become $not/$elemMatch clauses AND-ed with the OR group.
    """

    maxDiff = None

    def test_builder(self):
        # Two IN predicates -> a single top-level $or.
        predicates = [
            {
                'entity': 'contributors',
                'value': "http://echo.msk.ru/contributors/5013/",
                'type': 'IN'
            },
            {
                'entity': 'guests',
                'value': "http://echo.msk.ru/guests/293/",
                'type': 'IN'
            },
        ]
        feed = Feed(predicates)
        result = feed.build()
        expected_query = {
            '$or': [
                {
                    'contributors': {
                        '$elemMatch': {
                            'url': 'http://echo.msk.ru/contributors/5013/'
                        }
                    }
                },
                {
                    'guests': {
                        '$elemMatch': {
                            'url': 'http://echo.msk.ru/guests/293/'
                        }
                    }
                }
            ]
        }
        self.assertDictEqual(expected_query, result)

    def test_advance_builder(self):
        # Mixed IN / NOT IN predicates -> $and of the negations plus the $or.
        predicates = [
            {
                'entity': 'contributors',
                'value': "http://echo.msk.ru/contributors/5013/",
                'type': 'IN'
            },
            {
                'entity': 'guests',
                'value': "http://echo.msk.ru/guests/293/",
                'type': 'IN'
            },
            {
                'entity': 'guests',
                'value': "http://echo.msk.ru/guests/755/",
                'type': 'NOT IN'
            },
            {
                'entity': 'contributors',
                'value': "http://echo.msk.ru/contributors/898/",
                'type': 'NOT IN'
            },
        ]
        feed = Feed(predicates)
        result = feed.build()
        expected_query = {
            '$and': [
                {
                    'contributors': {
                        '$not': {
                            '$elemMatch': {
                                'url': 'http://echo.msk.ru/contributors/898/'
                            },
                        },
                    }
                },
                {
                    'guests': {
                        '$not': {
                            '$elemMatch': {
                                'url': 'http://echo.msk.ru/guests/755/'
                            },
                        },
                    }
                },
                {
                    '$or': [
                        {
                            'contributors': {
                                '$elemMatch': {
                                    'url': 'http://echo.msk.ru/contributors/5013/'
                                },
                            },
                        },
                        {
                            'guests': {
                                '$elemMatch': {
                                    'url': 'http://echo.msk.ru/guests/293/'
                                },
                            },
                        },
                    ],
                }
            ],
        }
        self.assertDictEqual(expected_query, result)
|
mit
|
jgressmann/sc2links
|
addon.py
|
1
|
11878
|
__author__ = 'jgressmann'
from datetime import date
import pickle
import sys
import traceback
import urllib
import urlparse
#import urlresolver
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import zlib
import resources.lib.sc2links as sc2links
addon = xbmcaddon.Addon()
#__addonname__ = addon.getAddonInfo('name')
addonid = addon.getAddonInfo('id')
def debug(val):
    """Log *val* to the Kodi debug log, prefixed with this addon's id.

    Non-string values are repr()-ed first.  (Python 2: checks both str
    and unicode.)
    """
    if isinstance(val, str) or isinstance(val, unicode):
        pass
    else:
        val = repr(val)
    message = u'%s: %s' % (addonid, val)
    # Kodi's log API wants encoded bytes on Python 2.
    xbmc.log(message.encode('utf-8'), xbmc.LOGDEBUG)
def build_url(query):
    """Return a plugin:// URL back into this addon carrying *query* as
    URL-encoded parameters (sys.argv[0] is the addon's base URL)."""
    return '%s?%s' % (sys.argv[0], urllib.urlencode(query))
# Kodi invokes the addon as: script.py <handle> <query-string>.
handle = int(sys.argv[1])
# Strip the leading '?' and parse the routing parameters.
args = dict(urlparse.parse_qsl(sys.argv[2][1:]))
debug("url args: " + repr(args))
# User setting: show player names in match labels (spoilers).
revealMatches = addon.getSetting('reveal_matches') == 'true'
debug('reveal matches: ' + str(revealMatches))
# User setting: how many recent years of shows to fetch (0 = all).
lastNYears = 0
try:
    lastNYears = int(addon.getSetting('last_n_years_to_fetch'))
except:
    pass
debug('last_n_years_to_fetch: ' + repr(lastNYears))
def get_youtube_info(url):
    """Extract (video_id, start_time) from a YouTube URL.

    Handles /embed/<id>, watch?v=<id>[&t=...] and youtu.be/<id>[?t=...]
    forms.  Returns None when no video id can be found; start_time is
    whatever the 't' query parameter carried (embed URLs yield None).
    """
    parts = urlparse.urlparse(url)
    params = urlparse.parse_qs(parts.query)
    video_id = None
    start = None
    if parts.path.startswith('/embed/'):
        # e.g. https://www.youtube.com/embed/TdjhjhbT3eA
        video_id = parts.path[len('/embed/'):]
    else:
        # e.g. https://www.youtube.com/watch?v=XqywDF675kQ
        start = params.get('t', [''])[0]
        video_id = params.get('v', [''])[0]
        if not video_id:
            # e.g. https://youtu.be/3A3guAd42Dw?t=9
            if parts.hostname == 'youtu.be':
                segments = (parts.path or '').split('/')
                if len(segments) == 2:
                    video_id = segments[1]
    if video_id:
        return (video_id, start)
def get_youtube_plugin_url(web_url):
    """Translate a YouTube web URL into this addon's 'play' route that
    targets the Kodi YouTube plugin.

    Returns None (after logging) when no video id could be extracted.
    """
    data = get_youtube_info(web_url)
    if data:
        id = data[0]
        time = data[1]
        if id:
            args = {'play': 'plugin://plugin.video.youtube/play/?video_id={}'.format(id)}
            if time:
                # optional start offset, consumed by play()
                args['time'] = time
            return build_url(args)
    debug('failed to get youtube id for ' + repr(web_url))
def get_twitch_info(url):
    """Extract (video_id, start_seconds) from a Twitch VOD URL.

    Handles https://www.twitch.tv/videos/<id>?t=07h49m09s and
    https://player.twitch.tv/?video=v<id>&time=... forms.  Either tuple
    element may be None when it cannot be determined.
    """
    def _twitch_time_to_seconds(t):
        # '07h49m09s' -> 28149; unrecognized characters are ignored.
        unit_seconds = {'h': 3600, 'm': 60, 's': 1}
        total = 0
        digits = ''
        for ch in t:
            if ch in unit_seconds:
                if len(digits):
                    total += int(digits) * unit_seconds[ch]
                digits = ''
            elif ch.isdigit():
                digits += ch
            else:
                # oh well, skip anything else
                pass
        return total

    video_id = None
    start = None
    parts = urlparse.urlparse(url)
    params = urlparse.parse_qs(parts.query)
    if parts.path.find('/videos/') == 0:
        # https://www.twitch.tv/videos/<id>?t=<spec>
        video_id = parts.path[len('/videos/'):]
        if video_id and video_id.isdigit():
            start = params.get('t', [None])[0]
            if start:
                start = _twitch_time_to_seconds(start)
    else:
        # https://player.twitch.tv/?video=v<id>&autoplay=false&time=<spec>
        video_id = params.get('video', [None])[0]
        if video_id:
            video_id = video_id[1:]  # drop the leading 'v'
            start = params.get('time', [None])[0]
            if start:
                start = _twitch_time_to_seconds(start)
    return (video_id, start)
def get_twitch_plugin_url(web_url):
    """Translate a Twitch web URL into this addon's 'play' route that
    targets the Kodi Twitch plugin.

    Returns None (after logging) when no video id could be extracted.
    """
    data = get_twitch_info(web_url)
    if data:
        id = data[0]
        time = data[1]
        if id:
            #@dispatcher.register(MODES.PLAY, kwargs=['seek_time', 'channel_id', 'video_id', 'slug', 'ask', 'use_player', 'quality'])
            args = {'play': 'plugin://plugin.video.twitch/?mode=play&video_id={}'.format(id)}
            if time:
                # optional start offset in seconds, consumed by play()
                args['time'] = time
            return build_url(args)
    debug('failed to get twitch id for ' + repr(web_url))
# Whether pickled navigation payloads embedded in plugin URLs are
# zlib-compressed (see build()).
compress = True
def by_name(lhs, rhs):
    # Comparator for sorted(..., cmp=...) -- Python 2 only (`cmp` builtin).
    return cmp(lhs.name, rhs.name)
def build():
    """Build the Kodi directory listing for the current navigation level.

    The level and all navigation state travel in the module-level `args`
    dict (parsed from the plugin URL); fetched sc2links trees are pickled
    (and optionally zlib-compressed) into 'data0'/'data1' URL parameters
    so deeper levels don't re-fetch.  Levels: 0 = By Name/By Year menu,
    1 = event list, 2 = name/year drill-down, 3 = stages, 4 = match VODs.
    """
    level = int(args.get('level', 0))
    debug("level " + repr(level))
    args.update({'level': level+1})
    # data0: pickled top-level event list forwarded from level 1.
    data0 = args.get('data0', None)
    if data0:
        if compress:
            #debug("data1z " + repr(data0))
            data0 = zlib.decompress(data0)
            #debug("data1p " + repr(data0))
        data0 = pickle.loads(data0)
    #debug("data0 " + repr(data0))
    # data1: pickled stage list forwarded from level 3.
    data1 = args.get('data1', None)
    if data1:
        if compress:
            #debug("data1z " + repr(data1))
            data1 = zlib.decompress(data1)
            #debug("datap " + repr(data1))
        data1 = pickle.loads(data1)
        #debug("data1 " + repr(data1))
    year = args.get('year', None)
    if year:
        year = int(year)
    debug("year " + repr(year))
    name = args.get('name', None)
    debug("name " + repr(name))
    stage_name = args.get('stage_name', None)
    debug("stage_name " + repr(stage_name))
    overrideFilter = args.get('override_filter', False)
    debug("overrideFilter " + repr(overrideFilter))
    # yearsFiltered = args.get('years_filtered', None)
    # debug("yearsFiltered " + repr(yearsFiltered))
    if level == 0:
        # Root menu: two sort orders over the same data.
        args.update({'order': 0})
        url = build_url(args)
        xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem('By Name'), isFolder=1)
        args.update({'order': 1})
        url = build_url(args)
        xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem('By Year'), isFolder=1)
    elif level == 1:
        order = int(args.get('order', 0))
        # want only the last n years worth of shows?
        yearsFiltered = not overrideFilter and lastNYears >= 1
        if yearsFiltered:
            currentYear = date.today().year + 1
            years = [x for x in range(currentYear-(lastNYears), currentYear)]
            #debug("years: " + repr(years))
            sc2 = sc2links.Sc2Links(years=years)
        else:
            sc2 = sc2links.Sc2Links()
        children = sc2.children
        # debug("children: " + repr(children))
        # Forward the fetched tree to deeper levels via the URL.
        data = pickle.dumps(children)
        if compress:
            data = zlib.compress(data)
        args.update({'data0': data})
        if order == 1:
            years = [x.year for x in children]
            years = set(years)
            years = sorted(years, reverse=True)
            #debug('years: ' + repr(years))
            for year in years:
                # year may be None/0 -> grouped under 'Other' (-1 marker)
                displayYear = str(year or 'Other')
                args.update({'year': year or -1})
                url = build_url(args)
                xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(displayYear), isFolder=1)
        else:
            names = [x.name for x in children]
            names = set(names)
            names = sorted(names)
            #debug('names: ' + repr(names))
            for name in names:
                args.update({'name': name})
                url = build_url(args)
                xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(name), isFolder=1)
        if yearsFiltered: # load all item
            args.update({'override_filter': True, 'level': level})
            debug('args ' + repr(args))
            url = build_url(args)
            xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem("Load all"), isFolder=1)
    elif level == 2:
        children = data0
        if year is None:
            # Came in via name: list that event's years.
            filtered = [child for child in children if child.name == name]
            years = [x.year for x in filtered]
            years = set(years)
            years = sorted(years, reverse=True)
            #debug('# children by name' + repr(len(years)))
            for year in years:
                displayYear = str(year or 'Other')
                args.update({'year': year or -1})
                url = build_url(args)
                xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(displayYear), isFolder=1)
        else:
            # Came in via year: list that year's events by name.
            filtered = [child for child in children if child.year == year]
            sortedByName = sorted(filtered, cmp=by_name)
            #debug('# children by year' + repr(len(sortedByName)))
            for child in sortedByName:
                args.update({'name': child.name})
                url = build_url(args)
                xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(child.name), isFolder=1)
    elif level == 3:
        # Find the selected event and list its stages.
        item = None
        for child in data0:
            if child.name == name and child.year == year:
                item = child
                break
        if item:
            children = item.children
            data = pickle.dumps(children)
            if compress:
                data = zlib.compress(data)
            args.update({'data1': data})
            for child in children:
                args.update({'stage_name': child.name})
                url = build_url(args)
                xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(child.name), isFolder=1)
    elif level == 4:
        # Find the selected stage and list playable match VODs.
        item = None
        for child in data1:
            if child.name == stage_name:
                item = child
                break
        if item:
            vods = item.children
            for vod in vods:
                url = vod.url
                debug('vod url' + repr(url))
                if not url: # match didn't take place
                    continue
                plugin_url = get_youtube_plugin_url(url) or get_twitch_plugin_url(url)
                debug('plugin url:' + repr(plugin_url))
                if not plugin_url: # couldn't resolve vod url
                    continue
                label = 'Match ' + str(vod.match_number)
                if revealMatches:
                    # optionally spoil the matchup in the label
                    if len(vod.side2):
                        label += u' {} - {}'.format(vod.side1, vod.side2)
                    else:
                        label += ' ' + vod.side1
                xbmcplugin.addDirectoryItem(handle, plugin_url, xbmcgui.ListItem(label), False)
    xbmcplugin.endOfDirectory(handle)
def play(url, args):
    """Start playback of a plugin:// URL and optionally seek.

    url is a youtube/twitch plugin URL produced by get_*_plugin_url();
    args may carry 'time' (start offset).  Seeking works by waiting for
    the player to start, then calling seekTime().
    """
    time = args.get('time', None)
    # BROKEN URL RESOLVER
    # media_url = urlresolver.resolve('https://www.youtube.com/watch?v=7OXVPgu6urw')
    # # Create a playable item with a path to play.
    # play_item = xbmcgui.ListItem(path=url)
    # play_item.setProperty('StartOffset', time)
    # # Pass the item to the Kodi player.
    # xbmcplugin.setResolvedUrl(handle, True, listitem=play_item)
    # return
    # stop whatever is playing
    player = xbmc.Player()
    player.stop()
    # launch youtube plugin
    xbmc.executebuiltin('PlayMedia({})'.format(url))
    # seek?
    if time:
        delay = 5
        try:
            delay = int(addon.getSetting('youtube_seek_delay_s'))
        except:
            pass
        timeout = 20
        try:
            # NOTE(review): this reads 'youtube_seek_delay_s' again -- looks
            # like a copy-paste bug; a dedicated timeout setting was probably
            # intended. Confirm the real setting id before changing.
            timeout = int(addon.getSetting('youtube_seek_delay_s'))
        except:
            pass
        # xbmcgui.Dialog().ok(addonname, "have time: " + time)
        # wait for playback
        if timeout > 0:
            for i in range(0, timeout):
                if player.isPlaying():
                    debug('player is playing')
                    break
                xbmc.sleep(1000)
        # seek
        if player.isPlaying() and delay > 0:
            xbmc.sleep(delay * 1000)
            player.seekTime(int(time))
# Entry point: dispatch to play() when a 'play' URL was routed in,
# otherwise build the directory listing for the current level.
__run = 0
try:
    url = args.get('play', '')
    if url:
        play(url, args)
    else:
        build()
    debug("run: " + repr(__run))
    __run += 1
except Exception as e:
    debug(u'Exception: ' + str(e))
    # Python 2: map() is eager, so this logs every traceback line.
    map(debug, str(traceback.format_exc()).splitlines())
|
mit
|
xenigmax/seqan
|
util/py_lib/seqan/dox/validation.py
|
9
|
6106
|
#!/usr/bin/env python2
"""Some validation for proc_doc.Proc*"""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
class ProcDocValidator(object):
    """Base class for proc_doc.Proc* validators (visitor pattern).

    Subclasses override validate() and report problems through the
    supplied message printer.
    """

    def __init__(self, msg_printer):
        # Sink used by subclasses for warning/error output on tokens.
        self.msg_printer = msg_printer

    def validate(self, proc_entry):
        """Default validation: accept every entry."""
        return
class MissingSignatureValidator(ProcDocValidator):
    """Warn about entries that should carry an @signature but have none."""

    def validate(self, proc_entry):
        # Entry kinds that legitimately carry no @signature.
        skip_kinds = ['variable', 'member_variable', 'tag', 'grouped_tag', 'typedef',
                      'grouped_typedef', 'signature', 'concept', 'member_typedef',
                      'enum', 'grouped_enum', 'enum_value']
        if not hasattr(proc_entry, 'signatures') or proc_entry.kind in skip_kinds:
            return  # nothing to check for this entry
        if not proc_entry.signatures:
            self.msg_printer.printTokenError(proc_entry.raw.first_token,
                                             'Missing @signature for this entry!',
                                             'warning')
class MissingSignatureKeywordsValidator(ProcDocValidator):
    """Validates for missing keywords in signature (e.g. "class" for @class).

    For @class/@specialization entries each raw signature must contain one
    of the keywords class/struct/typedef/using, or end in "::Type"(;) --
    i.e. look like a type definition.
    """
    def validate(self, proc_entry):
        if proc_entry.kind not in ['class', 'specialization']:
            return  # only handle those
        for i, sig in enumerate(proc_entry.raw.signatures):
            # TODO(holtgrew): Really allow typedef and ::Type/mfns here?
            if 'class ' not in sig.text.text and 'struct ' not in sig.text.text \
               and 'typedef ' not in sig.text.text and 'using' not in sig.text.text \
               and not sig.text.text.strip().endswith('::Type') \
               and not sig.text.text.strip().endswith('::Type;'):
                msg = 'Missing keyword "class", "struct", "typedef", "using" in signature.'
                self.msg_printer.printTokenError(proc_entry.raw.signatures[i].text.tokens[0], msg, 'warning')
class OnlyRemarksInBodyValidator(ProcDocValidator):
    """Validates for the body starting with '@section Remarks'.

    Warns when the first element of an entry's body is a heading whose
    text is exactly 'Remarks' -- the detailed description should come
    first.
    """
    def validate(self, proc_entry):
        if not hasattr(proc_entry, 'body') or not proc_entry.body.children:
            return  # only handle if has non-empty body
        if proc_entry.body.children[0].type in ['h1', 'h2', 'h3', 'h4', 'h5'] and \
           proc_entry.body.children[0].children and \
           proc_entry.body.children[0].children[0].text == 'Remarks':
            # Fixed typo in the emitted warning ("descrition" -> "description").
            msg = 'Detailed description starts with Remarks'
            self.msg_printer.printTokenError(proc_entry.raw.first_token, msg, 'warning')
class MissingParameterDescriptionValidator(ProcDocValidator):
    """Warns if the description is missing for a @param or @return.

    Also warns when a @param/@tparam/@return lacks a name (params) or a
    type (returns).
    """
    def validate(self, proc_entry):
        if not hasattr(proc_entry, 'params') and \
           not hasattr(proc_entry, 'tparams') and \
           not hasattr(proc_entry, 'returns'):
            return  # Skip if type has no parameters
        # Check for empty name.
        for key in ['params', 'tparams', 'returns']:
            if not hasattr(proc_entry, key):
                continue  # Skip if missing.
            for val in getattr(proc_entry, key):
                # key[:-1] turns e.g. 'params' into the tag name 'param'.
                if hasattr(val, 'name') and not val.name:
                    msg = 'Missing name for @%s' % key[:-1]
                elif hasattr(val, 'type') and not val.type:
                    msg = 'Missing type for @%s' % key[:-1]
                else:
                    continue  # skip
                self.msg_printer.printTokenError(val.raw.first_token, msg, 'warning')
        # Check for empty description.
        for key in ['params', 'tparams', 'returns']:
            if not hasattr(proc_entry, key):
                continue  # Skip if missing.
            for val in getattr(proc_entry, key):
                if val.desc.empty:
                    msg = 'Missing description for @%s' % key[:-1]
                    self.msg_printer.printTokenError(val.raw.first_token, msg, 'warning')
class ReturnVoidValidator(ProcDocValidator):
    """Flag superfluous ``@return void`` clauses."""

    def validate(self, proc_entry):
        # Entries without a 'returns' attribute have nothing to check.
        for ret in getattr(proc_entry, 'returns', []):
            if ret.type != 'void':
                continue
            self.msg_printer.printTokenError(
                ret.raw.first_token,
                '@return superfluous for "void" type -- simply show "void" in signature.',
                'warning')
class EmptyBriefValidator(ProcDocValidator):
    """Warn when an entry lacks a non-empty @brief clause."""

    def validate(self, proc_entry):
        # Pages carry their content in the body; no @brief required.
        if proc_entry.kind in ['mainpage', 'page']:
            return
        if not hasattr(proc_entry, 'brief'):
            return
        if proc_entry.brief and not proc_entry.brief.empty:
            return  # a non-empty brief is present
        self.msg_printer.printTokenError(proc_entry.raw.first_token,
                                         'Missing non-empty @brief clause.',
                                         'warning')
class ClassCannotExtendConceptValidator(ProcDocValidator):
    """Report classes whose @extends target is not itself a class."""

    def validate(self, proc_entry):
        if proc_entry.kind not in ['class', 'specialization']:
            return  # only classes/specializations can mis-extend
        known = proc_entry.doc.top_level_entries
        for base in proc_entry.extends:
            if base not in known:
                continue  # unresolved base: not this validator's problem
            if known[base].kind in ['class', 'specialization']:
                continue  # extending a class is fine
            self.msg_printer.printTokenError(
                proc_entry.raw.first_token,
                'Class %s tries to inherit from non-class %s' % (proc_entry.name, base),
                'error')
# Array with the validator classes to use.
# (OnlyRemarksInBodyValidator is deliberately disabled.)
VALIDATORS = [MissingSignatureValidator,
              MissingParameterDescriptionValidator,
              MissingSignatureKeywordsValidator,
              #OnlyRemarksInBodyValidator,
              ReturnVoidValidator,
              EmptyBriefValidator,
              ClassCannotExtendConceptValidator]
|
bsd-3-clause
|
egabancho/invenio-ext
|
invenio_ext/jasmine/bundles.py
|
6
|
1520
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bundles for Jasmine test runner."""
from __future__ import unicode_literals
from invenio_base.bundles import invenio as _i
from invenio_base.bundles import jquery as _j
from invenio_ext.assets import Bundle, RequireJSFilter
# JavaScript bundle for the Jasmine test runner; the actual jasmine
# libraries are pulled in via bower, init.js bootstraps the runner.
jasmine_js = Bundle(
    # es5-shim is needed by PhantomJS
    # 'vendors/es5-shim/es5-shim.js',
    # 'vendors/es5-shim/es5-sham.js',
    "js/jasmine/init.js",
    output="jasmine.js",
    weight=50,
    filters=RequireJSFilter(exclude=[_j, _i]),
    bower={
        "jasmine": ">=2",
        "jasmine-jquery": ">=2",
        "jasmine-flight": ">=3",
        "jasmine-ajax": ">=2",
    }
)
# Stylesheet bundle for the Jasmine HTML reporter.
jasmine_styles = Bundle(
    'vendors/jasmine/lib/jasmine-core/jasmine.css',
    weight=-1,
    output='jasmine.css'
)
|
gpl-2.0
|
OpenFacetracker/facetracker-core
|
lib/youtube-dl/youtube_dl/compat.py
|
5
|
13628
|
from __future__ import unicode_literals
import collections
import getpass
import optparse
import os
import re
import shutil
import socket
import subprocess
import sys
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
# Python 3 ships a unicode-aware unquote; Python 2's urllib.unquote mangles
# non-ASCII percent-escapes, so a backport of the Python 3 semantics is used.
try:
    from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        """Replace %xx escapes by their single-character equivalents.

        Backport of Python 3 ``urllib.parse.unquote`` for Python 2:
        runs of percent-escapes are accumulated as raw bytes and decoded
        with *encoding*/*errors* in one pass, so multi-byte UTF-8
        sequences survive intact.
        """
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                # Two hex digits -> one raw byte. str.decode('hex') exists
                # only on Python 2, which is the only place this runs.
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string
# Name shims for built-in types that were renamed between Python 2 and 3.
try:
    compat_str = unicode # Python 2
except NameError:
    compat_str = str
try:
    compat_basestring = basestring # Python 2
except NameError:
    compat_basestring = str
try:
    compat_chr = unichr # Python 2
except NameError:
    compat_chr = chr
# ElementTree only grew its own ParseError in 2.7; on 2.6 fall back to the
# underlying expat error type that it raises instead.
try:
    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
    from xml.parsers.expat import ExpatError as compat_xml_parse_error
try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        """Split a query string into an ordered list of (name, value) pairs.

        Pairs are separated by '&' or ';'; names/values are plus-decoded
        and percent-unquoted with *encoding*/*errors*.
        """
        qs, _coerce_result = qs, compat_str
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = compat_urllib_parse_unquote(
                    name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = compat_urllib_parse_unquote(
                    value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r
    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        """Parse a query string into a dict mapping names to value lists."""
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            # Repeated names accumulate into a list, as parse_qs does.
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
# shlex.quote() appeared in Python 3.3; older interpreters get an
# equivalent shell-quoting fallback.
try:
    from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
    def shlex_quote(s):
        """Return a shell-escaped version of the string *s*."""
        if re.match(r'^[-_\w./]+$', s):
            # Contains only safe characters: no quoting needed.
            return s
        # Single-quote the whole string, escaping embedded single quotes
        # with the '"'"' dance.
        return "'" + s.replace("'", "'\"'\"'") + "'"
def compat_ord(c):
    """Return *c* unchanged if it is already an int, else ``ord(c)``.

    Smooths over bytes indexing: ``data[i]`` yields an int on Python 3
    but a length-1 string on Python 2.
    """
    # NOTE: an exact type check (not isinstance) mirrors the original
    # semantics for int subclasses such as bool.
    return c if type(c) is int else ord(c)
# Python 3 handles unicode env vars and paths natively; Python 2 needs
# filesystem-encoding-aware shims.
if sys.version_info >= (3, 0):
    compat_getenv = os.getenv
    compat_expanduser = os.path.expanduser
else:
    # Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
    def compat_getenv(key, default=None):
        """os.getenv that decodes the value with the filesystem encoding."""
        from .utils import get_filesystem_encoding
        env = os.getenv(key, default)
        if env:
            env = env.decode(get_filesystem_encoding())
        return env
    # HACK: The default implementations of os.path.expanduser from cpython do not decode
    # environment variables with filesystem encoding. We will work around this by
    # providing adjusted implementations.
    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
    # for different platforms with correct environment variables decoding.
    if os.name == 'posix':
        def compat_expanduser(path):
            """Expand ~ and ~user constructions. If user or $HOME is unknown,
            do nothing."""
            if not path.startswith('~'):
                return path
            i = path.find('/', 1)
            if i < 0:
                i = len(path)
            if i == 1:
                if 'HOME' not in os.environ:
                    import pwd
                    userhome = pwd.getpwuid(os.getuid()).pw_dir
                else:
                    userhome = compat_getenv('HOME')
            else:
                import pwd
                try:
                    pwent = pwd.getpwnam(path[1:i])
                except KeyError:
                    return path
                userhome = pwent.pw_dir
            userhome = userhome.rstrip('/')
            return (userhome + path[i:]) or '/'
    elif os.name == 'nt' or os.name == 'ce':
        def compat_expanduser(path):
            """Expand ~ and ~user constructs.
            If user or $HOME is unknown, do nothing."""
            if path[:1] != '~':
                return path
            i, n = 1, len(path)
            while i < n and path[i] not in '/\\':
                i = i + 1
            if 'HOME' in os.environ:
                userhome = compat_getenv('HOME')
            elif 'USERPROFILE' in os.environ:
                userhome = compat_getenv('USERPROFILE')
            elif 'HOMEPATH' not in os.environ:
                return path
            else:
                try:
                    drive = compat_getenv('HOMEDRIVE')
                except KeyError:
                    drive = ''
                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
            if i != 1: # ~user
                userhome = os.path.join(os.path.dirname(userhome), path[1:i])
            return userhome + path[i:]
    else:
        compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
    def compat_print(s):
        """Print a unicode string, encoding it for the Python 2 console."""
        from .utils import preferredencoding
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        """Print *s*; Python 3 stdout handles unicode natively."""
        assert isinstance(s, compat_str)
        print(s)
try:
    subprocess_check_output = subprocess.check_output
except AttributeError:
    # Python 2.6 lacks subprocess.check_output; emulate it.
    def subprocess_check_output(*args, **kwargs):
        """Run a command, returning its stdout as bytes.

        Raises subprocess.CalledProcessError (carrying the output) when
        the command exits with a non-zero status.
        """
        assert 'input' not in kwargs
        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
        output, _ = p.communicate()
        ret = p.poll()
        if ret:
            # Popen has no ``.args`` attribute before Python 3.3 (the
            # only interpreters where this fallback runs), so recover the
            # command from the caller's arguments instead of ``p.args``.
            cmd = kwargs.get('args', args[0] if args else None)
            raise subprocess.CalledProcessError(ret, cmd, output=output)
        return output
# getpass.getpass on Python 2 / Windows cannot handle unicode prompts;
# pre-encode the prompt there.
if sys.version_info < (3, 0) and sys.platform == 'win32':
    def compat_getpass(prompt, *args, **kwargs):
        """Prompt for a password, encoding a unicode prompt for the console."""
        if isinstance(prompt, compat_str):
            from .utils import preferredencoding
            prompt = prompt.encode(preferredencoding())
        return getpass.getpass(prompt, *args, **kwargs)
else:
    compat_getpass = getpass.getpass
# Old 2.6 and 2.7 releases require kwargs to be bytes
try:
    # Probe whether this interpreter accepts unicode keyword names
    # (the module-wide unicode_literals makes 'x' unicode on Python 2).
    def _testfunc(x):
        pass
    _testfunc(**{'x': 0})
except TypeError:
    def compat_kwargs(kwargs):
        """Re-key a kwargs dict with byte-string names for old CPython 2."""
        return dict((bytes(k), v) for k, v in kwargs.items())
else:
    def compat_kwargs(kwargs):
        """This interpreter takes unicode kwargs natively; pass through."""
        return kwargs
# socket.create_connection gained source_address support in 2.7;
# backport it for 2.6.
if sys.version_info < (2, 7):
    def compat_socket_create_connection(address, timeout, source_address=None):
        """Connect to (host, port), trying each resolved sockaddr in turn.

        Returns the first successfully connected socket; re-raises the
        last socket.error if every candidate fails.
        """
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as _:
                err = _
                # Close the half-constructed socket before trying the next
                # address; otherwise it would leak.
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        else:
            raise socket.error("getaddrinfo returns an empty list")
else:
    compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
    """Monkey-patch OptionGroup.add_option when this interpreter's optparse
    rejects unicode option strings (bpo-9161); no-op otherwise."""
    op = optparse.OptionParser()
    og = optparse.OptionGroup(op, 'foo')
    try:
        # Probe: a unicode option string raises TypeError on broken builds.
        og.add_option('-t')
    except TypeError:
        real_add_option = optparse.OptionGroup.add_option
        def _compat_add_option(self, *args, **kwargs):
            # Encode every unicode argument to ASCII before delegating to
            # the original implementation.
            enc = lambda v: (
                v.encode('ascii', 'replace') if isinstance(v, compat_str)
                else v)
            bargs = [enc(a) for a in args]
            bkwargs = dict(
                (k, enc(v)) for k, v in kwargs.items())
            return real_add_option(self, *bargs, **bkwargs)
        optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
    compat_get_terminal_size = shutil.get_terminal_size
else:
    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

    def compat_get_terminal_size():
        """Best-effort terminal size for old interpreters.

        Reads $COLUMNS/$LINES, then lets ``stty size`` override them.
        Returns a ``terminal_size(columns, lines)`` namedtuple; either
        field may be None if it could not be determined.
        """
        columns = compat_getenv('COLUMNS', None)
        if columns:
            columns = int(columns)
        else:
            columns = None
        lines = compat_getenv('LINES', None)
        if lines:
            lines = int(lines)
        else:
            lines = None
        try:
            sp = subprocess.Popen(
                ['stty', 'size'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = sp.communicate()
            lines, columns = map(int, out.split())
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt. Any ordinary failure (stty missing, bad
            # output) just leaves the env-derived values in place.
            pass
        return _terminal_size(columns, lines)
# Explicit public API of this compatibility module, kept alphabetical.
__all__ = [
    'compat_HTTPError',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
    'compat_html_entities',
    'compat_html_parser',
    'compat_http_client',
    'compat_http_server',
    'compat_kwargs',
    'compat_ord',
    'compat_parse_qs',
    'compat_print',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_urlparse',
    'compat_urllib_request',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_xml_parse_error',
    'shlex_quote',
    'subprocess_check_output',
    'workaround_optparse_bug9161',
]
|
gpl-2.0
|
alexeyum/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
72
|
7950
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    """Cluster a hand-built two-block affinity matrix and check that every
    solver/label-assignment combination recovers the blocks, and that a
    fitted model round-trips through pickle."""
    # Rows 0-2 and rows 3-6 form two strongly connected blocks.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Label ids are arbitrary; normalize so row 0 gets label 1.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # A pickled/unpickled model keeps its params and labels.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    """With pyamg installed the 'amg' solver must run and beat a weak
    accuracy floor; without it, requesting 'amg' must raise ValueError."""
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # Import used only as an availability probe for the optional pyamg dep.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """spectral_clustering must raise ValueError for an unknown eigen_solver."""
    blob_centers = np.array([[0., 0., 0.], [10., 10., 10.], [20., 20., 20.]])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    dist = pairwise_distances(X)
    # Turn distances into a sparse similarity matrix.
    affinity = sparse.coo_matrix(np.max(dist) - dist)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """spectral_clustering must raise ValueError for an unknown assign_labels."""
    blob_centers = np.array([[0., 0., 0.], [10., 10., 10.], [20., 20., 20.]])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    dist = pairwise_distances(X)
    # Turn distances into a sparse similarity matrix.
    affinity = sparse.coo_matrix(np.max(dist) - dist)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """A sparse precomputed affinity must recover the two blobs exactly."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    # Sparsify the RBF affinity by clipping near-zero entries.
    affinity = np.maximum(rbf_kernel(X, gamma=1) - 1e-4, 0)
    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed')
    labels = model.fit(sparse.coo_matrix(affinity)).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    """Exercise every supported affinity: nearest-neighbors, rbf, all
    built-in kernels, a callable kernel, and an unknown-affinity error."""
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # A disconnected kNN graph is expected here; the warning is asserted.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    # Default (rbf) affinity with explicit gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A constant callable affinity must still produce one label per sample.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """discretize() must recover labels from a noisy class-assignment matrix
    across a range of sample counts and class counts."""
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin ``float`` is the exact equivalent dtype.
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
bgris/ODL_bgris
|
lib/python3.5/idlelib/idle_test/test_formatparagraph.py
|
2
|
14331
|
# Test the functions and main class method of FormatParagraph.py
import unittest
from idlelib import FormatParagraph as fp
from idlelib.EditorWindow import EditorWindow
from tkinter import Tk, Text
from test.support import requires
class Is_Get_Test(unittest.TestCase):
    """Test the is_ and get_ functions"""
    test_comment = '# This is a comment'
    test_nocomment = 'This is not a comment'
    trailingws_comment = '# This is a comment '
    leadingws_comment = '    # This is a comment'
    leadingws_nocomment = '    This is not a comment'

    def test_is_all_white(self):
        # Empty and whitespace-only strings count as "all white".
        self.assertTrue(fp.is_all_white(''))
        self.assertTrue(fp.is_all_white('\t\n\r\f\v'))
        self.assertFalse(fp.is_all_white(self.test_comment))

    def test_get_indent(self):
        # Leading whitespace is returned verbatim; none yields ''.
        self.assertEqual(fp.get_indent(self.test_comment), '')
        self.assertEqual(fp.get_indent(self.trailingws_comment), '')
        self.assertEqual(fp.get_indent(self.leadingws_comment), '    ')
        self.assertEqual(fp.get_indent(self.leadingws_nocomment), '    ')

    def test_get_comment_header(self):
        # Comment strings: indent plus the '#' marker.
        self.assertEqual(fp.get_comment_header(self.test_comment), '#')
        self.assertEqual(fp.get_comment_header(self.trailingws_comment), '#')
        self.assertEqual(fp.get_comment_header(self.leadingws_comment), '    #')
        # Non-comment strings: just the indent.
        self.assertEqual(fp.get_comment_header(self.leadingws_nocomment), '    ')
        self.assertEqual(fp.get_comment_header(self.test_nocomment), '')
class FindTest(unittest.TestCase):
    """Test the find_paragraph function in FormatParagraph.
    Using the runcase() function, find_paragraph() is called with 'mark' set at
    multiple indexes before and inside the test paragraph.
    It appears that code with the same indentation as a quoted string is grouped
    as part of the same paragraph, which is probably incorrect behavior.

    Expected values are (first_index, last_index, comment_header, text),
    where the text is expressed as a hard-coded slice of the inserted
    string — keep the slices in sync if the fixtures change.
    """
    @classmethod
    def setUpClass(cls):
        # Use the mock Text widget so no Tk display is required.
        from idlelib.idle_test.mock_tk import Text
        cls.text = Text()
    def runcase(self, inserttext, stopline, expected):
        # Check that find_paragraph returns the expected paragraph when
        # the mark index is set to beginning, middle, end of each line
        # up to but not including the stop line
        text = self.text
        text.insert('1.0', inserttext)
        for line in range(1, stopline):
            linelength = int(text.index("%d.end" % line).split('.')[1])
            for col in (0, linelength//2, linelength):
                tempindex = "%d.%d" % (line, col)
                self.assertEqual(fp.find_paragraph(text, tempindex), expected)
        text.delete('1.0', 'end')
    def test_find_comment(self):
        comment = (
            "# Comment block with no blank lines before\n"
            "# Comment line\n"
            "\n")
        self.runcase(comment, 3, ('1.0', '3.0', '#', comment[0:58]))
        comment = (
            "\n"
            "# Comment block with whitespace line before and after\n"
            "# Comment line\n"
            "\n")
        self.runcase(comment, 4, ('2.0', '4.0', '#', comment[1:70]))
        comment = (
            "\n"
            "    # Indented comment block with whitespace before and after\n"
            "    # Comment line\n"
            "\n")
        self.runcase(comment, 4, ('2.0', '4.0', '    #', comment[1:82]))
        comment = (
            "\n"
            "# Single line comment\n"
            "\n")
        self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:23]))
        comment = (
            "\n"
            "    # Single line comment with leading whitespace\n"
            "\n")
        self.runcase(comment, 3, ('2.0', '3.0', '    #', comment[1:51]))
        comment = (
            "\n"
            "# Comment immediately followed by code\n"
            "x = 42\n"
            "\n")
        self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:40]))
        comment = (
            "\n"
            "    # Indented comment immediately followed by code\n"
            "x = 42\n"
            "\n")
        self.runcase(comment, 3, ('2.0', '3.0', '    #', comment[1:53]))
        comment = (
            "\n"
            "# Comment immediately followed by indented code\n"
            "    x = 42\n"
            "\n")
        self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:49]))
    def test_find_paragraph(self):
        teststring = (
            '"""String with no blank lines before\n'
            'String line\n'
            '"""\n'
            '\n')
        self.runcase(teststring, 4, ('1.0', '4.0', '', teststring[0:53]))
        teststring = (
            "\n"
            '"""String with whitespace line before and after\n'
            'String line.\n'
            '"""\n'
            '\n')
        self.runcase(teststring, 5, ('2.0', '5.0', '', teststring[1:66]))
        teststring = (
            '\n'
            '    """Indented string with whitespace before and after\n'
            '    Comment string.\n'
            '    """\n'
            '\n')
        self.runcase(teststring, 5, ('2.0', '5.0', '    ', teststring[1:85]))
        teststring = (
            '\n'
            '"""Single line string."""\n'
            '\n')
        self.runcase(teststring, 3, ('2.0', '3.0', '', teststring[1:27]))
        teststring = (
            '\n'
            '    """Single line string with leading whitespace."""\n'
            '\n')
        self.runcase(teststring, 3, ('2.0', '3.0', '    ', teststring[1:55]))
class ReformatFunctionTest(unittest.TestCase):
    """Test the reformat_paragraph function without the editor window."""

    def test_reformat_paragraph(self):
        reform = fp.reformat_paragraph
        # Whitespace-only input comes back unchanged.
        self.assertEqual(reform(' ', 1), ' ')
        self.assertEqual(reform("Hello world", 20), "Hello world")
        # Without a leading newline: wrap points move with the limit.
        text = "O hello world"
        for limit, expected in [(1, "O\nhello\nworld"),
                                (6, "O\nhello\nworld"),
                                (7, "O hello\nworld"),
                                (12, "O hello\nworld"),
                                (13, "O hello world")]:
            self.assertEqual(reform(text, limit), expected)
        # With a leading newline: the newline is preserved.
        text = "\nO hello world"
        for limit, expected in [(1, "\nO\nhello\nworld"),
                                (6, "\nO\nhello\nworld"),
                                (7, "\nO hello\nworld"),
                                (12, "\nO hello\nworld"),
                                (13, "\nO hello world")]:
            self.assertEqual(reform(text, limit), expected)
class ReformatCommentTest(unittest.TestCase):
    """Test the reformat_comment function without the editor window."""
    def test_reformat_comment(self):
        Equal = self.assertEqual
        # reformat_comment formats to a minimum of 20 characters
        # Triple-quoted string body with a leading indent as the "comment"
        # prefix.
        test_string = (
            "    \"\"\"this is a test of a reformat for a triple quoted string"
            " will it reformat to less than 70 characters for me?\"\"\"")
        result = fp.reformat_comment(test_string, 70, "    ")
        expected = (
            "    \"\"\"this is a test of a reformat for a triple quoted string will it\n"
            "    reformat to less than 70 characters for me?\"\"\"")
        Equal(result, expected)
        # '#'-prefixed comment re-wrapped with the '#' header preserved.
        test_comment = (
            "# this is a test of a reformat for a triple quoted string will "
            "it reformat to less than 70 characters for me?")
        result = fp.reformat_comment(test_comment, 70, "#")
        expected = (
            "# this is a test of a reformat for a triple quoted string will it\n"
            "# reformat to less than 70 characters for me?")
        Equal(result, expected)
class FormatClassTest(unittest.TestCase):
    """Construction stores the editor; close() drops the reference."""

    def test_init_close(self):
        formatter = fp.FormatParagraph('editor')
        self.assertEqual(formatter.editwin, 'editor')
        formatter.close()
        self.assertEqual(formatter.editwin, None)
# For testing format_paragraph_event, Initialize FormatParagraph with
# a mock Editor with .text and .get_selection_indices. The text must
# be a Text wrapper that adds two methods
# A real EditorWindow creates unneeded, time-consuming baggage and
# sometimes emits shutdown warnings like this:
# "warning: callback failed in WindowList <class '_tkinter.TclError'>
# : invalid command name ".55131368.windows".
# Calling EditorWindow._close in tearDownClass prevents this but causes
# other problems (windows left open).
class TextWrapper:
    """Delegating wrapper around a Tk Text widget that also supplies the
    two no-op undo-block methods FormatParagraph calls."""
    def __init__(self, master):
        self.text = Text(master=master)
    def __getattr__(self, name):
        # Fall through to the wrapped Text for anything not defined here.
        return getattr(self.text, name)
    def undo_block_start(self): pass
    def undo_block_stop(self): pass
class Editor:
    """Minimal EditorWindow stand-in: a wrapped text widget plus the real
    get_selection_indices implementation borrowed from EditorWindow."""

    def __init__(self, root):
        self.text = TextWrapper(root)

    get_selection_indices = EditorWindow.get_selection_indices
class FormatEventTest(unittest.TestCase):
    """Test the formatting of text inside a Text widget.
    This is done with FormatParagraph.format.paragraph_event,
    which calls functions in the module as appropriate.
    """
    test_string = (
        "    '''this is a test of a reformat for a triple "
        "quoted string will it reformat to less than 70 "
        "characters for me?'''\n")
    multiline_test_string = (
        "    '''The first line is under the max width.\n"
        "    The second line's length is way over the max width. It goes "
        "on and on until it is over 100 characters long.\n"
        "    Same thing with the third line. It is also way over the max "
        "width, but FormatParagraph will fix it.\n"
        "    '''\n")
    multiline_test_comment = (
        "# The first line is under the max width.\n"
        "# The second line's length is way over the max width. It goes on "
        "and on until it is over 100 characters long.\n"
        "# Same thing with the third line. It is also way over the max "
        "width, but FormatParagraph will fix it.\n"
        "# The fourth line is short like the first line.")
    @classmethod
    def setUpClass(cls):
        # Requires a real Tk display; skipped in gui-less test runs.
        requires('gui')
        cls.root = Tk()
        editor = Editor(root=cls.root)
        cls.text = editor.text.text  # Test code does not need the wrapper.
        cls.formatter = fp.FormatParagraph(editor).format_paragraph_event
        # Sets the insert mark just after the re-wrapped and inserted text.
    @classmethod
    def tearDownClass(cls):
        # Drop widget references before destroying the Tk root.
        del cls.text, cls.formatter
        cls.root.destroy()
        del cls.root
    def test_short_line(self):
        # Lines already under the limit pass through unchanged.
        self.text.insert('1.0', "Short line\n")
        self.formatter("Dummy")
        self.assertEqual(self.text.get('1.0', 'insert'), "Short line\n" )
        self.text.delete('1.0', 'end')
    def test_long_line(self):
        text = self.text
        # Set cursor ('insert' mark) to '1.0', within text.
        text.insert('1.0', self.test_string)
        text.mark_set('insert', '1.0')
        self.formatter('ParameterDoesNothing', limit=70)
        result = text.get('1.0', 'insert')
        # find function includes \n
        expected = (
"    '''this is a test of a reformat for a triple quoted string will it\n"
"    reformat to less than 70 characters for me?'''\n")  # yes
        self.assertEqual(result, expected)
        text.delete('1.0', 'end')
        # Select from 1.11 to line end.
        text.insert('1.0', self.test_string)
        text.tag_add('sel', '1.11', '1.end')
        self.formatter('ParameterDoesNothing', limit=70)
        result = text.get('1.0', 'insert')
        # selection excludes \n
        expected = (
"    '''this is a test of a reformat for a triple quoted string will it reformat\n"
"    to less than 70 characters for me?'''")  # no
        self.assertEqual(result, expected)
        text.delete('1.0', 'end')
    def test_multiple_lines(self):
        text = self.text
        #  Select 2 long lines.
        text.insert('1.0', self.multiline_test_string)
        text.tag_add('sel', '2.0', '4.0')
        self.formatter('ParameterDoesNothing', limit=70)
        result = text.get('2.0', 'insert')
        expected = (
"    The second line's length is way over the max width. It goes on and\n"
"    on until it is over 100 characters long. Same thing with the third\n"
"    line. It is also way over the max width, but FormatParagraph will\n"
"    fix it.\n")
        self.assertEqual(result, expected)
        text.delete('1.0', 'end')
    def test_comment_block(self):
        text = self.text
        # Set cursor ('insert') to '1.0', within block.
        text.insert('1.0', self.multiline_test_comment)
        self.formatter('ParameterDoesNothing', limit=70)
        result = text.get('1.0', 'insert')
        expected = (
"# The first line is under the max width. The second line's length is\n"
"# way over the max width. It goes on and on until it is over 100\n"
"# characters long. Same thing with the third line. It is also way over\n"
"# the max width, but FormatParagraph will fix it. The fourth line is\n"
"# short like the first line.\n")
        self.assertEqual(result, expected)
        text.delete('1.0', 'end')
        # Select line 2, verify line 1 unaffected.
        text.insert('1.0', self.multiline_test_comment)
        text.tag_add('sel', '2.0', '3.0')
        self.formatter('ParameterDoesNothing', limit=70)
        result = text.get('1.0', 'insert')
        expected = (
"# The first line is under the max width.\n"
"# The second line's length is way over the max width. It goes on and\n"
"# on until it is over 100 characters long.\n")
        self.assertEqual(result, expected)
        text.delete('1.0', 'end')
    # The following block worked with EditorWindow but fails with the mock.
    # Lines 2 and 3 get pasted together even though the previous block left
    # the previous line alone. More investigation is needed.
##        # Select lines 3 and 4
##        text.insert('1.0', self.multiline_test_comment)
##        text.tag_add('sel', '3.0', '5.0')
##        self.formatter('ParameterDoesNothing')
##        result = text.get('3.0', 'insert')
##        expected = (
##"# Same thing with the third line. It is also way over the max width,\n"
##"# but FormatParagraph will fix it. The fourth line is short like the\n"
##"# first line.\n")
##        self.assertEqual(result, expected)
##        text.delete('1.0', 'end')
if __name__ == '__main__':
    # ``exit`` is a boolean flag; the original passed 2, a truthy typo.
    unittest.main(verbosity=2, exit=True)
|
gpl-3.0
|
IONISx/edx-platform
|
common/djangoapps/config_models/tests.py
|
11
|
13830
|
# -*- coding: utf-8 -*-
"""
Tests of ConfigurationModel
"""
import ddt
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from freezegun import freeze_time
from mock import patch, Mock
from config_models.models import ConfigurationModel
from config_models.views import ConfigurationModelCurrentAPIView
class ExampleConfig(ConfigurationModel):
    """
    Test model for testing ``ConfigurationModels``.
    """
    # Seconds that cached ``current()`` lookups remain valid.
    cache_timeout = 300
    string_field = models.TextField()
    int_field = models.IntegerField(default=10)
@patch('config_models.models.cache')
class ConfigurationModelTests(TestCase):
    """
    Tests of ConfigurationModel

    The class-level patch injects ``mock_cache`` as the last positional
    argument of every test method.
    """
    def setUp(self):
        super(ConfigurationModelTests, self).setUp()
        self.user = User()
        self.user.save()
    def test_cache_deleted_on_save(self, mock_cache):
        # Saving a new config row must invalidate the cached current entry.
        ExampleConfig(changed_by=self.user).save()
        mock_cache.delete.assert_called_with(ExampleConfig.cache_key_name())
    def test_cache_key_name(self, _mock_cache):
        self.assertEquals(ExampleConfig.cache_key_name(), 'configuration/ExampleConfig/current')
    def test_no_config_empty_cache(self, mock_cache):
        # Cache miss + no rows: current() returns field defaults and caches them.
        mock_cache.get.return_value = None
        current = ExampleConfig.current()
        self.assertEquals(current.int_field, 10)
        self.assertEquals(current.string_field, '')
        mock_cache.set.assert_called_with(ExampleConfig.cache_key_name(), current, 300)
    def test_no_config_full_cache(self, mock_cache):
        # Cache hit: current() returns whatever the cache holds, untouched.
        current = ExampleConfig.current()
        self.assertEquals(current, mock_cache.get.return_value)
    def test_config_ordering(self, mock_cache):
        # The most recently saved row wins; freeze_time pins change_date.
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            first = ExampleConfig(changed_by=self.user)
            first.string_field = 'first'
            first.save()
        second = ExampleConfig(changed_by=self.user)
        second.string_field = 'second'
        second.save()
        self.assertEquals(ExampleConfig.current().string_field, 'second')
    def test_cache_set(self, mock_cache):
        # A cache miss on current() repopulates the cache with the row.
        mock_cache.get.return_value = None
        first = ExampleConfig(changed_by=self.user)
        first.string_field = 'first'
        first.save()
        ExampleConfig.current()
        mock_cache.set.assert_called_with(ExampleConfig.cache_key_name(), first, 300)
    def test_active_annotation(self, mock_cache):
        # with_active_flag() marks only the newest row as active.
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            ExampleConfig.objects.create(string_field='first')
        ExampleConfig.objects.create(string_field='second')
        rows = ExampleConfig.objects.with_active_flag().order_by('-change_date')
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0].string_field, 'second')
        self.assertEqual(rows[0].is_active, True)
        self.assertEqual(rows[1].string_field, 'first')
        self.assertEqual(rows[1].is_active, False)
    def test_always_insert(self, __):
        # Saving an existing instance creates a new row (append-only model).
        config = ExampleConfig(changed_by=self.user, string_field='first')
        config.save()
        config.string_field = 'second'
        config.save()
        self.assertEquals(2, ExampleConfig.objects.all().count())
class ExampleKeyedConfig(ConfigurationModel):
    """
    Test model for testing ``ConfigurationModels`` with keyed configuration.
    Does not inherit from ExampleConfig due to how Django handles model inheritance.
    """
    # Seconds that cached ``current()`` lookups remain valid.
    cache_timeout = 300
    # Each (left, right) pair gets its own independent configuration history.
    KEY_FIELDS = ('left', 'right')
    left = models.CharField(max_length=30)
    right = models.CharField(max_length=30)
    string_field = models.TextField()
    int_field = models.IntegerField(default=10)
@ddt.ddt
@patch('config_models.models.cache')
class KeyedConfigurationModelTests(TestCase):
    """
    Tests for ``ConfigurationModels`` with keyed configuration.

    The config_models cache is patched for every test so each test can
    control cache hits and misses through ``mock_cache``.

    Note: ``assertEquals`` is a deprecated alias of ``assertEqual``; the
    canonical name is used throughout.
    """

    def setUp(self):
        super(KeyedConfigurationModelTests, self).setUp()
        self.user = User()
        self.user.save()

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_cache_key_name(self, left, right, _mock_cache):
        self.assertEqual(
            ExampleKeyedConfig.cache_key_name(left, right),
            'configuration/ExampleKeyedConfig/current/{},{}'.format(left, right)
        )

    @ddt.data(
        ((), 'left,right'),
        (('left', 'right'), 'left,right'),
        (('left', ), 'left')
    )
    @ddt.unpack
    def test_key_values_cache_key_name(self, args, expected_key, _mock_cache):
        self.assertEqual(
            ExampleKeyedConfig.key_values_cache_key_name(*args),
            'configuration/ExampleKeyedConfig/key_values/{}'.format(expected_key))

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_no_config_empty_cache(self, left, right, mock_cache):
        mock_cache.get.return_value = None
        current = ExampleKeyedConfig.current(left, right)
        # With no stored rows the model defaults are returned ...
        self.assertEqual(current.int_field, 10)
        self.assertEqual(current.string_field, '')
        # ... and the computed value is written back to the cache.
        mock_cache.set.assert_called_with(ExampleKeyedConfig.cache_key_name(left, right), current, 300)

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_no_config_full_cache(self, left, right, mock_cache):
        # A cache hit must be returned verbatim without touching the database.
        current = ExampleKeyedConfig.current(left, right)
        self.assertEqual(current, mock_cache.get.return_value)

    def test_config_ordering(self, mock_cache):
        # The most recently saved row for each key pair must win.
        mock_cache.get.return_value = None

        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(
                changed_by=self.user,
                left='left_a',
                right='right_a',
                string_field='first_a',
            ).save()
            ExampleKeyedConfig(
                changed_by=self.user,
                left='left_b',
                right='right_b',
                string_field='first_b',
            ).save()

        ExampleKeyedConfig(
            changed_by=self.user,
            left='left_a',
            right='right_a',
            string_field='second_a',
        ).save()
        ExampleKeyedConfig(
            changed_by=self.user,
            left='left_b',
            right='right_b',
            string_field='second_b',
        ).save()

        self.assertEqual(ExampleKeyedConfig.current('left_a', 'right_a').string_field, 'second_a')
        self.assertEqual(ExampleKeyedConfig.current('left_b', 'right_b').string_field, 'second_b')

    def test_cache_set(self, mock_cache):
        mock_cache.get.return_value = None
        first = ExampleKeyedConfig(
            changed_by=self.user,
            left='left',
            right='right',
            string_field='first',
        )
        first.save()
        ExampleKeyedConfig.current('left', 'right')
        mock_cache.set.assert_called_with(ExampleKeyedConfig.cache_key_name('left', 'right'), first, 300)

    def test_key_values(self, mock_cache):
        mock_cache.get.return_value = None

        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(left='left_a', right='right_a', changed_by=self.user).save()
            ExampleKeyedConfig(left='left_b', right='right_b', changed_by=self.user).save()

        ExampleKeyedConfig(left='left_a', right='right_a', changed_by=self.user).save()
        ExampleKeyedConfig(left='left_b', right='right_b', changed_by=self.user).save()

        unique_key_pairs = ExampleKeyedConfig.key_values()
        self.assertEqual(len(unique_key_pairs), 2)
        self.assertEqual(set(unique_key_pairs), set([('left_a', 'right_a'), ('left_b', 'right_b')]))
        unique_left_keys = ExampleKeyedConfig.key_values('left', flat=True)
        self.assertEqual(len(unique_left_keys), 2)
        self.assertEqual(set(unique_left_keys), set(['left_a', 'left_b']))

    def test_key_string_values(self, mock_cache):
        """ Ensure str() vs unicode() doesn't cause duplicate cache entries """
        ExampleKeyedConfig(left='left', right=u'〉☃', enabled=True, int_field=10, changed_by=self.user).save()
        mock_cache.get.return_value = None

        entry = ExampleKeyedConfig.current('left', u'〉☃')
        key = mock_cache.get.call_args[0][0]
        self.assertEqual(entry.int_field, 10)
        mock_cache.get.assert_called_with(key)
        self.assertEqual(mock_cache.set.call_args[0][0], key)

        mock_cache.get.reset_mock()
        entry = ExampleKeyedConfig.current(u'left', u'〉☃')
        self.assertEqual(entry.int_field, 10)
        mock_cache.get.assert_called_with(key)

    def test_current_set(self, mock_cache):
        mock_cache.get.return_value = None

        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(left='left_a', right='right_a', int_field=0, changed_by=self.user).save()
            ExampleKeyedConfig(left='left_b', right='right_b', int_field=0, changed_by=self.user).save()

        ExampleKeyedConfig(left='left_a', right='right_a', int_field=1, changed_by=self.user).save()
        ExampleKeyedConfig(left='left_b', right='right_b', int_field=2, changed_by=self.user).save()

        queryset = ExampleKeyedConfig.objects.current_set()
        self.assertEqual(len(queryset.all()), 2)
        self.assertEqual(
            set(queryset.order_by('int_field').values_list('int_field', flat=True)),
            set([1, 2])
        )

    def test_active_annotation(self, mock_cache):
        mock_cache.get.return_value = None

        with freeze_time('2012-01-01'):
            ExampleKeyedConfig.objects.create(left='left_a', right='right_a', string_field='first')
            ExampleKeyedConfig.objects.create(left='left_b', right='right_b', string_field='first')

        ExampleKeyedConfig.objects.create(left='left_a', right='right_a', string_field='second')

        rows = ExampleKeyedConfig.objects.with_active_flag()
        self.assertEqual(len(rows), 3)
        for row in rows:
            if row.left == 'left_a':
                # Only the most recent left_a row is active.
                self.assertEqual(row.is_active, row.string_field == 'second')
            else:
                self.assertEqual(row.left, 'left_b')
                self.assertEqual(row.string_field, 'first')
                self.assertEqual(row.is_active, True)

    def test_key_values_cache(self, mock_cache):
        mock_cache.get.return_value = None
        self.assertEqual(ExampleKeyedConfig.key_values(), [])
        mock_cache.set.assert_called_with(ExampleKeyedConfig.key_values_cache_key_name(), [], 300)

        fake_result = [('a', 'b'), ('c', 'd')]
        mock_cache.get.return_value = fake_result
        self.assertEqual(ExampleKeyedConfig.key_values(), fake_result)
@ddt.ddt
class ConfigurationModelAPITests(TestCase):
    """
    Tests for the configuration model API.

    Notes on fixes: ``xrange`` is Python 2 only (``range`` works on both
    Python 2 and 3 for this small loop) and ``assertEquals`` is a deprecated
    alias of ``assertEqual``.
    """

    def setUp(self):
        super(ConfigurationModelAPITests, self).setUp()
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(
            username='test_user',
            email='test_user@example.com',
            password='test_pass',
        )
        self.user.is_superuser = True
        self.user.save()

        self.current_view = ConfigurationModelCurrentAPIView.as_view(model=ExampleConfig)

        # Disable caching while testing the API
        patcher = patch('config_models.models.cache', Mock(get=Mock(return_value=None)))
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_insert(self):
        self.assertEqual("", ExampleConfig.current().string_field)
        request = self.factory.post('/config/ExampleConfig', {"string_field": "string_value"})
        request.user = self.user
        __ = self.current_view(request)
        self.assertEqual("string_value", ExampleConfig.current().string_field)
        self.assertEqual(self.user, ExampleConfig.current().changed_by)

    def test_multiple_inserts(self):
        for i in range(3):
            self.assertEqual(i, ExampleConfig.objects.all().count())

            request = self.factory.post('/config/ExampleConfig', {"string_field": str(i)})
            request.user = self.user
            response = self.current_view(request)
            self.assertEqual(201, response.status_code)

            self.assertEqual(i + 1, ExampleConfig.objects.all().count())
            self.assertEqual(str(i), ExampleConfig.current().string_field)

    def test_get_current(self):
        request = self.factory.get('/config/ExampleConfig')
        request.user = self.user
        response = self.current_view(request)
        # pylint: disable=no-member
        self.assertEqual('', response.data['string_field'])
        self.assertEqual(10, response.data['int_field'])
        self.assertEqual(None, response.data['changed_by'])
        self.assertEqual(False, response.data['enabled'])
        self.assertEqual(None, response.data['change_date'])

        ExampleConfig(string_field='string_value', int_field=20).save()

        response = self.current_view(request)
        self.assertEqual('string_value', response.data['string_field'])
        self.assertEqual(20, response.data['int_field'])

    @ddt.data(
        ('get', [], 200),
        ('post', [{'string_field': 'string_value', 'int_field': 10}], 201),
    )
    @ddt.unpack
    def test_permissions(self, method, args, status_code):
        # A user without superuser rights must be rejected with 403.
        request = getattr(self.factory, method)('/config/ExampleConfig', *args)

        request.user = User.objects.create_user(
            username='no-perms',
            email='no-perms@example.com',
            password='no-perms',
        )
        response = self.current_view(request)
        self.assertEqual(403, response.status_code)

        request.user = self.user
        response = self.current_view(request)
        self.assertEqual(status_code, response.status_code)
|
agpl-3.0
|
axsauze/eventsfinder
|
django/contrib/messages/api.py
|
321
|
2952
|
from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
# Public API of the messages framework.
__all__ = (
    'add_message', 'get_messages',
    'get_level', 'set_level',
    'debug', 'info', 'success', 'warning', 'error',
)
class MessageFailure(Exception):
    """Raised by add_message() when no message storage is configured on the
    request (i.e. the messages middleware is missing) and ``fail_silently``
    is False."""
    pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
    """
    Attempts to add a message to the request using the 'messages' app.

    If no message storage is configured on the request, raises
    ``MessageFailure`` unless ``fail_silently`` is true.
    """
    try:
        backend = request._messages
    except AttributeError:
        if not fail_silently:
            raise MessageFailure('You cannot add messages without installing '
                                 'django.contrib.messages.middleware.MessageMiddleware')
    else:
        return backend.add(level, message, extra_tags)
def get_messages(request):
    """
    Returns the message storage on the request if it exists, otherwise returns
    an empty list.
    """
    return getattr(request, '_messages', [])
def get_level(request):
    """
    Returns the minimum level of messages to be recorded.

    The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
    the ``INFO`` level is used.
    """
    try:
        storage = request._messages
    except AttributeError:
        # No storage on the request; fall back to a default storage instance.
        storage = default_storage(request)
    return storage.level
def set_level(request, level):
    """
    Sets the minimum level of messages to be recorded, returning ``True`` if
    the level was recorded successfully.

    If set to ``None``, the default level will be used (see the ``get_level``
    method).
    """
    if hasattr(request, '_messages'):
        request._messages.level = level
        return True
    return False
def debug(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``DEBUG`` level."""
    add_message(request, constants.DEBUG, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``INFO`` level."""
    add_message(request, constants.INFO, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``SUCCESS`` level."""
    add_message(request, constants.SUCCESS, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``WARNING`` level."""
    add_message(request, constants.WARNING, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``ERROR`` level."""
    add_message(request, constants.ERROR, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
|
bsd-3-clause
|
zhangxq5012/sky_engine
|
mojo/tools/check_mojom_golden_files.py
|
10
|
3768
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os.path
import sys
from filecmp import dircmp
from shutil import rmtree
from tempfile import mkdtemp
from mopy.paths import Paths
paths = Paths()
sys.path.insert(0, os.path.join(paths.mojo_dir, "public", "tools", "bindings",
"pylib"))
from mojom_tests.support.find_files import FindFiles
from mojom_tests.support.run_bindings_generator import RunBindingsGenerator
def _ProcessDircmpResults(results, verbose=False):
"""Prints results of directory comparison and returns true if they are
identical (note: the "left" directory should be the golden directory)."""
rv = not (bool(results.left_only) or bool(results.right_only) or \
bool(results.common_funny) or bool(results.funny_files) or \
bool(results.diff_files))
if verbose:
for f in results.left_only:
print "%s exists in golden directory but not in current output" % f
for f in results.right_only:
print "%s exists in current output but not in golden directory" % f
for f in results.common_funny + results.funny_files:
print "Unable to compare %s between golden directory and current output" \
% f
for f in results.diff_files:
print "%s differs between golden directory and current output" % f
for r in results.subdirs.values():
# If we're being verbose, check subdirectories even if we know that there
# are differences. Note that it's "... and rv" to avoid the short-circuit.
if rv or verbose:
rv = _ProcessDircmpResults(r, verbose=verbose) and rv
return rv
def main():
  """Compares generated mojom bindings against golden files (or regenerates
  the golden files with --generate_golden_files).

  Returns:
    0 on success, 1 on failure (missing golden dir or differing output).
  """
  parser = argparse.ArgumentParser()
  # Typo fix: the help text previously had an unbalanced parenthesis.
  parser.add_argument("--generate_golden_files", action="store_true",
                      help=("generate golden files (does not obliterate "
                            "directory)"))
  parser.add_argument("--keep_temp_dir", action="store_true",
                      help="don't delete the temporary directory")
  parser.add_argument("--verbose", action="store_true",
                      help="spew excess verbiage")
  parser.add_argument("golden_dir", metavar="GOLDEN_DIR",
                      help="directory with the golden files")
  args = parser.parse_args()

  if args.generate_golden_files:
    if os.path.exists(args.golden_dir):
      print("WARNING: golden directory %s already exists" % args.golden_dir)
    out_dir = args.golden_dir
  else:
    if not os.path.exists(args.golden_dir):
      print("ERROR: golden directory %s does not exist" % args.golden_dir)
      return 1
    out_dir = mkdtemp()

  if args.verbose:
    print("Generating files to %s ..." % out_dir)

  mojom_files = FindFiles(paths.mojo_dir, "*.mojom")
  for mojom_file in mojom_files:
    if args.verbose:
      print(" Processing %s ..." % os.path.relpath(mojom_file, paths.mojo_dir))
    # TODO(vtl): This may wrong, since the path can be overridden in the .gyp
    # file.
    RunBindingsGenerator(out_dir, paths.mojo_dir, mojom_file,
                         ["-I", paths.src_root])

  if args.generate_golden_files:
    return 0

  identical = _ProcessDircmpResults(dircmp(args.golden_dir, out_dir, ignore=[]),
                                    verbose=args.verbose)

  if args.keep_temp_dir:
    if args.verbose:
      print("Not removing %s ..." % out_dir)
  else:
    if args.verbose:
      print("Removing %s ..." % out_dir)
    rmtree(out_dir)

  if not identical:
    print("FAILURE: current output differs from golden files")
    return 1

  print("SUCCESS: current output identical to golden files")
  return 0
# Script entry point: the process exit status mirrors main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
|
bsd-3-clause
|
cchurch/ansible
|
test/units/cli/test_galaxy.py
|
2
|
40697
|
# -*- coding: utf-8 -*-
# (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ansible
import json
import os
import pytest
import shutil
import tarfile
import tempfile
import yaml
import ansible.constants as C
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.utils import context_objects as co
from units.compat import unittest
from units.compat.mock import patch, MagicMock
# ``autouse`` is a boolean flag, not a scope name.  The original passed the
# string 'function', which only worked because any non-empty string is truthy.
@pytest.fixture(autouse=True)
def reset_cli_args():
    """Reset the global CLI args singleton before and after every test."""
    co.GlobalCLIArgs._Singleton__instance = None
    yield
    co.GlobalCLIArgs._Singleton__instance = None
class TestGalaxy(unittest.TestCase):
    """Tests for the ``ansible-galaxy`` command-line front end."""

    @classmethod
    def setUpClass(cls):
        '''creating prerequisites for installing a role; setUpClass occurs ONCE whereas setUp occurs with every method tested.'''
        # class data for easy viewing: role_dir, role_tar, role_name, role_req, role_path

        cls.temp_dir = tempfile.mkdtemp(prefix='ansible-test_galaxy-')
        os.chdir(cls.temp_dir)

        if os.path.exists("./delete_me"):
            shutil.rmtree("./delete_me")

        # creating framework for a role
        gc = GalaxyCLI(args=["ansible-galaxy", "init", "--offline", "delete_me"])
        gc.run()
        cls.role_dir = "./delete_me"
        cls.role_name = "delete_me"

        # making a temp dir for role installation
        cls.role_path = os.path.join(tempfile.mkdtemp(), "roles")
        if not os.path.isdir(cls.role_path):
            os.makedirs(cls.role_path)

        # creating a tar file name for class data
        cls.role_tar = './delete_me.tar.gz'
        cls.makeTar(cls.role_tar, cls.role_dir)

        # creating a temp file with installation requirements; a context
        # manager guarantees the handle is closed even if write() raises.
        cls.role_req = './delete_me_requirements.yml'
        with open(cls.role_req, "w") as fd:
            fd.write("- 'src': '%s'\n 'name': '%s'\n 'path': '%s'" % (cls.role_tar, cls.role_name, cls.role_path))

    @classmethod
    def makeTar(cls, output_file, source_dir):
        ''' used for making a tarfile from a role directory '''
        # adding directory into a tar file
        try:
            tar = tarfile.open(output_file, "w:gz")
            tar.add(source_dir, arcname=os.path.basename(source_dir))
        except AttributeError:  # tarfile obj. has no attribute __exit__ prior to python 2. 7
            pass
        finally:  # ensuring closure of tarfile obj
            tar.close()

    @classmethod
    def tearDownClass(cls):
        '''After tests are finished removes things created in setUpClass'''
        # deleting the temp role directory
        if os.path.exists(cls.role_dir):
            shutil.rmtree(cls.role_dir)
        if os.path.exists(cls.role_req):
            os.remove(cls.role_req)
        if os.path.exists(cls.role_tar):
            os.remove(cls.role_tar)
        if os.path.isdir(cls.role_path):
            shutil.rmtree(cls.role_path)

        os.chdir('/')
        shutil.rmtree(cls.temp_dir)

    def setUp(self):
        # Reset the stored command line args
        co.GlobalCLIArgs._Singleton__instance = None
        self.default_args = ['ansible-galaxy']

    def tearDown(self):
        # Reset the stored command line args
        co.GlobalCLIArgs._Singleton__instance = None

    def test_init(self):
        galaxy_cli = GalaxyCLI(args=self.default_args)
        # assertIsInstance gives a better failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(galaxy_cli, GalaxyCLI)

    def test_display_min(self):
        gc = GalaxyCLI(args=self.default_args)
        role_info = {'name': 'some_role_name'}
        display_result = gc._display_role_info(role_info)
        self.assertTrue(display_result.find('some_role_name') > -1)

    def test_display_galaxy_info(self):
        gc = GalaxyCLI(args=self.default_args)
        galaxy_info = {}
        role_info = {'name': 'some_role_name',
                     'galaxy_info': galaxy_info}
        display_result = gc._display_role_info(role_info)
        if display_result.find('\n\tgalaxy_info:') == -1:
            self.fail('Expected galaxy_info to be indented once')

    def test_run(self):
        ''' verifies that the GalaxyCLI object's api is created and that execute() is called. '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--ignore-errors", "imaginary_role"])
        gc.parse()
        with patch.object(ansible.cli.CLI, "run", return_value=None) as mock_run:
            gc.run()

        # testing
        self.assertIsInstance(gc.galaxy, ansible.galaxy.Galaxy)
        self.assertEqual(mock_run.call_count, 1)
        self.assertIsInstance(gc.api, ansible.galaxy.api.GalaxyAPI)

    def test_execute_remove(self):
        # installing role
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "-p", self.role_path, "-r", self.role_req, '--force'])
        gc.run()

        # location where the role was installed
        role_file = os.path.join(self.role_path, self.role_name)

        # removing role
        # Have to reset the arguments in the context object manually since we're doing the
        # equivalent of running the command line program twice
        co.GlobalCLIArgs._Singleton__instance = None
        gc = GalaxyCLI(args=["ansible-galaxy", "remove", role_file, self.role_name])
        gc.run()

        # testing role was removed
        removed_role = not os.path.exists(role_file)
        self.assertTrue(removed_role)

    def test_exit_without_ignore_without_flag(self):
        ''' tests that GalaxyCLI exits with the error specified if the --ignore-errors flag is not used '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name"])
        with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
            # testing that error expected is raised
            self.assertRaises(AnsibleError, gc.run)
            # Mock.called_once_with() is NOT an assertion -- it auto-creates a
            # (truthy) child mock, so wrapping it in assertTrue() could never
            # fail.  Assert that display() was actually invoked instead.
            self.assertTrue(mocked_display.called)

    def test_exit_without_ignore_with_flag(self):
        ''' tests that GalaxyCLI exits without the error specified if the --ignore-errors flag is used '''
        # testing with --ignore-errors flag
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name", "--ignore-errors"])
        with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
            gc.run()
            # See test_exit_without_ignore_without_flag: called_once_with() is
            # not an assertion; check that display() really was called.
            self.assertTrue(mocked_display.called)

    def test_parse_no_action(self):
        ''' testing the options parser when no action is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", ""])
        self.assertRaises(SystemExit, gc.parse)

    def test_parse_invalid_action(self):
        ''' testing the options parser when an invalid action is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "NOT_ACTION"])
        self.assertRaises(SystemExit, gc.parse)

    def test_parse_delete(self):
        ''' testing the options parser when the action 'delete' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "delete", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_import(self):
        ''' testing the options parser when the action 'import' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "import", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['wait'], True)
        self.assertEqual(context.CLIARGS['reference'], None)
        self.assertEqual(context.CLIARGS['check_status'], False)
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_info(self):
        ''' testing the options parser when the action 'info' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "info", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['offline'], False)

    def test_parse_init(self):
        ''' testing the options parser when the action 'init' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "init", "foo"])
        gc.parse()
        self.assertEqual(context.CLIARGS['offline'], False)
        self.assertEqual(context.CLIARGS['force'], False)

    def test_parse_install(self):
        ''' testing the options parser when the action 'install' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install"])
        gc.parse()
        self.assertEqual(context.CLIARGS['ignore_errors'], False)
        self.assertEqual(context.CLIARGS['no_deps'], False)
        self.assertEqual(context.CLIARGS['role_file'], None)
        self.assertEqual(context.CLIARGS['force'], False)

    def test_parse_list(self):
        ''' testing the options parser when the action 'list' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "list"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_login(self):
        ''' testing the options parser when the action 'login' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "login"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)
        self.assertEqual(context.CLIARGS['token'], None)

    def test_parse_remove(self):
        ''' testing the options parser when the action 'remove' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "remove", "foo"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_search(self):
        ''' testing the options parswer when the action 'search' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "search"])
        gc.parse()
        self.assertEqual(context.CLIARGS['platforms'], None)
        self.assertEqual(context.CLIARGS['galaxy_tags'], None)
        self.assertEqual(context.CLIARGS['author'], None)

    def test_parse_setup(self):
        ''' testing the options parser when the action 'setup' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "setup", "source", "github_user", "github_repo", "secret"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)
        self.assertEqual(context.CLIARGS['remove_id'], None)
        self.assertEqual(context.CLIARGS['setup_list'], False)
class ValidRoleTests(object):
    """Mixin of shared assertions for ``ansible-galaxy init`` role tests.

    Subclasses call :meth:`setUpRole` from their own ``setUpClass`` to create
    a role in a temporary directory, then inherit the checks below.
    """

    # Subdirectories every generated role is expected to contain.
    expected_role_dirs = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')

    @classmethod
    def setUpRole(cls, role_name, galaxy_args=None, skeleton_path=None, use_explicit_type=False):
        # Build the 'ansible-galaxy [role] init ...' invocation and run it,
        # recording the created role's location on the class.
        if galaxy_args is None:
            galaxy_args = []
        if skeleton_path is not None:
            cls.role_skeleton_path = skeleton_path
            galaxy_args += ['--role-skeleton', skeleton_path]
        # Make temp directory for testing
        cls.test_dir = tempfile.mkdtemp()
        if not os.path.isdir(cls.test_dir):
            os.makedirs(cls.test_dir)
        cls.role_dir = os.path.join(cls.test_dir, role_name)
        cls.role_name = role_name
        # create role using default skeleton
        args = ['ansible-galaxy']
        if use_explicit_type:
            # 'ansible-galaxy role init ...' instead of the implicit form.
            args += ['role']
        args += ['init', '-c', '--offline'] + galaxy_args + ['--init-path', cls.test_dir, cls.role_name]
        gc = GalaxyCLI(args=args)
        gc.run()
        cls.gc = gc
        if skeleton_path is None:
            # Fall back to the built-in skeleton shipped with ansible-galaxy.
            cls.role_skeleton_path = gc.galaxy.default_role_skeleton_path

    @classmethod
    def tearDownClass(cls):
        # Remove everything setUpRole created.
        if os.path.isdir(cls.test_dir):
            shutil.rmtree(cls.test_dir)

    def test_metadata(self):
        # meta/main.yml must exist and carry the two mandatory sections.
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
            self.assertIn('galaxy_info', metadata, msg='unable to find galaxy_info in metadata')
            self.assertIn('dependencies', metadata, msg='unable to find dependencies in metadata')

    def test_readme(self):
        readme_path = os.path.join(self.role_dir, 'README.md')
        self.assertTrue(os.path.exists(readme_path), msg='Readme doesn\'t exist')

    def test_main_ymls(self):
        # meta/, tests/, files/ and templates/ do not need a main.yml.
        need_main_ymls = set(self.expected_role_dirs) - set(['meta', 'tests', 'files', 'templates'])
        for d in need_main_ymls:
            main_yml = os.path.join(self.role_dir, d, 'main.yml')
            self.assertTrue(os.path.exists(main_yml))
            expected_string = "---\n# {0} file for {1}".format(d, self.role_name)
            with open(main_yml, 'r') as f:
                self.assertEqual(expected_string, f.read().strip())

    def test_role_dirs(self):
        for d in self.expected_role_dirs:
            self.assertTrue(os.path.isdir(os.path.join(self.role_dir, d)), msg="Expected role subdirectory {0} doesn't exist".format(d))

    def test_travis_yml(self):
        # The generated .travis.yml must match the skeleton's verbatim.
        with open(os.path.join(self.role_dir, '.travis.yml'), 'r') as f:
            contents = f.read()
        with open(os.path.join(self.role_skeleton_path, '.travis.yml'), 'r') as f:
            expected_contents = f.read()
        self.assertEqual(expected_contents, contents, msg='.travis.yml does not match expected')

    def test_readme_contents(self):
        # The generated README.md must match the skeleton's verbatim.
        with open(os.path.join(self.role_dir, 'README.md'), 'r') as readme:
            contents = readme.read()
        with open(os.path.join(self.role_skeleton_path, 'README.md'), 'r') as f:
            expected_contents = f.read()
        self.assertEqual(expected_contents, contents, msg='README.md does not match expected')

    def test_test_yml(self):
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
            print(test_playbook)
            self.assertEqual(len(test_playbook), 1)
            self.assertEqual(test_playbook[0]['hosts'], 'localhost')
            self.assertEqual(test_playbook[0]['remote_user'], 'root')
            self.assertListEqual(test_playbook[0]['roles'], [self.role_name], msg='The list of roles included in the test play doesn\'t match')
class TestGalaxyInitDefault(unittest.TestCase, ValidRoleTests):
    """Role init using the default built-in skeleton."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole(role_name='delete_me')

    def test_metadata_contents(self):
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        # The default skeleton ships a placeholder author value.
        self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')
class TestGalaxyInitAPB(unittest.TestCase, ValidRoleTests):
    """Role init with ``--type=apb`` (Ansible Playbook Bundle skeleton)."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole('delete_me_apb', galaxy_args=['--type=apb'])

    def test_metadata_apb_tag(self):
        # APB roles must be tagged 'apb' in their galaxy metadata.
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
            self.assertIn('apb', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='apb tag not set in role metadata')

    def test_metadata_contents(self):
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')

    def test_apb_yml(self):
        self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'apb.yml')), msg='apb.yml was not created')

    def test_test_yml(self):
        # The APB test play targets localhost without fact gathering.
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
            print(test_playbook)
            self.assertEqual(len(test_playbook), 1)
            self.assertEqual(test_playbook[0]['hosts'], 'localhost')
            self.assertFalse(test_playbook[0]['gather_facts'])
            self.assertEqual(test_playbook[0]['connection'], 'local')
            self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
class TestGalaxyInitContainer(unittest.TestCase, ValidRoleTests):
    """Role init with ``--type=container`` (container-enabled skeleton)."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole('delete_me_container', galaxy_args=['--type=container'])

    def test_metadata_container_tag(self):
        # Container roles must be tagged 'container' in their galaxy metadata.
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
            self.assertIn('container', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='container tag not set in role metadata')

    def test_metadata_contents(self):
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')

    def test_meta_container_yml(self):
        self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'meta', 'container.yml')), msg='container.yml was not created')

    def test_test_yml(self):
        # The container test play targets localhost without fact gathering.
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
            print(test_playbook)
            self.assertEqual(len(test_playbook), 1)
            self.assertEqual(test_playbook[0]['hosts'], 'localhost')
            self.assertFalse(test_playbook[0]['gather_facts'])
            self.assertEqual(test_playbook[0]['connection'], 'local')
            self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
class TestGalaxyInitSkeleton(unittest.TestCase, ValidRoleTests):
    """Role init with a custom ``--role-skeleton`` and the explicit 'role'
    subcommand."""

    @classmethod
    def setUpClass(cls):
        role_skeleton_path = os.path.join(os.path.split(__file__)[0], 'test_data', 'role_skeleton')
        cls.setUpRole('delete_me_skeleton', skeleton_path=role_skeleton_path, use_explicit_type=True)

    def test_empty_files_dir(self):
        files_dir = os.path.join(self.role_dir, 'files')
        self.assertTrue(os.path.isdir(files_dir))
        self.assertListEqual(os.listdir(files_dir), [], msg='we expect the files directory to be empty, is ignore working?')

    def test_template_ignore_jinja(self):
        # Files ending in .j2 must be copied, not rendered.
        test_conf_j2 = os.path.join(self.role_dir, 'templates', 'test.conf.j2')
        self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?")
        with open(test_conf_j2, 'r') as f:
            contents = f.read()
        expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
        self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?")

    def test_template_ignore_jinja_subfolder(self):
        # Same check, one directory deeper.
        test_conf_j2 = os.path.join(self.role_dir, 'templates', 'subfolder', 'test.conf.j2')
        self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?")
        with open(test_conf_j2, 'r') as f:
            contents = f.read()
        expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
        self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?")

    def test_template_ignore_similar_folder(self):
        self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'templates_extra', 'templates.txt')))

    def test_skeleton_option(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.role_skeleton_path, context.CLIARGS['role_skeleton'], msg='Skeleton path was not parsed properly from the command line')
@pytest.fixture()
def collection_skeleton(request, tmp_path_factory):
    # Indirectly parametrized with (collection name, skeleton path or None);
    # runs 'ansible-galaxy collection init' in a fresh temp dir and returns
    # the path of the created collection directory.
    name, skeleton_path = request.param
    galaxy_args = ['ansible-galaxy', 'collection', 'init', '-c']
    if skeleton_path is not None:
        galaxy_args += ['--collection-skeleton', skeleton_path]
    # Non-ASCII directory name exercises unicode path handling.
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
    galaxy_args += ['--init-path', test_dir, name]
    GalaxyCLI(args=galaxy_args).run()
    namespace_name, collection_name = name.split('.', 1)
    collection_dir = os.path.join(test_dir, namespace_name, collection_name)
    return collection_dir
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.my_collection', None),
], indirect=True)
def test_collection_default(collection_skeleton):
    """A collection created from the default skeleton carries the stock
    placeholder metadata and the standard directory layout."""
    meta_path = os.path.join(collection_skeleton, 'galaxy.yml')
    with open(meta_path, 'r') as galaxy_meta:
        metadata = yaml.safe_load(galaxy_meta)
    assert metadata['namespace'] == 'ansible_test'
    assert metadata['name'] == 'my_collection'
    assert metadata['authors'] == ['your name <example@domain.com>']
    assert metadata['readme'] == 'README.md'
    assert metadata['version'] == '1.0.0'
    assert metadata['description'] == 'your collection description'
    assert metadata['license'] == ['GPL-2.0-or-later']
    assert metadata['tags'] == []
    assert metadata['dependencies'] == {}
    assert metadata['documentation'] == 'http://docs.example.com'
    assert metadata['repository'] == 'http://example.com/repository'
    assert metadata['homepage'] == 'http://example.com'
    assert metadata['issues'] == 'http://example.com/issue/tracker'
    for d in ['docs', 'plugins', 'roles']:
        assert os.path.isdir(os.path.join(collection_skeleton, d)), \
            "Expected collection subdirectory {0} doesn't exist".format(d)
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.delete_me_skeleton', os.path.join(os.path.split(__file__)[0], 'test_data', 'collection_skeleton')),
], indirect=True)
def test_collection_skeleton(collection_skeleton):
    """Init from a custom skeleton: metadata comes from the skeleton, empty
    dirs survive, and files under templates/ or without .j2 stay untemplated."""
    with open(os.path.join(collection_skeleton, 'galaxy.yml'), 'r') as galaxy_meta:
        metadata = yaml.safe_load(galaxy_meta)

    assert metadata['namespace'] == 'ansible_test'
    assert metadata['name'] == 'delete_me_skeleton'
    assert metadata['authors'] == ['Ansible Cow <acow@bovineuniversity.edu>', 'Tu Cow <tucow@bovineuniversity.edu>']
    assert metadata['version'] == '0.1.0'
    assert metadata['readme'] == 'README.md'
    assert len(metadata) == 5

    assert os.path.exists(os.path.join(collection_skeleton, 'README.md'))

    # Empty directories from the skeleton must exist and stay empty.
    for rel_dir in ('plugins/action', 'plugins/filter', 'plugins/inventory', 'plugins/lookup',
                    'plugins/module_utils', 'plugins/modules'):
        assert os.listdir(os.path.join(collection_skeleton, rel_dir)) == []

    # A file without a .j2 suffix must not be run through the templar.
    with open(os.path.join(collection_skeleton, 'docs', 'My Collection.md'), 'r') as f:
        assert f.read().strip() == 'Welcome to my test collection doc for {{ namespace }}.'

    # .j2 files under a templates/ directory are role/playbook templates,
    # not skeleton templates, so they are copied verbatim.
    for rel_dir in ('playbooks/templates', 'playbooks/templates/subfolder',
                    'roles/common/templates', 'roles/common/templates/subfolder'):
        conf_path = os.path.join(collection_skeleton, rel_dir, 'test.conf.j2')
        assert os.path.exists(conf_path)
        with open(conf_path, 'r') as f:
            assert f.read().strip() == '[defaults]\ntest_key = {{ test_variable }}'
@pytest.fixture()
def collection_artifact(collection_skeleton, tmp_path_factory):
    """Build a publishable collection tarball and yield its output directory."""
    output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))

    # GalaxyCLI already ran inside collection_skeleton, so the global CLI-args
    # singleton must be cleared before 'collection build' parses new args;
    # the original value is restored when the fixture finalizes.
    orig_cli_args = co.GlobalCLIArgs._Singleton__instance
    try:
        co.GlobalCLIArgs._Singleton__instance = None
        build_args = ['ansible-galaxy', 'collection', 'build', collection_skeleton,
                      '--output-path', output_dir]
        GalaxyCLI(args=build_args).run()

        yield output_dir
    finally:
        co.GlobalCLIArgs._Singleton__instance = orig_cli_args
def test_invalid_skeleton_path():
    """A non-existent --collection-skeleton path aborts with AnsibleError."""
    expected = "- the skeleton path '/fake/path' does not exist, cannot init collection"

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', 'my.collection',
                          '--collection-skeleton', '/fake/path'])
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
@pytest.mark.parametrize("name", [
    "",
    "invalid",
    "hypen-ns.collection",
    "ns.hyphen-collection",
    "ns.collection.weird",
])
def test_invalid_collection_name(name):
    """Anything outside <namespace>.<collection> form must be rejected."""
    expected = "Invalid collection name, must be in the format <namespace>.<collection>"

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', name])
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.build_collection', None),
], indirect=True)
def test_collection_build(collection_artifact):
    """End-to-end check of 'ansible-galaxy collection build' output.

    Validates the tarball layout, member ownership and permissions, and the
    contents of MANIFEST.json and FILES.json for a default-skeleton build.
    """
    tar_path = os.path.join(collection_artifact, 'ansible_test-build_collection-1.0.0.tar.gz')
    assert tarfile.is_tarfile(tar_path)

    with tarfile.open(tar_path, mode='r') as tar:
        tar_members = tar.getmembers()

        valid_files = ['MANIFEST.json', 'FILES.json', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md']
        assert len(tar_members) == 7

        # Verify the uid and gid is 0 and the correct perms are set
        for member in tar_members:
            assert member.name in valid_files

            assert member.gid == 0
            assert member.gname == ''
            assert member.uid == 0
            assert member.uname == ''
            # 0755 for directories, 0644 for regular files.
            if member.isdir():
                assert member.mode == 0o0755
            else:
                assert member.mode == 0o0644

        # NOTE(review): relies on MANIFEST.json being the first archive member
        # and FILES.json the second -- confirm the builder guarantees ordering.
        manifest_file = tar.extractfile(tar_members[0])
        try:
            manifest = json.loads(to_text(manifest_file.read()))
        finally:
            manifest_file.close()

        coll_info = manifest['collection_info']
        file_manifest = manifest['file_manifest_file']
        assert manifest['format'] == 1
        assert len(manifest.keys()) == 3

        # Default-skeleton metadata, as rendered by 'collection init'.
        assert coll_info['namespace'] == 'ansible_test'
        assert coll_info['name'] == 'build_collection'
        assert coll_info['version'] == '1.0.0'
        assert coll_info['authors'] == ['your name <example@domain.com>']
        assert coll_info['readme'] == 'README.md'
        assert coll_info['tags'] == []
        assert coll_info['description'] == 'your collection description'
        assert coll_info['license'] == ['GPL-2.0-or-later']
        assert coll_info['license_file'] is None
        assert coll_info['dependencies'] == {}
        assert coll_info['repository'] == 'http://example.com/repository'
        assert coll_info['documentation'] == 'http://docs.example.com'
        assert coll_info['homepage'] == 'http://example.com'
        assert coll_info['issues'] == 'http://example.com/issue/tracker'
        assert len(coll_info.keys()) == 14

        assert file_manifest['name'] == 'FILES.json'
        assert file_manifest['ftype'] == 'file'
        assert file_manifest['chksum_type'] == 'sha256'
        assert file_manifest['chksum_sha256'] is not None  # Order of keys makes it hard to verify the checksum
        assert file_manifest['format'] == 1
        assert len(file_manifest.keys()) == 5

        files_file = tar.extractfile(tar_members[1])
        try:
            files = json.loads(to_text(files_file.read()))
        finally:
            files_file.close()

        assert len(files['files']) == 6
        assert files['format'] == 1
        assert len(files.keys()) == 2

        valid_files_entries = ['.', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md']
        for file_entry in files['files']:
            assert file_entry['name'] in valid_files_entries
            assert file_entry['format'] == 1

            if file_entry['name'] == 'plugins/README.md':
                assert file_entry['ftype'] == 'file'
                assert file_entry['chksum_type'] == 'sha256'
                assert file_entry['chksum_sha256'] == '5be7ec7b71096d56e1cc48311b6a2266b77b5fdb9d1985b5bc625787b1e857c5'
            elif file_entry['name'] == 'README.md':
                assert file_entry['ftype'] == 'file'
                assert file_entry['chksum_type'] == 'sha256'
                assert file_entry['chksum_sha256'] == '45923ca2ece0e8ce31d29e5df9d8b649fe55e2f5b5b61c9724d7cc187bd6ad4a'
            else:
                # Directory entries carry no checksum.
                assert file_entry['ftype'] == 'dir'
                assert file_entry['chksum_type'] is None
                assert file_entry['chksum_sha256'] is None

            assert len(file_entry.keys()) == 5
@pytest.fixture()
def collection_install(reset_cli_args, tmp_path_factory, monkeypatch):
    """Patch out the real installer and Display.warning; yield both mocks
    plus a scratch output directory."""
    install_mock = MagicMock()
    warning_mock = MagicMock()
    monkeypatch.setattr(ansible.cli.galaxy, 'install_collections', install_mock)
    monkeypatch.setattr(ansible.utils.display.Display, 'warning', warning_mock)

    scratch_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
    yield install_mock, warning_mock, scratch_dir
def test_collection_install_with_names(collection_install):
    """Two positional names: both requirements are forwarded to
    install_collections, and a non-configured path warning is emitted."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                'namespace2.collection:1.0.1', '--collections-path', output_dir]
    GalaxyCLI(args=cli_args).run()

    collection_path = os.path.join(output_dir, 'ansible_collections')
    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
        in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.collection', '*', None),
                               ('namespace2.collection', '1.0.1', None)]
    assert install_args[1] == collection_path
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True  # cert validation on by default (see --ignore-certs test)
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False
def test_collection_install_with_requirements_file(collection_install):
    """A requirements.yml file is parsed into (name, version, source) tuples
    and forwarded to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    requirements_file = os.path.join(output_dir, 'requirements.yml')
    with open(requirements_file, 'wb') as req_obj:
        req_obj.write(b'''---
collections:
- namespace.coll
- name: namespace2.coll
  version: '>2.0.1'
''')

    cli_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
                '--collections-path', output_dir]
    GalaxyCLI(args=cli_args).run()

    collection_path = os.path.join(output_dir, 'ansible_collections')
    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
        in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.coll', '*', None),
                               ('namespace2.coll', '>2.0.1', None)]
    assert install_args[1] == collection_path
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False
def test_collection_install_with_relative_path(collection_install, monkeypatch):
    """Relative requirements/collections paths are resolved to absolute ones."""
    mock_install = collection_install[0]

    req_mock = MagicMock(return_value=[('namespace.coll', '*', None)])
    monkeypatch.setattr(ansible.cli.galaxy, 'parse_collections_requirements_file', req_mock)
    monkeypatch.setattr(os, 'makedirs', MagicMock())

    requirements_file = './requirements.myl'
    collections_path = './ansible_collections'
    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install',
                    '--requirements-file', requirements_file,
                    '--collections-path', collections_path]).run()

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.coll', '*', None)]
    assert install_args[1] == os.path.abspath(collections_path)
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False

    assert req_mock.call_count == 1
    assert req_mock.call_args[0][0] == os.path.abspath(requirements_file)
def test_collection_install_with_unexpanded_path(collection_install, monkeypatch):
    """'~' and env vars in paths are expanded before use."""
    mock_install = collection_install[0]

    req_mock = MagicMock(return_value=[('namespace.coll', '*', None)])
    monkeypatch.setattr(ansible.cli.galaxy, 'parse_collections_requirements_file', req_mock)
    monkeypatch.setattr(os, 'makedirs', MagicMock())

    requirements_file = '~/requirements.myl'
    collections_path = '~/ansible_collections'
    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install',
                    '--requirements-file', requirements_file,
                    '--collections-path', collections_path]).run()

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.coll', '*', None)]
    assert install_args[1] == os.path.expanduser(os.path.expandvars(collections_path))
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False

    assert req_mock.call_count == 1
    assert req_mock.call_args[0][0] == os.path.expanduser(os.path.expandvars(requirements_file))
def test_collection_install_in_collection_dir(collection_install, monkeypatch):
    """Installing into a configured collections path emits no warning."""
    mock_install, mock_warning, output_dir = collection_install

    collections_path = C.COLLECTIONS_PATHS[0]
    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    'namespace2.collection:1.0.1', '--collections-path', collections_path]).run()

    assert mock_warning.call_count == 0

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.collection', '*', None),
                               ('namespace2.collection', '1.0.1', None)]
    assert install_args[1] == os.path.join(collections_path, 'ansible_collections')
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False
def test_collection_install_name_and_requirements_fail(collection_install):
    """A positional name combined with --requirements-file must raise."""
    test_path = collection_install[2]
    expected = 'The positional collection_name arg and --requirements-file are mutually exclusive.'

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                          '--collections-path', test_path, '--requirements-file', test_path])
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
def test_collection_install_no_name_and_requirements_fail(collection_install):
    """With neither a name nor --requirements-file, install must raise."""
    test_path = collection_install[2]
    expected = 'You must specify a collection name or a requirements file.'

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '--collections-path', test_path])
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
def test_collection_install_path_with_ansible_collections(collection_install):
    """A path already ending in ansible_collections is used as-is (still warns)."""
    mock_install, mock_warning, output_dir = collection_install

    collection_path = os.path.join(output_dir, 'ansible_collections')
    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    'namespace2.collection:1.0.1', '--collections-path', collection_path]).run()

    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" \
        % collection_path in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    install_args = mock_install.call_args[0]
    assert install_args[0] == [('namespace.collection', '*', None),
                               ('namespace2.collection', '1.0.1', None)]
    assert install_args[1] == collection_path
    assert install_args[2] == ['https://galaxy.ansible.com']
    assert install_args[3] is True
    for flag_idx in (4, 5, 6, 7):
        assert install_args[flag_idx] is False
def test_collection_install_ignore_certs(collection_install):
    """--ignore-certs turns off cert validation (positional arg 3)."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir, '--ignore-certs']).run()

    assert mock_install.call_args[0][3] is False
def test_collection_install_force(collection_install):
    """--force sets positional arg 6 of install_collections."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir, '--force']).run()

    assert mock_install.call_args[0][6] is True
def test_collection_install_force_deps(collection_install):
    """--force-with-deps sets positional arg 7 of install_collections."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir, '--force-with-deps']).run()

    assert mock_install.call_args[0][7] is True
def test_collection_install_no_deps(collection_install):
    """--no-deps sets positional arg 5 of install_collections."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir, '--no-deps']).run()

    assert mock_install.call_args[0][5] is True
def test_collection_install_ignore(collection_install):
    """--ignore-errors sets positional arg 4 of install_collections."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir, '--ignore-errors']).run()

    assert mock_install.call_args[0][4] is True
def test_collection_install_custom_server(collection_install):
    """--server overrides the default Galaxy server list (positional arg 2)."""
    mock_install, _unused_warning, output_dir = collection_install

    GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection',
                    '--collections-path', output_dir,
                    '--server', 'https://galaxy-dev.ansible.com']).run()

    assert mock_install.call_args[0][2] == ['https://galaxy-dev.ansible.com']
|
gpl-3.0
|
spatialdev/onadata
|
onadata/apps/main/migrations/0012_auto__add_unique_metadata_xform_data_type_data_value.py
|
8
|
9948
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add a unique constraint on MetaData over (xform, data_type, data_value).

    Auto-generated South schema migration. The ``models`` dict below is the
    frozen ORM snapshot South recorded when this migration was created; it is
    not a live model definition and should not be hand-edited.
    """

    def forwards(self, orm):
        """Apply: create the composite unique constraint."""
        # Adding unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
        db.create_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])

    def backwards(self, orm):
        """Revert: drop the composite unique constraint."""
        # Removing unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
        db.delete_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])

    # Frozen ORM state at generation time (South's "frozen models").
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
            'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
            'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'main.metadata': {
            'Meta': {'unique_together': "(('xform', 'data_type', 'data_value'),)", 'object_name': 'MetaData'},
            'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'data_file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'data_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'data_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"})
        },
        'main.tokenstoragemodel': {
            'Meta': {'object_name': 'TokenStorageModel'},
            'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'google_id'", 'primary_key': 'True', 'to': u"orm['auth.User']"}),
            'token': ('django.db.models.fields.TextField', [], {})
        },
        'main.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'home_page': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        }
    }

    complete_apps = ['main']
|
bsd-2-clause
|
ysekky/chainer
|
chainer/functions/theano/theano_function.py
|
4
|
2370
|
import numpy
import six
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
class TheanoFunction(function.Function):

    """Chainer function wrapping a pair of compiled Theano functions.

    ``forward_func`` computes the forward outputs and ``backward_func`` the
    input gradients. Both run on the CPU, so GPU-resident arrays are copied
    to the host before each call and results are copied back afterwards.
    """

    def __init__(self, forward_func, backward_func):
        # Emits the one-time "experimental API" warning.
        utils.experimental('chainer.functions.TheanoFunction')
        self.forward_func = forward_func
        self.backward_func = backward_func

    def check_type_forward(self, in_types):
        # One Chainer input per Theano input variable.
        type_check.expect(in_types.size() == len(self.forward_func.indices))

        for actual_type, input_info in six.moves.zip(
                in_types, self.forward_func.indices):
            # ndim/dtype must match the Theano variable's declared type.
            expect_type = input_info[0].variable.type
            # Theano cannot check shapes of variables
            type_check.expect(
                actual_type.ndim == expect_type.ndim,
                actual_type.dtype == expect_type.numpy_dtype,
            )

    def forward(self, inputs):
        # True when the inputs live on the GPU (module is cupy, not numpy).
        gpu = cuda.get_array_module(*inputs) is not numpy

        inputs = [cuda.to_cpu(x) for x in inputs]
        outputs = self.forward_func(*inputs)

        if gpu:
            # TODO(unno): We can remove redundant gpu-cpu copy using
            # theano.sandbox.cuda.CudaNdarray.gpudata
            # NOTE(review): the list itself (not *inputs) is passed here --
            # confirm cuda.get_device_from_array accepts a list argument.
            device = cuda.get_device_from_array(inputs)
            outputs = [cuda.to_gpu(x, device) for x in outputs]
        return tuple(outputs)

    def backward(self, inputs, grads):
        gpu = cuda.get_array_module(*inputs) is not numpy

        # TODO(unno): We can remove redundant gpu-cpu copy using
        # theano.sandbox.cuda.basic_ops.gpu_from_host
        args = [cuda.to_cpu(x) for x in inputs + grads]
        outputs = self.backward_func(*args)
        # backward_func must produce exactly one gradient per input.
        assert len(outputs) == len(inputs)

        if gpu:
            # TODO(unno): We can remove redundant gpu-cpu copy using
            # theano.sandbox.cuda.CudaNdarray.gpudata
            device = cuda.get_device_from_array(inputs)
            outputs = [cuda.to_gpu(x, device) for x in outputs]

        results = []
        for o, i in zip(outputs, inputs):
            if i.dtype.kind != 'f':
                # Non-floating-point inputs receive no gradient.
                o = None
            elif o.dtype != i.dtype:
                # Align the gradient's dtype with its input's dtype.
                o = o.astype(i.dtype)
            results.append(o)
        return tuple(results)
def theano_function(forward_func, backward_func, *inputs):
    """Apply a pair of compiled Theano functions to *inputs* as a Chainer op."""
    func = TheanoFunction(forward_func, backward_func)
    return func(*inputs)
|
mit
|
pkilambi/ceilometer
|
ceilometer/compute/pollsters/instance.py
|
4
|
1618
|
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer import sample
class InstancePollster(pollsters.BaseComputePollster):
    """Emits one 'instance' gauge sample per compute instance."""

    @staticmethod
    def get_samples(manager, cache, resources):
        """Yield a single gauge sample (volume 1) for each instance."""
        for inst in resources:
            yield util.make_sample_from_instance(
                inst,
                name='instance',
                type=sample.TYPE_GAUGE,
                unit='instance',
                volume=1,
            )
class InstanceFlavorPollster(pollsters.BaseComputePollster):
    """Emits one flavor-qualified gauge sample per compute instance."""

    @staticmethod
    def get_samples(manager, cache, resources):
        """Yield an 'instance:<flavor>' gauge sample for each instance."""
        for inst in resources:
            # Use the "meter name + variable" syntax, e.g. 'instance:m1.small'.
            yield util.make_sample_from_instance(
                inst,
                name='instance:%s' % inst.flavor['name'],
                type=sample.TYPE_GAUGE,
                unit='instance',
                volume=1,
            )
|
apache-2.0
|
sebastienhupin/qxrad
|
qooxdoo/tool/pylib/generator/code/DependencyLoader.py
|
1
|
19794
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# NAME
# DependencyLoader
#
# SYNTAX
# from DependencyLoader import DependencyLoader
# mydl = DependencyLoader(...)
#
# DESCRIPTION
#
# ENTRY POINTS (for generator)
# - DependencyLoader.__init__()
# - DependencyLoader.getClassList()
# - DependencyLoader.classlistFromInclude()
# - DependencyLoader.sortClasses()
#
##
import sys, re, os, types, time
from operator import attrgetter
import graph
from misc.ExtMap import ExtMap
from ecmascript.frontend import lang
from ecmascript.transform.check import global_symbols as gs
from generator.code.Class import DependencyError
from generator.code.DependencyItem import DependencyItem
from generator.action import CodeMaintenance
class DependencyLoader(object):
def __init__(self, classesObj, cache, console, require, use, context):
    """Store the collaborators used during dependency resolution.

    :param classesObj: mapping of class id -> class object (library classes)
    :param cache: generator cache front-end
    :param console: console/logging facade
    :param require: extra require hints (presumably from job config -- confirm)
    :param use: extra use hints (presumably from job config -- confirm)
    :param context: generator context dict; only 'jobconf' is read here
    """
    self._classesObj = classesObj  # _libClassesObj
    self._cache = cache
    self._console = console
    self._context = context
    # Fall back to an empty ExtMap when the context carries no job config.
    self._jobconf = context.get('jobconf', ExtMap())
    self._require = require
    self._use = use
    self.counter = 0
def expand_hard_excludes(self, excludeWithDepsHard, script, verifyDeps=False):
    """Resolve hard excludes into a full class list including their deps."""
    if not excludeWithDepsHard:
        return []
    return self.classlistFromInclude(excludeWithDepsHard, [], script.variants, verifyDeps, script)
##
# Return a class list for the current script
def getClassList(self, includeWithDeps, excludeWithDeps, includeNoDeps, excludeWithDepsHard, script, verifyDeps=False):
    """Compute the sorted class list for *script*.

    Combines smart (transitive) includes/excludes with explicit
    (non-transitive) ones, then sorts the result by load dependencies.
    """

    ##
    # Resolve intelli include/exclude dependencies
    def resolveDepsSmartCludes(includeWithDeps, excludeWithDeps):
        # Only explicit includes given: start from an empty list (blocking
        # with excludes is deliberately tolerated here -- see the disabled
        # raise below).
        if len(includeWithDeps) == 0 and len(includeNoDeps) > 0:
            if len(excludeWithDeps) > 0:
                #raise ValueError("Blocking is not supported when only explicit includes are defined!");
                pass
            result = []
        else:
            result = self.classlistFromInclude(includeWithDeps, excludeWithDeps, script.variants, verifyDeps, script)
        return result

    ##
    # Explicit include/exclude
    def processExplicitCludes(result, includeList, excludeList):
        # Appends/removes entries verbatim, without dependency expansion.
        if len(includeList) > 0 or len(excludeList) > 0:
            self._console.info("Processing explicitly configured includes/excludes...")
            for entry in includeList:
                if not entry in result:
                    result.append(entry)
            for entry in excludeList:
                if entry in result:
                    result.remove(entry)
        return result

    # ---------------------------------------------------

    # Hard excludes are expanded with their own dependencies first, then
    # folded into the ordinary exclude list.
    if excludeWithDepsHard:
        exclude_hard_list = resolveDepsSmartCludes(excludeWithDepsHard, [])
    else:
        exclude_hard_list = []
    excludeList = excludeWithDeps + exclude_hard_list

    result = resolveDepsSmartCludes(includeWithDeps, excludeList)
    result = processExplicitCludes(result, includeNoDeps, excludeList) # resolveDepsSmartCludes not necessarily removes elems of exlcudeList, hence repeated here

    # Sort classes
    self._console.info("Sorting %s classes " % len(result), False)
    result = self.sortClasses(result, script.variants, script.buildType)
    self._console.dotclear()

    #self._console.nl()
    if self._console.getLevel() == "debug":
        self._console.indent()
        self._console.debug("Sorted class list:")
        self._console.indent()
        for classId in result:
            self._console.debug(classId)
        self._console.outdent()
        self._console.outdent()

    # Return list
    return result
##
# Compute the transitive closure of classes reachable from includeWithDeps,
# honoring excludeWithDeps as blockers; returns a list of class names.
# When includeWithDeps is empty, all known classes are returned.
def classlistFromInclude(self, includeWithDeps, excludeWithDeps, variants,
                         verifyDeps=False, script=None, allowBlockLoaddeps=True):

    ##
    # Depth-first expansion of depsItem's dependencies into 'result':
    # load-time deps before the class itself, run-time deps after.
    def classlistFromClassRecursive(depsItem, excludeWithDeps, variants, result, warn_deps, loadDepsChain, allowBlockLoaddeps=True):
        # support blocking
        if depsItem.name in excludeWithDeps:
            # blocking a load-time dependency is only legal when explicitly allowed
            if depsItem.isLoadDep and not allowBlockLoaddeps:
                raise DependencyError()
            return
        # check if already in
        if depsItem.name in resultNames: # string compares are perceivably faster than object compares (as DependencyItem defines __eq__)
            return

        # Reading dependencies
        self._console.debug("Gathering dependencies: %s" % depsItem.name)
        self._console.indent()
        classObj = self._classesObj[depsItem.name] # get class from depsItem - throws KeyError
        deps, cached = classObj.getCombinedDeps(self._classesObj, variants, self._jobconf)
        # lint-check - sans globals check (s.further)
        if lint_check and is_app_code(classObj): # opt: and not cached
            warns = classObj.lint_warnings(lint_opts)
            for warn in warns:
                self._console.warn("%s (%d, %d): %s" % (classObj.id, warn.line, warn.column,
                    warn.msg % tuple(warn.args)))
        self._console.outdent()
        if logInfos: self._console.dot("%s" % "." if cached else "*")

        # And evaluate them
        # check for unknown globals
        deps["warn"] = self._checkDepsAreKnown(deps) # add 'warn' key to deps
        ignore_names = [x.name for x in deps["ignore"]]
        if verifyDeps:
            for dep in deps["warn"]:
                if dep.name not in ignore_names:
                    warn_deps.append(dep) # add it to warnings accumulator

        # process lists
        try:
            skipNames = [x.name for x in deps["warn"] + deps["ignore"]]

            # cycle detection
            assert depsItem.name not in loadDepsChain
            loadDepsChain.append(depsItem.name)

            # load-time deps first, so they end up before depsItem in result
            for subitem in deps["load"]:
                # cycle check
                if subitem.name in loadDepsChain:
                    self._console.warn("Detected circular dependency between: %s and %s" % (depsItem.name, subitem.name))
                    self._console.indent()
                    self._console.debug("currently explored dependency path: %r" % loadDepsChain)
                    self._console.outdent()
                    raise RuntimeError("Circular class dependencies")
                if subitem.name not in resultNames and subitem.name not in skipNames:
                    classlistFromClassRecursive(subitem, excludeWithDeps, variants, result, warn_deps, loadDepsChain, allowBlockLoaddeps)

            ##
            # putting this here allows expanding and partially sorting of the class
            # list in one go
            if depsItem.name not in resultNames:
                result.append(depsItem)
                resultNames.append(depsItem.name)

            # cycle check
            loadDepsChain.remove(depsItem.name)

            # run-time deps descend with a fresh chain (no load-cycle danger)
            for subitem in deps["run"]:
                if subitem.name not in resultNames and subitem.name not in skipNames:
                    classlistFromClassRecursive(subitem, excludeWithDeps, variants, result, warn_deps, [], allowBlockLoaddeps)

        except DependencyError, detail:
            raise ValueError("Attempt to block load-time dependency of class %s to %s" % (depsItem.name, subitem.name))
        except KeyError, detail:
            raise NameError("Could not resolve dependencies of class '%s': %s" % (depsItem.name, detail))

        return

    ##
    # Iterative alternative to classlistFromClassRecursive, built on the
    # generic agendaSearch traversal (breadth-first).
    def classlistFromClassIterative(depsItem, excludeWithDeps, variants, result, warn_deps, loadDepsChain, allowBlockLoaddeps=True):

        def processNode(depsItem):
            # skip already collected classes; otherwise record and descend
            if depsItem.name in resultNames:
                node = None
            else:
                result.append(depsItem)
                resultNames.append(depsItem.name)
                node = depsItem
            return node

        def getNodeChildren(depsItem):
            deps, cached = self._classesObj[depsItem.name].getCombinedDeps(self._classesObj, variants, self._jobconf)
            # and evaluate them
            deps["warn"] = self._checkDepsAreKnown(deps) # add 'warn' key to deps
            ignore_names = [x.name for x in deps["ignore"]]
            if verifyDeps:
                for dep in deps["warn"]:
                    if dep.name not in ignore_names:
                        warn_deps.append(dep)
            skipNames = [x.name for x in deps["warn"] + deps["ignore"]]
            result = []
            for dep in deps['load'] + deps['run']:
                if dep.name in skipNames or dep.name in resultNames:
                    continue
                result.append(dep)
            return result # returns *all* deps (load, run, ...)

        # ---------------------------------------------------------------------

        self.agendaSearch([depsItem], processNode, getNodeChildren, mode="bf")
        return

    ##
    # Whether classObj belongs to the application itself (by name space)
    def is_app_code(classObj):
        return classObj.library.namespace == app_namespace

    # -------------------------------------------

    result = []
    warn_deps = []
    logInfos = self._console.getLevel() == "info"
    app_namespace = self._jobconf.get("let/APPLICATION", u'')

    # Lint stuff
    lint_check, lint_opts = CodeMaintenance.lint_comptime_opts()
    if lint_check:
        lint_opts.library_classes = self._classesObj.keys() # for globals shadowing check

    # No dependency calculation
    if len(includeWithDeps) == 0:
        self._console.info("Including all known classes")
        result = self._classesObj.keys()
        # In this case the block works like an explicit exclude
        # because all classes are included like an explicit include.
        for classId in excludeWithDeps:
            result.remove(classId)
        # TODO: use lint_check
    # Calculate dependencies
    else:
        result = []  # reset any previous results for this iteration
        resultNames = []
        # calculate class list recursively
        for item in includeWithDeps:
            depsItem = DependencyItem(item, '', '|config|')
            classlistFromClassRecursive(depsItem, excludeWithDeps, variants, result, warn_deps, [], allowBlockLoaddeps)
        self._console.dotclear()
        # extract names of depsItems
        result = [x.name for x in result]

    # Unknown globals warnings
    # - late, because adding the list of name spaces of the selected classes
    known_namespaces = set()
    for classid in result:
        nsindex = classid.rfind(".")
        if nsindex == -1:
            continue  # not interested in bare class names
        classnamespace = classid[:nsindex]
        known_namespaces.add(classnamespace)
    # honor lint-check/allowed-globals config
    callowed_globals = self._jobconf.get("lint-check/allowed-globals", [])
    #known_namespaces.update(callowed_globals)
    for dep in warn_deps:
        if not gs.test_for_libsymbol(dep.name, callowed_globals, known_namespaces):
            self._console.warn("%s (%s): Unknown global symbol used: %s" % (dep.requestor, dep.line, dep.assembled()))

    return result
def _checkDepsAreKnown(self, deps,):
# check the shallow deps are known classes
new_warn = []
for dep in deps["load"] + deps["run"]:
if not self._isKnownClass(dep.name):
new_warn.append(dep)
return new_warn
def _isKnownClass(self, classId):
    """True if classId names a built-in (per lang.BUILTIN, plus 'clazz'),
    a registered class, or a 'this' reference."""
    return bool(
        classId in lang.BUILTIN + ["clazz"]
        or classId in self._classesObj
        or re.match(r'this\b', classId))
def agendaSearch(self, agenda, processNode, getNodeChildren, mode="df"):
    """Generic agenda-driven graph traversal.

    Pops the first agenda entry, maps it through processNode, and -- when
    processNode yields a (truthy) node -- queues that node's children: at
    the front for depth-first ("df"), at the back otherwise (breadth-first).
    """
    while agenda:
        current = processNode(agenda.pop(0))
        if not current:
            continue
        children = getNodeChildren(current)
        if mode == "df":
            agenda[0:0] = children   # prepend
        else:
            agenda.extend(children)  # append
    return
def agendaSearchMP(self, agenda, processNode, getNodeChildren, mode="df"):
    """Multi-processing variant of agendaSearch.

    NOTE(review): the body was a verbatim copy of agendaSearch with no
    actual parallelism; delegate to the single implementation so the two
    cannot drift apart. Behavior is identical.
    """
    return self.agendaSearch(agenda, processNode, getNodeChildren, mode)
######################################################################
# CLASS SORT SUPPORT
######################################################################

##
# Method chooser: delegates class sorting to the recursive implementation.
# (A topological variant, sortClassesTopological, exists but is currently
# disabled; cf. the "dependencies/sort-topological" job config key.)
def sortClasses(self, *args, **kwargs):
    return self.sortClassesRec(*args, **kwargs)
def sortClassesRec(self, classList, variants, buildType=""):
    """Sort classList so that every class comes after its load-time
    dependencies (depth-first topological sort).

    Only dependencies that are themselves in classList are considered.
    Raises RuntimeError on a circular load-time dependency.
    """
    def sortClassesRecurser(classId, classListSorted, path):
        if classId in classListSorted:
            return
        # reading dependencies
        deps, cached = self._classesObj[classId].getCombinedDeps(self._classesObj, variants, self._jobconf)
        # BUGFIX: was `is "info"` -- identity comparison of strings only
        # worked by interning accident; use equality.
        if self._console.getLevel() == "info":
            self._console.dot("%s" % "." if cached else "*")
        # path is needed for cycle detection along the current descent
        if not classId in path:
            path.append(classId)
        # process loadtime requirements first, so they sort earlier
        for dep in deps["load"]:
            dep_name = dep.name
            if dep_name in classList and not dep_name in classListSorted:
                if dep_name in path:
                    self._console.warn("Detected circular dependency between: %s and %s" % (classId, dep_name))
                    self._console.indent()
                    self._console.debug("currently explored dependency path: %r" % path)
                    self._console.outdent()
                    raise RuntimeError("Circular class dependencies")
                else:
                    sortClassesRecurser(dep_name, classListSorted, path)
        if not classId in classListSorted:
            # all load deps are placed; remove from path and emit the class
            path.remove(classId)
            classListSorted.append(classId)
        return

    # ---------------------------------
    classListSorted = []
    path = []
    for classId in classList:
        sortClassesRecurser(classId, classListSorted, path)
    return classListSorted
def sortClassesTopological(self, classList, variants, buildType=''):
    """Topologically sort classList by load-time dependencies using the
    graph package. Currently unused (cf. sortClasses); kept as an
    alternative to the recursive sorter."""
    # create graph object
    gr = graph.digraph()
    # add classes as nodes
    gr.add_nodes(classList)
    # for each load dependency add a directed edge (dependency -> dependent)
    for classId in classList:
        # BUGFIX: getCombinedDeps takes the class registry as its first
        # argument at every other call site; it was missing here.
        deps, _ = self._classesObj[classId].getCombinedDeps(self._classesObj, variants, self._jobconf)
        for dep in deps["load"]:
            depClassId = dep.name
            if depClassId in classList:
                gr.add_edge(depClassId, classId)
    # cycle check is deliberately non-fatal here
    cycle_nodes = gr.find_cycle()
    if cycle_nodes:
        #raise RuntimeError("Detected circular dependencies between nodes: %r" % cycle_nodes)
        pass
    classList = gr.topological_sorting()
    return classList
######################################################################
# FEATURE SUPPORT
######################################################################

##
# Returns featureMap =
# { 'qx.core.Object' : {'myFeature': ('r',)} } -- ('r',) is currently a way to say 'True'
#
# Walks all classes and records, per dependee class, which of its features
# (attributes) are referenced by other classes, with reference counts
# (see UsedFeature).
def registerDependeeFeatures(self, classList, variants, buildType=""):
    featureMap = {}
    self._console.info("Registering used class features ", False)
    for clazz in classList:
        # make sure every class is at least listed
        if clazz.id not in featureMap:
            featureMap[clazz.id] = {}
        # full deps, incl. self references and unprojected names
        # (force=0 -- presumably permits cached results; confirm against getCombinedDeps)
        deps, _ = clazz.getCombinedDeps(self._classesObj, variants, self._jobconf, stripSelfReferences=False, projectClassNames=False, force=0)
        ignored_names = map(attrgetter("name"), deps['ignore'])
        for dep in deps['load'] + deps['run']:
            if dep.name in ignored_names:
                continue
            if dep.name not in featureMap:
                featureMap[dep.name] = {}
            if dep.attribute in featureMap[dep.name]:
                # increment the reference count for this feature
                featureMap[dep.name][dep.attribute].addref(dep)
            else:
                # first use of this feature -- create the counter
                featureMap[dep.name][dep.attribute] = UsedFeature(dep)
    self._console.nl()
    return featureMap
##
# Helper class, to represent reference counts in the FeatureMap
#
class UsedFeature(object):
    """Reference counter for a single used class feature.

    Tracks how often a feature is referenced and by whom (the list of
    dependency items in self._refs), so references can later be removed
    again when classes are pruned.

    IDIOM: receiver renamed from 's' to the conventional 'self'; behavior
    is unchanged.
    """

    def __init__(self, dep):
        # start with exactly one reference
        self._ref_cnt = 1
        self._refs = [dep]

    def __str__(self):
        return "<UsedFeature:%d:%r>" % (self._ref_cnt, [("%s:%s" % (x.requestor, x.line)) for x in self._refs])

    def __repr__(self):
        return str(self)

    def addref(self, dep):
        # register an additional referencing dependency
        self._refs.append(dep)
        self._ref_cnt += 1

    #def incref(self):
    #    self._ref_cnt += 1

    def decref(self, req_name='', req_line=''):
        """Drop one reference; when req_name (and optionally req_line) is
        given, also remove the matching entries from the ref list.
        Returns True if at least one ref entry was removed."""
        if self._ref_cnt > 0:
            self._ref_cnt -= 1
        ref_removed = False
        if req_name:
            for ref in self._refs[:]:
                if ((ref.requestor == req_name and not req_line) or
                    (ref.requestor == req_name and ref.line == req_line)):
                    ref_removed = True
                    self._refs.remove(ref)  # TODO: this is very delicate, as [].remove uses __eq__ of the elements!
        return ref_removed

    def hasref(self):
        return self._ref_cnt > 0

    def __len__(self):
        return len(self._refs)

    # this is more specific than DependencyItem.__eq__ --
    # compare name, attribute, requestor and line
    _depattribs = 'name attribute requestor line'.split()

    def _depmatches(self, dep, odep):
        return all([(getattr(dep, f) == getattr(odep, f)) for f in self._depattribs])

    def __contains__(self, odep):
        for dep in self._refs:
            if self._depmatches(dep, odep):
                return True
        return False
|
lgpl-3.0
|
BrianHicks/probe
|
probe/cli.py
|
1
|
3949
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from argparse import ArgumentParser
from datetime import datetime
from dateutil.rrule import rrulestr
import os
import sys
from textwrap import dedent
from .config import ConfigHandler
from .errors import AnswerError
# Path of the probe config file; overridable via the PROBE_CONFIG env var.
CONFIG_LOCATION = os.environ.get(
    'PROBE_CONFIG', os.path.expanduser('~/.probe_config')
)
# Path of the last-run bookkeeping file; overridable via PROBE_LAST_RUN.
LAST_RUN_LOCATION = os.environ.get(
    'PROBE_LAST_RUN', os.path.expanduser('~/.probe_last_run')
)
class ProbeCLI(object):
    """Command-line front end for probe.

    Builds an argparse parser with two sub-commands:
      init   -- write a sample config to CONFIG_LOCATION (overwrites)
      answer -- prompt for / accept answers to the configured questions
    """

    def __init__(self, config):
        # config: ConfigHandler exposing .questions, .outputs and .last_run
        self.config = config

        self.parser = ArgumentParser('probe')
        sub = self.parser.add_subparsers()

        init = sub.add_parser(
            'init',
            description='Initialize a new config in "%s" (WILL OVERWRITE)' % CONFIG_LOCATION
        )
        init.set_defaults(func=self.initialize)

        answer = sub.add_parser(
            'answer',
            description='Answer the questions you have set up'
        )
        answer.set_defaults(func=self.answer)
        # one optional flag per configured question, so answers can be
        # supplied on the command line instead of interactively
        for question in self.config.questions:
            arg = answer.add_argument(
                '--%s' % question.key,
                metavar=question.key,
                help=question.hint(),
            )
            # argparse consults action.type at parse time, so assigning it
            # after add_argument still applies the conversion; bool is
            # skipped (bool('False') would be True)
            if question.type != bool:
                arg.type = question.type

    def run(self, args):
        """Parse args and dispatch to the selected sub-command handler."""
        args = self.parser.parse_args(args)
        return args.func(args)

    def initialize(self, args):
        """Write a sample config to CONFIG_LOCATION; returns the path."""
        print('Initializing sample config in %s' % CONFIG_LOCATION)
        with open(CONFIG_LOCATION, 'w') as config:
            config.write(dedent('''
                questions:
                  - sleep.hours:
                      text: How long did you sleep last night?
                      interval: every day
                      unit: hours
                  - mood.energy:
                      text: What is your energy level?
                      interval: every hour
                      unit: rating
                      lower: 0
                      upper: 10

                outputs:
                  - stdout:
                      level: info
            ''').strip())
        return CONFIG_LOCATION

    def answer(self, args):
        """Collect answers for all currently-due questions, send them to
        the configured outputs, and update last-run timestamps."""
        # answer questions
        answers = {}
        for question in self.config.questions:
            # skip questions whose recurrence interval has not elapsed yet
            last_run = self.config.last_run.last_run(question.key)
            if last_run is not None:
                rule = rrulestr(question.interval, dtstart=last_run)
                next_run = rule.after(last_run)
                if next_run > datetime.now():
                    continue

            # answer supplied on the command line?
            if getattr(args, question.key):
                try:
                    answers[question.key] = question.parse_answer(
                        getattr(args, question.key)
                    )
                except AnswerError as err:
                    print('Error in %s: %s' % (question.key, err))
                    sys.exit(1)
                continue

            # otherwise prompt until a parseable answer is given
            while question.key not in answers:
                try:
                    answer = raw_input(question.text + ' [' + question.hint() + '] ')
                except (KeyboardInterrupt, EOFError):
                    print('Interrupting. Bye!')
                    sys.exit(1)
                try:
                    answers[question.key] = question.parse_answer(answer)
                except AnswerError as err:
                    print('Error: %s' % err)

        if not answers:
            print('No questions to answer right now. Check back later!')

        # fan each answer out to every output, then record the run time
        for key, answer in answers.items():
            for output in self.config.outputs:
                output.send(key, answer)
            self.config.last_run.update_last_run(key, datetime.now())

        return answers
def main():
    """Console entry point: load config + last-run state, then dispatch
    the process arguments to the CLI."""
    config = ConfigHandler.from_paths(CONFIG_LOCATION, LAST_RUN_LOCATION)
    cli = ProbeCLI(config)
    cli.run(sys.argv[1:])
|
bsd-3-clause
|
gdimitris/ChessPuzzlerBackend
|
Virtual_Environment/lib/python2.7/site-packages/setuptools/depends.py
|
462
|
6370
|
import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
    """A prerequisite to building or installing a distribution"""

    def __init__(self, name, requested_version, module, homepage='',
            attribute=None, format=None):
        # default to StrictVersion parsing when a version was requested
        if format is None and requested_version is not None:
            format = StrictVersion

        if format is not None:
            requested_version = format(requested_version)
            # a formatted requirement defaults to checking __version__
            if attribute is None:
                attribute = '__version__'

        # bulk-assign all constructor arguments as instance attributes,
        # then drop the spurious 'self' entry introduced by locals()
        self.__dict__.update(locals())
        del self.self

    def full_name(self):
        """Return full package/distribution name, w/version"""
        if self.requested_version is not None:
            return '%s-%s' % (self.name,self.requested_version)
        return self.name

    def version_ok(self, version):
        """Is 'version' sufficiently up-to-date?"""
        # NOTE: 'and' binds tighter than 'or' -- a requirement without a
        # version attribute or format always passes this check.
        return self.attribute is None or self.format is None or \
            str(version) != "unknown" and version >= self.requested_version

    def get_version(self, paths=None, default="unknown"):
        """Get version number of installed module, 'None', or 'default'

        Search 'paths' for module.  If not found, return 'None'.  If found,
        return the extracted version attribute, or 'default' if no version
        attribute was specified, or the value cannot be determined without
        importing the module.  The version is formatted according to the
        requirement's version format (if any), unless it is 'None' or the
        supplied 'default'.
        """
        if self.attribute is None:
            # no version attribute: mere importability counts as installed
            try:
                f,p,i = find_module(self.module,paths)
                if f: f.close()
                return default
            except ImportError:
                return None

        v = get_module_constant(self.module, self.attribute, default, paths)

        if v is not None and v is not default and self.format is not None:
            return self.format(v)

        return v

    def is_present(self, paths=None):
        """Return true if dependency is present on 'paths'"""
        return self.get_version(paths) is not None

    def is_current(self, paths=None):
        """Return true if dependency is present and up-to-date on 'paths'"""
        version = self.get_version(paths)
        if version is None:
            return False
        return self.version_ok(version)
def _iter_code(code):
    """Yield '(op,arg)' pair for each operation in code object 'code'

    NOTE(review): assumes the classic CPython 2 bytecode layout (1-byte
    opcode, 2-byte little-endian argument); not valid for the 3.6+
    wordcode format -- confirm target interpreter before reuse.
    """
    from array import array
    from dis import HAVE_ARGUMENT, EXTENDED_ARG

    bytes = array('b',code.co_code)
    eof = len(code.co_code)

    ptr = 0
    extended_arg = 0

    while ptr<eof:
        op = bytes[ptr]
        if op>=HAVE_ARGUMENT:
            # 2-byte little-endian argument, plus any EXTENDED_ARG prefix
            arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
            ptr += 3
            if op==EXTENDED_ARG:
                # accumulate the high bits for the following real opcode
                extended_arg = arg * compat.long_type(65536)
                continue
        else:
            arg = None
            ptr += 1

        yield op,arg
def find_module(module, paths=None):
    """Just like 'imp.find_module()', but with package support

    Walks the dotted module name one component at a time, descending into
    package directories. Returns the (file, path, description) triple of
    the last component, like imp.find_module.
    """
    parts = module.split('.')
    while parts:
        part = parts.pop(0)
        f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)

        if kind==PKG_DIRECTORY:
            # descend into the package; an exhausted remainder means we
            # want the package itself, i.e. its __init__
            parts = parts or ['__init__']
            paths = [path]

        elif parts:
            # a non-package with a dotted remainder cannot be resolved
            raise ImportError("Can't find %r in %s" % (parts,module))

    return info
def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'

    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'.  If the module defines 'symbol' as a constant, return the
    constant.  Otherwise, return 'default'."""

    try:
        f, path, (suffix, mode, kind) = find_module(module, paths)
    except ImportError:
        # Module doesn't exist
        return None

    try:
        if kind==PY_COMPILED:
            # NOTE(review): 8-byte pyc header is the Python 2 layout;
            # newer CPython versions use a larger header.
            f.read(8)   # skip magic & date
            code = marshal.load(f)
        elif kind==PY_FROZEN:
            code = imp.get_frozen_object(module)
        elif kind==PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it.  :(
            if module not in sys.modules:
                imp.load_module(module, f, path, (suffix, mode, kind))
            return getattr(sys.modules[module], symbol, None)
    finally:
        if f:
            f.close()

    # examine the module's bytecode without importing it
    return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an expression,
    return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol' must
    be a global, or at least a non-"fast" local in the code block.  That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # symbol is never assigned anywhere in this code object
        return None

    name_idx = list(code.co_names).index(symbol)

    # relevant CPython opcode numbers
    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default
    for op, arg in _iter_code(code):
        if op == LOAD_CONST:
            # remember the most recently loaded constant
            const = code.co_consts[arg]
            continue
        if arg == name_idx and op in (STORE_NAME, STORE_GLOBAL):
            # first store to 'symbol' -- whatever was loaded last wins
            return const
        # any other opcode breaks the LOAD_CONST/STORE pairing
        const = default
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
|
mit
|
Arcanfel/whatToPlay
|
openid/fetchers.py
|
10
|
13415
|
# -*- test-case-name: openid.test.test_fetchers -*-
"""
This module contains the HTTP fetcher interface and several implementations.
"""
__all__ = ['fetch', 'getDefaultFetcher', 'setDefaultFetcher', 'HTTPResponse',
'HTTPFetcher', 'createHTTPFetcher', 'HTTPFetchingError',
'HTTPError']
import urllib2
import time
import cStringIO
import sys
import openid
import openid.urinorm
# Try to import httplib2 for caching support
# http://bitworking.org/projects/httplib2/
try:
import httplib2
except ImportError:
# httplib2 not available
httplib2 = None
# try to import pycurl, which will let us use CurlHTTPFetcher
try:
import pycurl
except ImportError:
pycurl = None
USER_AGENT = "python-openid/%s (%s)" % (openid.__version__, sys.platform)
def fetch(url, body=None, headers=None):
    """Invoke the fetch method on the default fetcher. Most users
    should need only this method.

    @raises Exception: any exceptions that may be raised by the default fetcher
    """
    return getDefaultFetcher().fetch(url, body, headers)
def createHTTPFetcher():
    """Create a default HTTP fetcher instance; prefers Curl to urllib2."""
    if pycurl is not None:
        return CurlHTTPFetcher()
    return Urllib2Fetcher()
# Contains the currently set HTTP fetcher. If it is set to None, the
# library will call createHTTPFetcher() to set it. Do not access this
# variable outside of this module.
_default_fetcher = None
def getDefaultFetcher():
    """Return the default fetcher instance, lazily creating (and
    installing) one on first use.

    @return: the default fetcher
    @rtype: HTTPFetcher
    """
    global _default_fetcher

    fetcher = _default_fetcher
    if fetcher is None:
        setDefaultFetcher(createHTTPFetcher())
        fetcher = _default_fetcher
    return fetcher
def setDefaultFetcher(fetcher, wrap_exceptions=True):
    """Set the default fetcher

    @param fetcher: The fetcher to use as the default HTTP fetcher
    @type fetcher: HTTPFetcher

    @param wrap_exceptions: Whether to wrap exceptions thrown by the
        fetcher wil HTTPFetchingError so that they may be caught
        easier. By default, exceptions will be wrapped. In general,
        unwrapped fetchers are useful for debugging of fetching errors
        or if your fetcher raises well-known exceptions that you would
        like to catch.
    @type wrap_exceptions: bool
    """
    global _default_fetcher
    if fetcher is not None and wrap_exceptions:
        fetcher = ExceptionWrappingFetcher(fetcher)
    _default_fetcher = fetcher
def usingCurl():
    """True iff the currently installed default fetcher is Curl-based."""
    default = getDefaultFetcher()
    return isinstance(default, CurlHTTPFetcher)
class HTTPResponse(object):
    """Simple value object describing an HTTP response.

    @ivar final_url: the URL after following any redirects
    @ivar status: numeric HTTP status code
    @ivar headers: response headers
    @ivar body: response body
    """
    # class-level defaults keep attribute access safe on bare instances
    headers = None
    status = None
    body = None
    final_url = None

    def __init__(self, final_url=None, status=None, headers=None, body=None):
        self.final_url = final_url
        self.status = status
        self.headers = headers
        self.body = body

    def __repr__(self):
        return "<%s status %s for %s>" % (
            self.__class__.__name__, self.status, self.final_url)
class HTTPFetcher(object):
    """
    Interface for openid HTTP fetchers. Only important if you need to
    implement a new fetcher for some reason.
    """

    def fetch(self, url, body=None, headers=None):
        """
        Perform an HTTP GET -- or, when a body is supplied, a POST --
        following redirects along the way.

        @param headers: HTTP headers to include with the request
        @type headers: {str:str}

        @return: An object representing the server's HTTP response. If
            there are network or protocol errors, an exception will be
            raised. HTTP error responses, like 404 or 500, do not
            cause exceptions.

        @rtype: L{HTTPResponse}

        @raise Exception: Different implementations will raise
            different errors based on the underlying HTTP library.
        """
        raise NotImplementedError
def _allowedURL(url):
return url.startswith('http://') or url.startswith('https://')
class HTTPFetchingError(Exception):
    """Wrapper for any exception raised by the underlying fetcher when
    the ExceptionWrappingFetcher is in use.

    @ivar why: The exception that caused this exception
    """
    def __init__(self, why=None):
        self.why = why
        Exception.__init__(self, why)
class ExceptionWrappingFetcher(HTTPFetcher):
    """Fetcher that wraps another fetcher, causing all exceptions

    @cvar uncaught_exceptions: Exceptions that should be exposed to the
        user if they are raised by the fetch call
    """

    # these must never be swallowed or wrapped
    uncaught_exceptions = (SystemExit, KeyboardInterrupt, MemoryError)

    def __init__(self, fetcher):
        # fetcher: the wrapped HTTPFetcher implementation
        self.fetcher = fetcher

    def fetch(self, *args, **kwargs):
        """Delegate to the wrapped fetcher, re-raising anything not in
        uncaught_exceptions as HTTPFetchingError."""
        try:
            return self.fetcher.fetch(*args, **kwargs)
        except self.uncaught_exceptions:
            raise
        except:
            # deliberately bare: on Python 2 this also catches old-style
            # (string/class) exceptions that 'except Exception' would miss
            exc_cls, exc_inst = sys.exc_info()[:2]
            if exc_inst is None:
                # string exceptions
                exc_inst = exc_cls

            raise HTTPFetchingError(why=exc_inst)
class Urllib2Fetcher(HTTPFetcher):
    """An C{L{HTTPFetcher}} that uses urllib2.
    """

    # Parameterized for the benefit of testing frameworks, see
    # http://trac.openidenabled.com/trac/ticket/85
    urlopen = staticmethod(urllib2.urlopen)

    def fetch(self, url, body=None, headers=None):
        """GET (or POST, when body is given) url and return an HTTPResponse.
        HTTP error statuses (404, 500, ...) are returned as responses,
        not raised."""
        if not _allowedURL(url):
            raise ValueError('Bad URL scheme: %r' % (url,))

        if headers is None:
            headers = {}

        headers.setdefault(
            'User-Agent',
            "%s Python-urllib/%s" % (USER_AGENT, urllib2.__version__,))

        req = urllib2.Request(url, data=body, headers=headers)
        try:
            f = self.urlopen(req)
            try:
                return self._makeResponse(f)
            finally:
                f.close()
        except urllib2.HTTPError, why:
            # urllib2 raises on error statuses; translate to a response
            try:
                return self._makeResponse(why)
            finally:
                why.close()

    def _makeResponse(self, urllib2_response):
        # translate a urllib2 response (or HTTPError) into an HTTPResponse
        resp = HTTPResponse()
        resp.body = urllib2_response.read()
        resp.final_url = urllib2_response.geturl()
        resp.headers = dict(urllib2_response.info().items())

        if hasattr(urllib2_response, 'code'):
            resp.status = urllib2_response.code
        else:
            # responses without a code attribute are treated as success
            resp.status = 200

        return resp
class HTTPError(HTTPFetchingError):
    """
    Raised by C{L{CurlHTTPFetcher}} when it encounters an exceptional
    situation while fetching a URL.
    """
    pass
# XXX: define what we mean by paranoid, and make sure it is.
class CurlHTTPFetcher(HTTPFetcher):
    """
    An C{L{HTTPFetcher}} that uses pycurl for fetching.
    See U{http://pycurl.sourceforge.net/}.
    """
    # overall time budget for the request, including all redirect hops
    ALLOWED_TIME = 20 # seconds

    def __init__(self):
        HTTPFetcher.__init__(self)
        if pycurl is None:
            raise RuntimeError('Cannot find pycurl library')

    def _parseHeaders(self, header_file):
        # header_file: file-like object holding the raw response header block
        header_file.seek(0)

        # Remove the status line from the beginning of the input
        unused_http_status_line = header_file.readline()
        lines = [line.strip() for line in header_file]

        # and the blank line from the end
        empty_line = lines.pop()
        if empty_line:
            # NOTE(review): 'line' here is the loop variable leaked from
            # the list comprehension above (Python 2 scoping); presumably
            # 'empty_line' was intended -- confirm before changing.
            raise HTTPError("No blank line at end of headers: %r" % (line,))

        headers = {}
        for line in lines:
            try:
                name, value = line.split(':', 1)
            except ValueError:
                raise HTTPError(
                    "Malformed HTTP header line in response: %r" % (line,))

            value = value.strip()

            # HTTP headers are case-insensitive
            name = name.lower()
            headers[name] = value

        return headers

    def _checkURL(self, url):
        # XXX: document that this can be overridden to match desired policy
        # XXX: make sure url is well-formed and routeable
        return _allowedURL(url)

    def fetch(self, url, body=None, headers=None):
        """Fetch url with pycurl, following redirects manually so every
        hop's URL can be policy-checked; raises HTTPError on timeout or
        disallowed URLs."""
        # 'stop' is the absolute deadline, 'off' the remaining budget
        stop = int(time.time()) + self.ALLOWED_TIME
        off = self.ALLOWED_TIME

        if headers is None:
            headers = {}
        headers.setdefault('User-Agent',
                           "%s %s" % (USER_AGENT, pycurl.version,))

        header_list = []
        if headers is not None:
            for header_name, header_value in headers.iteritems():
                header_list.append('%s: %s' % (header_name, header_value))

        c = pycurl.Curl()
        try:
            c.setopt(pycurl.NOSIGNAL, 1)

            if header_list:
                c.setopt(pycurl.HTTPHEADER, header_list)

            # Presence of a body indicates that we should do a POST
            if body is not None:
                c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.POSTFIELDS, body)

            while off > 0:
                if not self._checkURL(url):
                    raise HTTPError("Fetching URL not allowed: %r" % (url,))

                data = cStringIO.StringIO()
                response_header_data = cStringIO.StringIO()
                c.setopt(pycurl.WRITEFUNCTION, data.write)
                c.setopt(pycurl.HEADERFUNCTION, response_header_data.write)
                c.setopt(pycurl.TIMEOUT, off)
                c.setopt(pycurl.URL, openid.urinorm.urinorm(url))

                c.perform()

                response_headers = self._parseHeaders(response_header_data)
                code = c.getinfo(pycurl.RESPONSE_CODE)
                if code in [301, 302, 303, 307]:
                    # redirect: extract the next hop and loop
                    url = response_headers.get('location')
                    if url is None:
                        raise HTTPError(
                            'Redirect (%s) returned without a location' % code)

                    # Redirects are always GETs
                    c.setopt(pycurl.POST, 0)

                    # There is no way to reset POSTFIELDS to empty and
                    # reuse the connection, but we only use it once.
                else:
                    resp = HTTPResponse()
                    resp.headers = response_headers
                    resp.status = code
                    resp.final_url = url
                    resp.body = data.getvalue()
                    return resp

                # recompute the remaining time budget after each hop
                off = stop - int(time.time())

            raise HTTPError("Timed out fetching: %r" % (url,))
        finally:
            c.close()
class HTTPLib2Fetcher(HTTPFetcher):
    """A fetcher that uses C{httplib2} for performing HTTP
    requests. This implementation supports HTTP caching.

    @see: http://bitworking.org/projects/httplib2/
    """

    def __init__(self, cache=None):
        """@param cache: An object suitable for use as an C{httplib2}
            cache. If a string is passed, it is assumed to be a
            directory name.
        """
        if httplib2 is None:
            raise RuntimeError('Cannot find httplib2 library. '
                               'See http://bitworking.org/projects/httplib2/')

        super(HTTPLib2Fetcher, self).__init__()

        # An instance of the httplib2 object that performs HTTP requests
        self.httplib2 = httplib2.Http(cache)

        # We want httplib2 to raise exceptions for errors, just like
        # the other fetchers.
        self.httplib2.force_exception_to_status_code = False

    def fetch(self, url, body=None, headers=None):
        """Perform an HTTP request

        @raises Exception: Any exception that can be raised by httplib2

        @see: C{L{HTTPFetcher.fetch}}
        """
        if body:
            method = 'POST'
        else:
            method = 'GET'

        # httplib2 doesn't check to make sure that the URL's scheme is
        # 'http' so we do it here.
        if not (url.startswith('http://') or url.startswith('https://')):
            raise ValueError('URL is not a HTTP URL: %r' % (url,))

        httplib2_response, content = self.httplib2.request(
            url, method, body=body, headers=headers)

        # Translate the httplib2 response to our HTTP response abstraction

        # When a 400 is returned, there is no "content-location"
        # header set. This seems like a bug to me. I can't think of a
        # case where we really care about the final URL when it is an
        # error response, but being careful about it can't hurt.
        try:
            final_url = httplib2_response['content-location']
        except KeyError:
            # We're assuming that no redirects occurred
            assert not httplib2_response.previous

            # And this should never happen for a successful response
            assert httplib2_response.status != 200

            final_url = url

        return HTTPResponse(
            body=content,
            final_url=final_url,
            headers=dict(httplib2_response.items()),
            status=httplib2_response.status,
        )
|
apache-2.0
|
ChristopherHogan/pip
|
pip/_vendor/requests/packages/chardet/sbcharsetprober.py
|
2927
|
4793
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Probe for a single-byte charset using a language model.

    The model maps each byte to a frequency "order"; confidence is built
    from how often consecutive frequent characters form sequences that the
    model's precedence matrix classifies as positive.
    """
    def __init__(self, model, reversed=False, nameProber=None):
        # model: dict with 'charToOrderMap', 'precedenceMatrix',
        # 'mTypicalPositiveRatio', 'keepEnglishLetter', 'charsetName'
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()
    def reset(self):
        """Reset all sequence/character counters to their initial state."""
        CharSetProber.reset(self)
        # char order of last character (255 = sentinel meaning "none yet")
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0
    def get_charset_name(self):
        # Delegate to the name prober when one was supplied (used when the
        # same model serves several charsets); otherwise use the model name.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']
    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the prober state afterwards."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            # orders below SYMBOL_CAT_ORDER are letters (not symbols/digits)
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # only count a sequence when both chars are frequent
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order
        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                # shortcut out early when confidence is decisive either way
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()
    def get_confidence(self):
        """Ratio of positive sequences vs. expectation, scaled by coverage."""
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        # never report full certainty from a statistical prober
        if r >= 1.0:
            r = 0.99
        return r
|
mit
|
alfonsodev/ansible-modules-extras
|
system/puppet.py
|
51
|
6731
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import pipes
import stat
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
manifest:
desciption:
- Path to the manifest file to run puppet apply on.
required: false
default: None
show_diff:
description:
- Should puppet return diffs of changes applied. Defaults to off to avoid leaking secret changes by default.
required: false
default: no
choices: [ "yes", "no" ]
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet: timeout=5m
# Run puppet using a different environment
- puppet: environment=testing
'''
def _get_facter_dir():
    """Return the facter facts.d directory appropriate for this user.

    Root gets the system-wide directory; everyone else gets a per-user
    directory under $HOME.
    """
    if os.getuid() != 0:
        return os.path.expanduser('~/.facter/facts.d')
    return '/etc/facter/facts.d'
def _write_structured_data(basedir, basename, data):
    """Serialize *data* as JSON to <basedir>/<basename>.json, mode u+rw.

    The directory is created if missing. The file is opened via os.open so
    that permissions are restricted at creation time; stat constants are
    used because ansible still supports python 2.4 and the octal syntax
    changed.
    """
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    file_path = os.path.join(basedir, "{0}.json".format(basename))
    # O_TRUNC added: without it, rewriting a shorter facts dict would leave
    # trailing bytes of the old JSON in the file, corrupting it.
    fd = os.open(file_path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
                 stat.S_IRUSR | stat.S_IWUSR)
    out_file = os.fdopen(fd, 'w')
    try:
        # json.dumps returns text; write it directly.  The previous
        # .encode('utf8') produced bytes, which a text-mode file object
        # rejects on python 3 (and was a no-op on python 2).
        out_file.write(json.dumps(data))
    finally:
        # ensure the descriptor is released even if serialization fails
        out_file.close()
def main():
    """Entry point: run puppet agent or puppet apply and map its exit code
    to Ansible's changed/failed semantics."""
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(default="30m"),
            puppetmaster=dict(required=False, default=None),
            manifest=dict(required=False, default=None),
            show_diff=dict(
                default=False, aliases=['show-diff'], type='bool'),
            facts=dict(default=None),
            facter_basename=dict(default='ansible'),
            environment=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        # agent mode (puppetmaster) and apply mode (manifest) are exclusive
        mutually_exclusive=[
            ('puppetmaster', 'manifest'),
        ],
    )
    p = module.params
    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False)
    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")
    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))
    # Check if puppet is disabled here
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        # presence of the lockfile means the agent was disabled by an admin
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.", disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")
    # persist external facter facts before the run (skipped in check mode)
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            module.params['facter_basename'],
            module.params['facts'])
    # wrap puppet in timeout(1) so a hung run is killed with SIGKILL
    base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict(
        timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD)
    if not p['manifest']:
        cmd = ("%(base_cmd)s agent --onetime"
               " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose") % dict(
                   base_cmd=base_cmd,
               )
        if p['puppetmaster']:
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        # check mode maps to puppet's --noop dry run
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
    else:
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p['environment']:
            cmd += "--environment '%s' " % p['environment']
        if module.check_mode:
            cmd += "--noop "
        else:
            cmd += "--no-noop "
        cmd += pipes.quote(p['manifest'])
    rc, stdout, stderr = module.run_command(cmd)
    # --detailed-exitcodes: 0 = no changes, 2 = changes applied,
    # 1 = failure/disabled, 124 comes from timeout(1)
    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True)
    elif rc == 124:
        # timeout
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)
# import module snippets
# NOTE: the star import is the historical Ansible pattern; it injects
# AnsibleModule (used by main) into this module's namespace at runtime.
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
runeksvendsen/btcdice
|
privaddr.py
|
1
|
1333
|
import hashlib
import Crypto.Hash.SHA256 as sha256
import binascii
from bitcoin import key as ecdsa
from bitcoin import base58
from baseconv import dice_to_10
def dsha256(s):
    """Return the double SHA-256 of *s* (Bitcoin's checksum hash).

    Uses hashlib (already imported at module level) instead of the
    PyCrypto SHA256 wrapper: the digests are identical, and this keeps the
    module on a single SHA-256 implementation (rhash already uses hashlib).
    """
    return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def rhash(s):
    """Return RIPEMD-160(SHA-256(s)) — Bitcoin's HASH160 of *s*."""
    sha_digest = hashlib.sha256(s).digest()
    ripemd = hashlib.new('ripemd160')
    ripemd.update(sha_digest)
    return ripemd.digest()
def base58_check_encode(s, version=b'\x00'):
    """Base58Check-encode payload *s*: version byte(s) + payload + first
    4 bytes of its double SHA-256 as checksum."""
    payload = version + s
    checksum = dsha256(payload)[:4]
    return base58.encode(payload + checksum)
def get_addr(k):
    """Return (address, privkey) for ECDSA key object *k*.

    The address is the Base58Check of HASH160(pubkey) with version 0x00
    (P2PKH); the private key is the Base58Check of the raw secret with
    version 0x80 (WIF-style, uncompressed).
    """
    pubkey = k.get_pubkey()
    secret = k.prikey
    address = base58_check_encode(rhash(pubkey))
    privkey = base58_check_encode(secret, b'\x80')
    return address, privkey
def num_to_secret(num):
    """Map a non-negative integer to a 32-byte secret.

    The hex digits of *num* (minus the '0x' prefix and any trailing 'L'
    from python 2 longs) are truncated to 64 and right-padded with zeros,
    i.e. the number is treated as left-aligned in the 256-bit secret.
    """
    digits = hex(num).lstrip('0x').rstrip('L')
    padded = digits[0:64].ljust(64, '0')
    return binascii.unhexlify(padded)
def throws_to_keyaddr(throws, faces):
    """Turn a sequence of dice throws in base *faces* into a Bitcoin
    (address, privkey) pair."""
    secret = num_to_secret(dice_to_10(throws, faces))
    key = ecdsa.CKey()
    key.generate(secret)
    return get_addr(key)
if __name__ == "__main__":
#tests
from testvectors import *
assert throws_to_keyaddr(*base6_test['throws']) == (base6_test['address'], base6_test['privkey'])
assert throws_to_keyaddr(*base10_test['throws']) == (base10_test['address'], base10_test['privkey'])
assert throws_to_keyaddr(*base20_test['throws']) == (base20_test['address'], base20_test['privkey'])
|
mit
|
fuzzysteve/yamlloader
|
tableloader/tableFunctions/dogmaEffects.py
|
1
|
3091
|
# -*- coding: utf-8 -*-
import sys
import os
reload(sys)
sys.setdefaultencoding("utf-8")
from sqlalchemy import Table
from yaml import load,dump
try:
from yaml import CSafeLoader as SafeLoader
print "Using CSafeLoader"
except ImportError:
from yaml import SafeLoader
print "Using Python SafeLoader"
distribution={'twosome':1,'bubble':2}
effectcategory={}
def importyaml(connection,metadata,sourcePath,language='en'):
    """Load fsd/dogmaEffects.yaml and insert one row per effect into the
    dgmEffects table, committing everything in a single transaction.

    Python 2 module (print statements).  Localized fields (description,
    displayName) are taken from the *language* key of the corresponding
    *ID mapping; 'distribution' and 'effectCategory' are remapped through
    the module-level lookup dicts.
    """
    print "Importing dogma effects"
    dgmEffects = Table('dgmEffects',metadata)
    print "opening Yaml"
    trans = connection.begin()
    with open(os.path.join(sourcePath,'fsd','dogmaEffects.yaml'),'r') as yamlstream:
        print "importing"
        dogmaEffects=load(yamlstream,Loader=SafeLoader)
        print "Yaml Processed into memory"
        for dogmaEffectsid in dogmaEffects:
            effect=dogmaEffects[dogmaEffectsid]
            # .get() is used for optional keys (NULL column when absent);
            # isOffensive/isAssistance are mandatory in the source data
            connection.execute(dgmEffects.insert(),
                               effectID=dogmaEffectsid,
                               effectName=effect.get('effectName'),
                               effectCategory=effectcategory.get(effect['effectCategory']),
                               description=effect.get('descriptionID',{}).get(language),
                               guid=effect.get('guid'),
                               iconID=effect.get('iconID'),
                               isOffensive=effect['isOffensive'],
                               isAssistance=effect['isAssistance'],
                               durationAttributeID=effect.get('durationAttributeID'),
                               trackingSpeedAttributeID=effect.get('trackingSpeedAttributeID'),
                               dischargeAttributeID=effect.get('dischargeAttributeID'),
                               rangeAttributeID=effect.get('rangeAttributeID'),
                               falloffAttributeID=effect.get('falloffAttributeID'),
                               disallowAutoRepeat=effect.get('disallowAutoRepeat'),
                               published=effect.get('published'),
                               displayName=effect.get('displayNameID',{}).get(language),
                               isWarpSafe=effect.get('isWarpSafe'),
                               rangeChance=effect.get('rangeChance'),
                               electronicChance=effect.get('electronicChance'),
                               propulsionChance=effect.get('propulsionChance'),
                               distribution=distribution.get(effect.get('distribution')),
                               sfxName=effect.get('sfxName'),
                               npcUsageChanceAttributeID=effect.get('npcUsageChanceAttributeID'),
                               npcActivationChanceAttributeID=effect.get('npcActivationChanceAttributeID'),
                               fittingUsageChanceAttributeID=effect.get('fittingUsageChanceAttributeID'),
                               # modifierInfo is nested YAML; re-dumped to text
                               modifierInfo=dump(effect.get('modifierInfo'))
                               )
    trans.commit()
|
mit
|
maxteufel/weechat
|
doc/docgen.py
|
1
|
32544
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2018 Sébastien Helleu <flashcode@flashtux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Documentation generator for WeeChat: build include files with commands,
options, infos, infolists, hdata and completions for WeeChat core and
plugins.
Instructions to build config files yourself in WeeChat directories (replace
all paths with your path to WeeChat):
1. run WeeChat and load this script, with following command:
/python load ~/src/weechat/doc/docgen.py
2. change path to build in your doc/ directory:
/set plugins.var.python.docgen.path "~/src/weechat/doc"
3. run docgen command:
/docgen
Note: it is recommended to load only this script when building doc.
Files should be in ~/src/weechat/doc/xx/autogen/ (where xx is language).
"""
from __future__ import print_function
SCRIPT_NAME = 'docgen'
SCRIPT_AUTHOR = 'Sébastien Helleu <flashcode@flashtux.org>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Documentation generator for WeeChat'
SCRIPT_COMMAND = 'docgen'
IMPORT_OK = True
# pylint: disable=wrong-import-position
try:
import gettext
import hashlib
import os
import re
from collections import defaultdict
from operator import itemgetter
except ImportError as message:
print('Missing package(s) for {0}: {1}'.format(SCRIPT_NAME, message))
IMPORT_OK = False
try:
import weechat # pylint: disable=import-error
except ImportError:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: https://weechat.org/')
IMPORT_OK = False
# default path where doc files will be written (should be doc/ in sources
# package tree)
# path must have subdirectories with languages and autogen directory:
# path
# |-- en
# | |-- autogen
# |-- fr
# | |-- autogen
# ...
DEFAULT_PATH = '~/src/weechat/doc'
# list of locales for which we want to build doc files to include
LOCALE_LIST = ('en_US', 'fr_FR', 'it_IT', 'de_DE', 'ja_JP', 'pl_PL')
# all commands/options/.. of following plugins will produce a file
# non-listed plugins will be ignored
# value: "c" = plugin may have many commands
# "o" = write config options for plugin
# if plugin is listed without "c", that means plugin has only one command
# /name (where "name" is name of plugin)
# Note: we consider core is a plugin called "weechat"
PLUGIN_LIST = {
'sec': 'o',
'weechat': 'co',
'alias': '',
'aspell': 'o',
'buflist': 'co',
'charset': 'o',
'exec': 'o',
'fifo': 'o',
'fset': 'o',
'irc': 'co',
'logger': 'o',
'relay': 'o',
'script': 'o',
'perl': 'o',
'python': 'o',
'ruby': 'o',
'lua': 'o',
'tcl': 'o',
'guile': 'o',
'javascript': 'o',
'php': 'o',
'trigger': 'o',
'xfer': 'co',
}
# options to ignore
IGNORE_OPTIONS = (
r'aspell\.dict\..*',
r'aspell\.option\..*',
r'charset\.decode\..*',
r'charset\.encode\..*',
r'irc\.msgbuffer\..*',
r'irc\.ctcp\..*',
r'irc\.ignore\..*',
r'irc\.server\..*',
r'jabber\.server\..*',
r'logger\.level\..*',
r'logger\.mask\..*',
r'relay\.port\..*',
r'trigger\.trigger\..*',
r'weechat\.palette\..*',
r'weechat\.proxy\..*',
r'weechat\.bar\..*',
r'weechat\.debug\..*',
r'weechat\.notify\..*',
)
# completions to ignore
IGNORE_COMPLETIONS_ITEMS = (
'docgen.*',
'jabber.*',
'weeget.*',
)
class AutogenDoc(object):
    """A class to write auto-generated doc files.

    Content is written to a temporary '<name>.adoc.tmp' file first;
    update() then replaces the real file only when the content actually
    changed, so untouched files keep their timestamps.
    """
    def __init__(self, directory, doc, name):
        """Initialize auto-generated doc file.

        directory: base output path; doc: subdirectory ('user',
        'plugin_api', ...); name: file name without '.adoc'.
        """
        self.filename = os.path.join(directory, doc, name + '.adoc')
        self.filename_tmp = self.filename + '.tmp'
        self._file = open(self.filename_tmp, 'w')
        self.write('//\n')
        self.write('// This file is auto-generated by script docgen.py.\n')
        self.write('// DO NOT EDIT BY HAND!\n')
        self.write('//\n')
    def write(self, string):
        """Write a line in auto-generated doc file."""
        self._file.write(string)
    @staticmethod
    def _sha256(filename):
        """Return the SHA-256 hex digest of a file's bytes, '' if unreadable.

        The file is read in binary mode: hashing bytes works on both
        python 2 and python 3 (the old text-mode read made hashlib raise
        TypeError on python 3, where read() returns unicode str).
        """
        try:
            with open(filename, 'rb') as _file:
                return hashlib.sha256(_file.read()).hexdigest()
        except IOError:
            return ''
    def update(self, obj_name, num_files, num_files_updated):
        """Update doc file if needed (if content has changed)."""
        # close temp file
        self._file.close()
        # compare checksums of old file and new (temp) file
        shaold = self._sha256(self.filename)
        shanew = self._sha256(self.filename_tmp)
        if shaold != shanew:
            # content changed: move the temp file into place
            if os.path.exists(self.filename):
                os.unlink(self.filename)
            os.rename(self.filename_tmp, self.filename)
            num_files_updated['total1'] += 1
            num_files_updated['total2'] += 1
            num_files_updated[obj_name] += 1
        else:
            os.unlink(self.filename_tmp)
        # update counters
        num_files['total1'] += 1
        num_files['total2'] += 1
        num_files[obj_name] += 1
def get_commands():
    """
    Get list of WeeChat/plugins commands as dictionary with 3 indexes: plugin,
    command, xxx.
    """
    commands = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'command')
    while weechat.infolist_next(infolist):
        plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        if plugin not in PLUGIN_LIST:
            continue
        command = weechat.infolist_string(infolist, 'command')
        # keep /plugin itself, plus every command of "c"-flagged plugins
        if command != plugin and 'c' not in PLUGIN_LIST[plugin]:
            continue
        entry = commands[plugin][command]
        for field in ('description', 'args', 'args_description',
                      'completion'):
            entry[field] = weechat.infolist_string(infolist, field)
    weechat.infolist_free(infolist)
    return commands
def get_options():
    """
    Get list of WeeChat/plugins config options as dictionary with 4 indexes:
    config, section, option, xxx.
    """
    ignore_re = '|'.join(IGNORE_OPTIONS)
    options = \
        defaultdict(lambda: defaultdict(lambda: defaultdict(defaultdict)))
    infolist = weechat.infolist_get('option', '', '')
    while weechat.infolist_next(infolist):
        full_name = weechat.infolist_string(infolist, 'full_name')
        if re.search(ignore_re, full_name):
            continue
        config = weechat.infolist_string(infolist, 'config_name')
        # only configs flagged "o" in PLUGIN_LIST are documented
        if config not in PLUGIN_LIST or 'o' not in PLUGIN_LIST[config]:
            continue
        section = weechat.infolist_string(infolist, 'section_name')
        option = weechat.infolist_string(infolist, 'option_name')
        entry = options[config][section][option]
        for field in ('type', 'string_values', 'default_value',
                      'description'):
            entry[field] = weechat.infolist_string(infolist, field)
        for field in ('min', 'max', 'null_value_allowed'):
            entry[field] = weechat.infolist_integer(infolist, field)
    weechat.infolist_free(infolist)
    return options
def get_infos():
    """
    Get list of WeeChat/plugins infos as dictionary with 3 indexes: plugin,
    name, xxx.
    """
    infos = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'info')
    while weechat.infolist_next(infolist):
        name = weechat.infolist_string(infolist, 'info_name')
        owner = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        entry = infos[owner][name]
        for field in ('description', 'args_description'):
            entry[field] = weechat.infolist_string(infolist, field)
    weechat.infolist_free(infolist)
    return infos
def get_infos_hashtable():
    """
    Get list of WeeChat/plugins infos (hashtable) as dictionary with 3 indexes:
    plugin, name, xxx.
    """
    infos_hashtable = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'info_hashtable')
    while weechat.infolist_next(infolist):
        name = weechat.infolist_string(infolist, 'info_name')
        owner = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        entry = infos_hashtable[owner][name]
        for field in ('description', 'args_description',
                      'output_description'):
            entry[field] = weechat.infolist_string(infolist, field)
    weechat.infolist_free(infolist)
    return infos_hashtable
def get_infolists():
    """
    Get list of WeeChat/plugins infolists as dictionary with 3 indexes: plugin,
    name, xxx.
    """
    infolists = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'infolist')
    while weechat.infolist_next(infolist):
        name = weechat.infolist_string(infolist, 'infolist_name')
        owner = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        entry = infolists[owner][name]
        for field in ('description', 'pointer_description',
                      'args_description'):
            entry[field] = weechat.infolist_string(infolist, field)
    weechat.infolist_free(infolist)
    return infolists
# pylint: disable=too-many-locals
def get_hdata():
    """
    Get list of WeeChat/plugins hdata as dictionary with 3 indexes: plugin,
    name, xxx.

    For each hdata: 'description', plus pre-rendered asciidoc fragments
    'vars' (all variables sorted by struct offset), 'vars_update'
    (variables writable via hdata_update) and 'lists' (list pointers,
    non-"last_" entries first).
    """
    hdata = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'hdata')
    while weechat.infolist_next(infolist):
        hdata_name = weechat.infolist_string(infolist, 'hdata_name')
        plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        hdata[plugin][hdata_name]['description'] = \
            weechat.infolist_string(infolist, 'description')
        variables = ''
        variables_update = ''
        lists = ''
        ptr_hdata = weechat.hdata_get(hdata_name)
        if ptr_hdata:
            hdata2 = []
            string = weechat.hdata_get_string(ptr_hdata, 'var_keys_values')
            if string:
                # each item is "name:type..."; only the name part is used
                for item in string.split(','):
                    key = item.split(':')[0]
                    var_offset = weechat.hdata_get_var_offset(ptr_hdata, key)
                    var_array_size = \
                        weechat.hdata_get_var_array_size_string(ptr_hdata, '',
                                                                key)
                    if var_array_size:
                        var_array_size = \
                            ', array_size: "{0}"'.format(var_array_size)
                    var_hdata = weechat.hdata_get_var_hdata(ptr_hdata, key)
                    if var_hdata:
                        var_hdata = ', hdata: "{0}"'.format(var_hdata)
                    type_string = weechat.hdata_get_var_type_string(ptr_hdata,
                                                                    key)
                    hdata2.append({
                        'offset': var_offset,
                        'text': '_{0}_ ({1})'.format(key, type_string),
                        'textlong': '_{0}_ ({1}{2}{3})'.format(
                            key, type_string, var_array_size, var_hdata),
                        'update': weechat.hdata_update(
                            ptr_hdata, '', {'__update_allowed': key}),
                    })
                # render in C-struct order rather than hook order
                hdata2 = sorted(hdata2, key=itemgetter('offset'))
                for item in hdata2:
                    variables += '{0} +\n'.format(item['textlong'])
                    if item['update']:
                        variables_update += ' {0} +\n'.format(item['text'])
                # probe whether whole-struct create/delete are allowed
                if weechat.hdata_update(ptr_hdata, '',
                                        {'__create_allowed': ''}):
                    variables_update += ' _{hdata_update_create}_ +\n'
                if weechat.hdata_update(ptr_hdata, '',
                                        {'__delete_allowed': ''}):
                    variables_update += ' _{hdata_update_delete}_ +\n'
            hdata[plugin][hdata_name]['vars'] = variables
            hdata[plugin][hdata_name]['vars_update'] = variables_update
            string = weechat.hdata_get_string(ptr_hdata, 'list_keys')
            if string:
                list_lists = string.split(',')
                lists_std = [l for l in list_lists
                             if not l.startswith('last_')]
                lists_last = [l for l in list_lists
                              if l.startswith('last_')]
                for item in sorted(lists_std) + sorted(lists_last):
                    lists += '_{0}_ +\n'.format(item)
            hdata[plugin][hdata_name]['lists'] = lists
    weechat.infolist_free(infolist)
    return hdata
def get_completions():
    """
    Get list of WeeChat/plugins completions as dictionary with 3 indexes:
    plugin, item, xxx.
    """
    ignore_re = '|'.join(IGNORE_COMPLETIONS_ITEMS)
    completions = defaultdict(lambda: defaultdict(defaultdict))
    infolist = weechat.infolist_get('hook', '', 'completion')
    while weechat.infolist_next(infolist):
        item = weechat.infolist_string(infolist, 'completion_item')
        if re.search(ignore_re, item):
            continue
        owner = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
        completions[owner][item]['description'] = \
            weechat.infolist_string(infolist, 'description')
    weechat.infolist_free(infolist)
    return completions
def get_url_options():
    """
    Get list of URL options as list of dictionaries.
    """
    url_options = []
    infolist = weechat.infolist_get('url_options', '', '')
    while weechat.infolist_next(infolist):
        raw_constants = weechat.infolist_string(infolist, 'constants')
        url_options.append({
            'name': weechat.infolist_string(infolist, 'name').lower(),
            'option': weechat.infolist_integer(infolist, 'option'),
            'type': weechat.infolist_string(infolist, 'type'),
            # normalize "A,B" to "a, b" for display
            'constants': raw_constants.lower().replace(',', ', '),
        })
    weechat.infolist_free(infolist)
    return url_options
def get_default_aliases():
    """
    Get list of default aliases as list of dictionaries.
    """
    default_aliases = []
    infolist = weechat.infolist_get('alias_default', '', '')
    while weechat.infolist_next(infolist):
        alias_name = weechat.infolist_string(infolist, 'name')
        alias_command = weechat.infolist_string(infolist, 'command')
        # names/commands are stored without the leading slash
        default_aliases.append({
            'name': '/' + alias_name,
            'command': '/' + alias_command,
            'completion': weechat.infolist_string(infolist, 'completion'),
        })
    weechat.infolist_free(infolist)
    return default_aliases
def get_irc_colors():
    """
    Get list of IRC colors as list of dictionaries.
    """
    irc_colors = []
    infolist = weechat.infolist_get('irc_color_weechat', '', '')
    while weechat.infolist_next(infolist):
        entry = {
            'color_irc': weechat.infolist_string(infolist, 'color_irc'),
            'color_weechat': weechat.infolist_string(infolist,
                                                     'color_weechat'),
        }
        irc_colors.append(entry)
    weechat.infolist_free(infolist)
    return irc_colors
def get_plugins_priority():
    """
    Get priority of default WeeChat plugins as a dictionary.

    Keys are integer priorities; values are lists of plugin names sharing
    that priority.
    """
    plugins_priority = {}
    infolist = weechat.infolist_get('plugin', '', '')
    while weechat.infolist_next(infolist):
        name = weechat.infolist_string(infolist, 'name')
        priority = weechat.infolist_integer(infolist, 'priority')
        plugins_priority.setdefault(priority, []).append(name)
    weechat.infolist_free(infolist)
    return plugins_priority
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
# pylint: disable=too-many-nested-blocks
def docgen_cmd_cb(data, buf, args):
    """Callback for /docgen command.

    args: optional space-separated list of locales; defaults to
    LOCALE_LIST.  For each locale, writes the autogen .adoc include files
    (commands, options, infos, infolists, hdata, completions, URL
    options, default aliases, IRC colors, plugin priorities) under
    <path>/<lang>/autogen/.
    """
    if args:
        locales = args.split(' ')
    else:
        locales = LOCALE_LIST
    commands = get_commands()
    options = get_options()
    infos = get_infos()
    infos_hashtable = get_infos_hashtable()
    infolists = get_infolists()
    hdata = get_hdata()
    completions = get_completions()
    url_options = get_url_options()
    default_aliases = get_default_aliases()
    irc_colors = get_irc_colors()
    plugins_priority = get_plugins_priority()
    # get path and replace ~ by home if needed
    path = weechat.config_get_plugin('path')
    if path.startswith('~'):
        path = os.environ['HOME'] + path[1:]
    # write to doc files, by locale
    num_files = defaultdict(int)
    num_files_updated = defaultdict(int)
    # pylint: disable=undefined-variable
    # translate() goes through gettext's _() installed by trans.install()
    # below; escape() protects '|' which is the asciidoc table separator
    translate = lambda s: (s and _(s)) or s
    escape = lambda s: s.replace('|', '\\|')
    for locale in locales:
        # reset per-locale counters; 'total2' accumulates across locales
        for key in num_files:
            if key != 'total2':
                num_files[key] = 0
                num_files_updated[key] = 0
        trans = gettext.translation('weechat',
                                    weechat.info_get('weechat_localedir', ''),
                                    languages=[locale + '.UTF-8'],
                                    fallback=True)
        trans.install()
        directory = path + '/' + locale[0:2] + '/autogen'
        if not os.path.isdir(directory):
            weechat.prnt('',
                         '{0}docgen error: directory "{1}" does not exist'
                         ''.format(weechat.prefix('error'), directory))
            continue
        # write commands
        for plugin in commands:
            doc = AutogenDoc(directory, 'user', plugin + '_commands')
            for i, command in enumerate(sorted(commands[plugin])):
                if i > 0:
                    doc.write('\n')
                _cmd = commands[plugin][command]
                args = translate(_cmd['args'])
                args_formats = args.split(' || ')
                desc = translate(_cmd['description'])
                args_desc = translate(_cmd['args_description'])
                doc.write('[[command_{0}_{1}]]\n'.format(plugin, command))
                doc.write('* `+{0}+`: {1}\n\n'.format(command, desc))
                doc.write('----\n')
                prefix = '/' + command + ' '
                if args_formats != ['']:
                    # subsequent formats are aligned under the first one
                    for fmt in args_formats:
                        doc.write(prefix + fmt + '\n')
                        prefix = ' ' * len(prefix)
                if args_desc:
                    doc.write('\n')
                    for line in args_desc.split('\n'):
                        doc.write(line + '\n')
                doc.write('----\n')
            doc.update('commands', num_files, num_files_updated)
        # write config options
        for config in options:
            doc = AutogenDoc(directory, 'user', config + '_options')
            i = 0
            for section in sorted(options[config]):
                for option in sorted(options[config][section]):
                    if i > 0:
                        doc.write('\n')
                    i += 1
                    _opt = options[config][section][option]
                    opt_type = _opt['type']
                    string_values = _opt['string_values']
                    default_value = _opt['default_value']
                    opt_min = _opt['min']
                    opt_max = _opt['max']
                    null_value_allowed = _opt['null_value_allowed']
                    desc = translate(_opt['description'])
                    type_nls = translate(opt_type)
                    values = ''
                    if opt_type == 'boolean':
                        values = 'on, off'
                    elif opt_type == 'integer':
                        if string_values:
                            values = string_values.replace('|', ', ')
                        else:
                            values = '{0} .. {1}'.format(opt_min, opt_max)
                    elif opt_type == 'string':
                        if opt_max <= 0:
                            values = _('any string')
                        elif opt_max == 1:
                            values = _('any char')
                        elif opt_max > 1:
                            values = '{0} ({1}: {2})'.format(_('any string'),
                                                             _('max chars'),
                                                             opt_max)
                        else:
                            values = _('any string')
                        default_value = '"{0}"'.format(
                            default_value.replace('"', '\\"'))
                    elif opt_type == 'color':
                        values = _('a WeeChat color name (default, black, '
                                   '(dark)gray, white, (light)red, '
                                   '(light)green, brown, yellow, (light)blue, '
                                   '(light)magenta, (light)cyan), a terminal '
                                   'color number or an alias; attributes are '
                                   'allowed before color (for text color '
                                   'only, not background): \"*\" for bold, '
                                   '\"!\" for reverse, \"/\" for italic, '
                                   '\"_\" for underline')
                    doc.write('* [[option_{0}.{1}.{2}]] *{3}.{4}.{5}*\n'
                              ''.format(config, section, option, config,
                                        section, option))
                    doc.write('** {0}: pass:none[{1}]\n'.format(
                        _('description'), desc.replace(']', '\\]')))
                    doc.write('** {0}: {1}\n'.format(_('type'), type_nls))
                    doc.write('** {0}: {1}\n'.format(_('values'), values))
                    doc.write('** {0}: `+{1}+`\n'
                              ''.format(_('default value'), default_value))
                    if null_value_allowed:
                        doc.write('** {0}\n'.format(
                            _('undefined value allowed (null)')))
            doc.update('options', num_files, num_files_updated)
        # write default aliases
        doc = AutogenDoc(directory, 'user', 'alias_default_aliases')
        doc.write('[width="100%",cols="2m,5m,5",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2}\n\n'
                  ''.format(_('Alias'), _('Command'), _('Completion')))
        for alias in default_aliases:
            doc.write('| {0} | {1} | {2}\n'
                      ''.format(escape(alias['name']),
                                escape(alias['command']),
                                escape(alias['completion'] or '-')))
        doc.write('|===\n')
        doc.update('alias_default_aliases', num_files, num_files_updated)
        # write IRC colors
        doc = AutogenDoc(directory, 'user', 'irc_colors')
        doc.write('[width="30%",cols="^2m,3",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1}\n\n'
                  ''.format(_('IRC color'), _('WeeChat color')))
        for color in irc_colors:
            doc.write('| {0} | {1}\n'
                      ''.format(escape(color['color_irc']),
                                escape(color['color_weechat'])))
        doc.write('|===\n')
        doc.update('irc_colors', num_files, num_files_updated)
        # write infos hooked
        doc = AutogenDoc(directory, 'plugin_api', 'infos')
        doc.write('[width="100%",cols="^1,^2,6,6",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2} | {3}\n\n'
                  ''.format(_('Plugin'), _('Name'), _('Description'),
                            _('Arguments')))
        for plugin in sorted(infos):
            for info in sorted(infos[plugin]):
                _inf = infos[plugin][info]
                desc = translate(_inf['description'])
                args_desc = translate(_inf['args_description'] or '-')
                doc.write('| {0} | {1} | {2} | {3}\n\n'
                          ''.format(escape(plugin), escape(info),
                                    escape(desc), escape(args_desc)))
        doc.write('|===\n')
        doc.update('infos', num_files, num_files_updated)
        # write infos (hashtable) hooked
        doc = AutogenDoc(directory, 'plugin_api', 'infos_hashtable')
        doc.write('[width="100%",cols="^1,^2,6,6,8",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
                  ''.format(_('Plugin'), _('Name'), _('Description'),
                            _('Hashtable (input)'), _('Hashtable (output)')))
        for plugin in sorted(infos_hashtable):
            for info in sorted(infos_hashtable[plugin]):
                _inh = infos_hashtable[plugin][info]
                desc = translate(_inh['description'])
                args_desc = translate(_inh['args_description'])
                output_desc = translate(_inh['output_description']) or '-'
                doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
                          ''.format(escape(plugin), escape(info),
                                    escape(desc), escape(args_desc),
                                    escape(output_desc)))
        doc.write('|===\n')
        doc.update('infos_hashtable', num_files, num_files_updated)
        # write infolists hooked
        doc = AutogenDoc(directory, 'plugin_api', 'infolists')
        doc.write('[width="100%",cols="^1,^2,5,5,5",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
                  ''.format(_('Plugin'), _('Name'), _('Description'),
                            _('Pointer'), _('Arguments')))
        for plugin in sorted(infolists):
            for infolist in sorted(infolists[plugin]):
                _inl = infolists[plugin][infolist]
                desc = translate(_inl['description'])
                pointer_desc = translate(_inl['pointer_description']) or '-'
                args_desc = translate(_inl['args_description']) or '-'
                doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
                          ''.format(escape(plugin), escape(infolist),
                                    escape(desc), escape(pointer_desc),
                                    escape(args_desc)))
        doc.write('|===\n')
        doc.update('infolists', num_files, num_files_updated)
        # write hdata hooked
        doc = AutogenDoc(directory, 'plugin_api', 'hdata')
        doc.write(':hdata_update_create: __create\n')
        doc.write(':hdata_update_delete: __delete\n')
        doc.write('[width="100%",cols="^1,^2,2,2,5",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
                  ''.format(_('Plugin'), _('Name'), _('Description'),
                            _('Lists'), _('Variables')))
        for plugin in sorted(hdata):
            for hdata_name in sorted(hdata[plugin]):
                _hda = hdata[plugin][hdata_name]
                anchor = 'hdata_{0}'.format(hdata_name)
                desc = translate(_hda['description'])
                variables = _hda['vars']
                variables_update = _hda['vars_update']
                lists = _hda['lists']
                doc.write('| {0}\n'.format(escape(plugin)))
                doc.write('| [[{0}]]<<{0},{1}>>\n'
                          ''.format(escape(anchor), escape(hdata_name)))
                doc.write('| {0}\n'.format(escape(desc)))
                doc.write('| {0}\n'.format(escape(lists) if lists else '-'))
                doc.write('| {0}\n'.format(escape(variables)))
                if variables_update:
                    doc.write('*{0}* +\n{1}'.format(
                        _('Update allowed:'),
                        escape(variables_update)))
                doc.write('\n')
        doc.write('|===\n')
        doc.update('hdata', num_files, num_files_updated)
        # write completions hooked
        doc = AutogenDoc(directory, 'plugin_api', 'completions')
        doc.write('[width="100%",cols="^1,^2,7",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2}\n\n'
                  ''.format(_('Plugin'), _('Name'), _('Description')))
        for plugin in sorted(completions):
            for completion_item in sorted(completions[plugin]):
                _cmp = completions[plugin][completion_item]
                desc = translate(_cmp['description'])
                doc.write('| {0} | {1} | {2}\n\n'
                          ''.format(escape(plugin), escape(completion_item),
                                    escape(desc)))
        doc.write('|===\n')
        doc.update('completions', num_files, num_files_updated)
        # write url options
        doc = AutogenDoc(directory, 'plugin_api', 'url_options')
        doc.write('[width="100%",cols="2,^1,7",options="header"]\n')
        doc.write('|===\n')
        doc.write('| {0} | {1} | {2}\n\n'
                  ''.format(_('Option'),
                            _('Type') + ' ^(1)^',
                            _('Constants') + ' ^(2)^'))
        for option in url_options:
            constants = option['constants']
            if constants:
                constants = ' ' + constants
            doc.write('| {0} | {1} |{2}\n\n'
                      ''.format(escape(option['name']),
                                escape(option['type']),
                                escape(constants)))
        doc.write('|===\n')
        doc.update('url_options', num_files, num_files_updated)
        # write plugins priority
        doc = AutogenDoc(directory, 'plugin_api', 'plugins_priority')
        for priority in sorted(plugins_priority, reverse=True):
            plugins = ', '.join(sorted(plugins_priority[priority]))
            doc.write('. {0} ({1})\n'.format(escape(plugins), priority))
        doc.update('plugins_priority', num_files, num_files_updated)
        # write counters
        weechat.prnt('',
                     'docgen: {0}: {1} files, {2} updated'
                     ''.format(locale,
                               num_files['total1'],
                               num_files_updated['total1']))
    weechat.prnt('',
                 'docgen: total: {0} files, {1} updated'
                 ''.format(num_files['total2'], num_files_updated['total2']))
    return weechat.WEECHAT_RC_OK
def docgen_completion_cb(data, completion_item, buf, completion):
    """Fill the "docgen_locales" completion with all known locales."""
    for locale_name in LOCALE_LIST:
        weechat.hook_completion_list_add(completion, locale_name, 0,
                                         weechat.WEECHAT_LIST_POS_SORT)
    return weechat.WEECHAT_RC_OK
# Script entry point: register with WeeChat and install the /docgen
# command, its locale completion, and the default "path" setting.
# IMPORT_OK guards against running outside WeeChat (weechat module missing).
if __name__ == '__main__' and IMPORT_OK:
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
                        SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
        weechat.hook_command(SCRIPT_COMMAND,
                             'Documentation generator.',
                             '[locales]',
                             'locales: list of locales to build (by default '
                             'build all locales)',
                             '%(docgen_locales)|%*',
                             'docgen_cmd_cb', '')
        weechat.hook_completion('docgen_locales', 'locales for docgen',
                                'docgen_completion_cb', '')
        # Only set the default path on first load; never clobber a
        # user-configured value.
        if not weechat.config_is_set_plugin('path'):
            weechat.config_set_plugin('path', DEFAULT_PATH)
|
gpl-3.0
|
ernado/infosessions
|
infosessions/migrations/0001_initial.py
|
1
|
1741
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the SessionInfo model.

    NOTE: the verbose_name values are Russian text written as \\u escape
    sequences; they are runtime strings and must stay byte-identical.
    """

    # The user FK targets the (possibly swapped) auth user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='SessionInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f')),
                ('key', models.CharField(max_length=32, verbose_name='\u041a\u043b\u044e\u0447', db_index=True)),
                ('prefix', models.CharField(default='default', max_length=16, db_index=True)),
                ('active', models.BooleanField(default=True)),
                ('user_ip', models.IPAddressField(default='127.0.0.1', db_index=True)),
                ('user_agent', models.TextField(default=None, null=True, blank=True)),
                ('user_agent_md5', models.CharField(default=None, max_length=32, null=True, blank=True)),
                ('user', models.ForeignKey(related_name='sessions', verbose_name='\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Session',
                'verbose_name_plural': '\u0421\u0435\u0441\u0441\u0438\u0438',
            },
        ),
        # A session is uniquely identified by its (prefix, key) pair.
        migrations.AlterUniqueTogether(
            name='sessioninfo',
            unique_together=set([('prefix', 'key')]),
        ),
    ]
|
bsd-3-clause
|
vrv/tensorflow
|
tensorflow/python/kernel_tests/random_shuffle_queue_test.py
|
65
|
50664
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
  """Log the test name at start; useful for debugging when a test times out."""
  super(RandomShuffleQueueTest, self).setUp()
  tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
  """Log the test name on completion (pairs with the setUp log line)."""
  super(RandomShuffleQueueTest, self).tearDown()
  tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
  """A single enqueue takes the queue size from 0 to 1."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    size_op = queue.size()
    enqueue_op = queue.enqueue((10.0,))
    self.assertAllEqual(0, size_op.eval())
    enqueue_op.run()
    self.assertAllEqual(1, size_op.eval())
def testEnqueueWithShape(self):
  """Enqueue enforces the static [3, 2] shape given at construction."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
    enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
    enqueue_correct_op.run()
    self.assertAllEqual(1, q.size().eval())
    # A [2, 3] element is incompatible with the declared [3, 2] shape.
    with self.assertRaises(ValueError):
      q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
  """enqueue_many checks each element against the per-component shapes."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(
        10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
    q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
    self.assertAllEqual(4, q.size().eval())
    # Op construction alone must accept [3]-shaped elements (ops not run).
    q2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
    q2.enqueue(([1, 2, 3],))
    q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
  """Round-trips scalar and [1]-vector components through the queue."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
    q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
    q.enqueue([9, [10]]).run()
    dequeue_t = q.dequeue()
    results = []
    # Two single-element dequeues, then one dequeue_many(3) to drain.
    for _ in range(2):
      a, b = sess.run(dequeue_t)
      results.append((a, b))
    a, b = sess.run(q.dequeue_many(3))
    for i in range(3):
      results.append((a[i], b[i]))
    self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
                          results)
def testParallelEnqueue(self):
  """Enqueues one element per producer thread, dequeues on one thread."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()

    # Run one producer thread for each element in elems.
    def enqueue(enqueue_op):
      sess.run(enqueue_op)

    threads = [
        self.checkedThread(
            target=enqueue, args=(e,)) for e in enqueue_ops
    ]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    # Dequeue every element using a single thread.
    results = []
    for _ in xrange(len(elems)):
      results.append(dequeued_t.eval())
    self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
  """Enqueues on one thread, dequeues one element per consumer thread."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()
    # Enqueue every element using a single thread.
    for enqueue_op in enqueue_ops:
      enqueue_op.run()

    # Run one consumer thread for each element in elems.
    # NOTE: list.append is thread-safe in CPython, so the shared list is OK.
    results = []

    def dequeue():
      results.append(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, results)
def testDequeue(self):
  """Dequeuing returns every enqueued element exactly once."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elements = [10.0, 20.0, 30.0]
    enqueue_ops = [queue.enqueue((value,)) for value in elements]
    dequeued_t = queue.dequeue()
    for op in enqueue_ops:
      op.run()
    results = []
    for _ in elements:
      results.append(dequeued_t.eval())
    self.assertItemsEqual(elements, results)
def testEnqueueAndBlockingDequeue(self):
  """Dequeue blocks on an empty queue until elements are enqueued."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()

    def enqueue():
      # The enqueue_ops should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for enqueue_op in enqueue_ops:
        sess.run(enqueue_op)

    results = []

    def dequeue():
      for _ in xrange(len(elems)):
        results.append(sess.run(dequeued_t))

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
  """Round-trips (int, float) pairs through a two-component queue."""
  with self.test_session() as sess:
    queue = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.int32, dtypes_lib.float32))
    elements = [(5, 10.0), (10, 20.0), (15, 30.0)]
    enqueue_ops = [queue.enqueue(pair) for pair in elements]
    dequeued_t = queue.dequeue()
    for op in enqueue_ops:
      op.run()
    results = []
    for _ in elements:
      int_val, float_val = sess.run(dequeued_t)
      results.append((int_val, float_val))
    self.assertItemsEqual(elements, results)
def testQueueSizeEmpty(self):
  """A freshly constructed queue reports size 0."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    self.assertEqual(0, queue.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
  """size() is a scalar op that tracks enqueues and dequeues."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue()
    size = q.size()
    # The size tensor is a scalar (empty static shape).
    self.assertEqual([], size.get_shape())
    enqueue_op.run()
    self.assertEqual([1], size.eval())
    dequeued_t.op.run()
    self.assertEqual([0], size.eval())
def testEnqueueMany(self):
  """Running enqueue_many twice makes every element available twice."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elements = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = queue.enqueue_many((elements,))
    dequeued_t = queue.dequeue()
    for _ in range(2):
      enqueue_op.run()
    results = [dequeued_t.eval() for _ in range(8)]
    self.assertItemsEqual(elements + elements, results)
def testEmptyEnqueueMany(self):
  """enqueue_many of a zero-length batch leaves the queue size unchanged."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    # Batch dimension is 0, so nothing should be enqueued.
    empty_t = constant_op.constant(
        [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
    enqueue_op = q.enqueue_many((empty_t,))
    size_t = q.size()
    self.assertEqual(0, size_t.eval())
    enqueue_op.run()
    self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
  """dequeue_many(0) yields an empty batch on empty and non-empty queues."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(
        10, 0, dtypes_lib.float32, shapes=())
    enqueue_op = queue.enqueue((10.0,))
    dequeued_t = queue.dequeue_many(0)
    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
  """dequeue_up_to(0) yields an empty batch on empty and non-empty queues."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue_up_to(0)
    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
  """dequeue_many requires fully specified shapes, even for a 0-size batch."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((constant_op.constant(
        [10.0, 20.0], shape=(1, 2)),))
    dequeued_t = q.dequeue_many(0)
    # Expect the operation to fail due to the shape not being constrained.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
    enqueue_op.run()
    # RandomShuffleQueue does not make any attempt to support DequeueMany
    # with unspecified shapes, even if a shape could be inferred from the
    # elements enqueued.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
  """dequeue_up_to requires fully specified shapes, even for a 0-size batch."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((constant_op.constant(
        [10.0, 20.0], shape=(1, 2)),))
    dequeued_t = q.dequeue_up_to(0)
    # Expect the operation to fail due to the shape not being constrained.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
    enqueue_op.run()
    # RandomShuffleQueue does not make any attempt to support DequeueUpTo
    # with unspecified shapes, even if a shape could be inferred from the
    # elements enqueued.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
def testMultiEnqueueMany(self):
  """Two-component enqueue_many delivers matched (float, int-pair) tuples."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32))
    float_elems = [10.0, 20.0, 30.0, 40.0]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue()
    # Run twice, so every pair is present twice.
    enqueue_op.run()
    enqueue_op.run()
    results = []
    for _ in range(8):
      float_val, int_val = sess.run(dequeued_t)
      results.append((float_val, [int_val[0], int_val[1]]))
    expected = list(zip(float_elems, int_elems)) * 2
    self.assertItemsEqual(expected, results)
def testDequeueMany(self):
  """Two dequeue_many(5) calls drain a ten-element queue."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(
        10, 0, dtypes_lib.float32, ((),))
    elements = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    queue.enqueue_many((elements,)).run()
    dequeued_t = queue.dequeue_many(5)
    results = list(dequeued_t.eval())
    results.extend(dequeued_t.eval())
    self.assertItemsEqual(elements, results)
def testDequeueUpToNoBlocking(self):
  """dequeue_up_to returns full batches while enough elements remain."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(5)
    enqueue_op.run()
    results = dequeued_t.eval().tolist()
    results.extend(dequeued_t.eval())
    self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
  """dequeue_many and dequeue drain matched pairs; static shapes are defined."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_many(4)
    dequeued_single_t = q.dequeue()
    enqueue_op.run()
    results = []
    float_val, int_val = sess.run(dequeued_t)
    # Batched dequeue has a fully defined static shape.
    self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_t)
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
    results.append((float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    results.append((float_val, int_val.tolist()))
    self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
  """dequeue_up_to drains matched pairs; its batch dimension is unknown."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_up_to(4)
    dequeued_single_t = q.dequeue()
    enqueue_op.run()
    results = []
    float_val, int_val = sess.run(dequeued_t)
    # dequeue_up_to has undefined shape.
    self.assertEqual([None], dequeued_t[0].get_shape().as_list())
    self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_t)
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
    results.append((float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    results.append((float_val, int_val.tolist()))
    self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
  """Round-trips rank-4 (4x4x4x4) elements through the queue."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(
        10, 0, dtypes_lib.int32, ((4, 4, 4, 4)))
    elements = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)],
                        np.int32)
    queue.enqueue_many((elements,)).run()
    dequeued_t = queue.dequeue_many(10)
    self.assertItemsEqual(dequeued_t.eval().tolist(), elements.tolist())
def testParallelEnqueueMany(self):
  """Ten threads each run the same 100-element enqueue_many op."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(100)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(1000)

    # Enqueue 100 items in parallel on 10 threads.
    def enqueue():
      sess.run(enqueue_op)

    threads = [self.checkedThread(target=enqueue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
  """Ten threads each dequeue_many(100) from a 1000-element queue."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(100)
    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
  """Ten threads each dequeue_up_to(100) from a 1000-element queue."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(100)
    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
  """Ten threads dequeue_up_to random-sized batches that cover all elements."""
  with self.test_session() as sess:
    dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
    total_elements = sum(dequeue_sizes)
    q = data_flow_ops.RandomShuffleQueue(
        total_elements, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in xrange(total_elements)]
    enqueue_op = q.enqueue_many((elems,))
    dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
    enqueue_op.run()

    # Dequeue random number of items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue(dequeue_op):
      dequeued_elems.extend(sess.run(dequeue_op))

    threads = []
    for dequeue_op in dequeue_ops:
      threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
  """dequeue_many blocks until enough elements have been enqueued."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(4)
    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
  """dequeue_up_to blocks until elements are available, then returns them."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)
    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
  """dequeue_many accepts a tensor (dequeued from another queue) as count."""
  with self.test_session():
    # Define a first queue that contains integer counts.
    dequeue_counts = [random.randint(1, 10) for _ in range(100)]
    count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
    total_count = sum(dequeue_counts)

    # Define a second queue that contains total_count elements.
    elems = [random.randint(0, 100) for _ in range(total_count)]
    q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
        (),))
    enqueue_elems_op = q.enqueue_many((elems,))

    # Define a subgraph that first dequeues a count, then DequeuesMany
    # that number of elements.
    dequeued_t = q.dequeue_many(count_q.dequeue())
    enqueue_counts_op.run()
    enqueue_elems_op.run()
    dequeued_elems = []
    for _ in dequeue_counts:
      dequeued_elems.extend(dequeued_t.eval())
    self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
  """dequeue_up_to accepts a tensor (dequeued from another queue) as count."""
  with self.test_session():
    # Define a first queue that contains integer counts.
    dequeue_counts = [random.randint(1, 10) for _ in range(100)]
    count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
    total_count = sum(dequeue_counts)

    # Define a second queue that contains total_count elements.
    elems = [random.randint(0, 100) for _ in range(total_count)]
    q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
        (),))
    enqueue_elems_op = q.enqueue_many((elems,))

    # Define a subgraph that first dequeues a count, then DequeuesUpTo
    # that number of elements.
    dequeued_t = q.dequeue_up_to(count_q.dequeue())
    enqueue_counts_op.run()
    enqueue_elems_op.run()
    dequeued_elems = []
    for _ in dequeue_counts:
      dequeued_elems.extend(dequeued_t.eval())
    self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
  """Pending elements stay dequeueable after close; then OutOfRangeError."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()
    enqueue_op.run()
    close_op.run()
    results = [dequeued_t.eval() for _ in elems]
    expected = [[elem] for elem in elems]
    self.assertItemsEqual(expected, results)
    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                 "is closed and has insufficient"):
      dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
  """min_after_dequeue blocks the last dequeues until the queue is closed."""
  with self.test_session() as sess:
    # min_after_dequeue is 2, so only the first 2 of 4 dequeues can proceed
    # before close lifts the requirement.
    q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()
    enqueue_op.run()
    results = []

    def dequeue():
      for _ in elems:
        results.append(sess.run(dequeued_t))
      self.assertItemsEqual(elems, results)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    # The dequeue thread blocked when it hit the min_size requirement.
    self.assertEqual(len(results), 2)
    close_op.run()
    dequeue_thread.join()
    # Once the queue is closed, the min_size requirement is lifted.
    self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
  """A blocked dequeue on an empty queue fails once the queue is closed."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    close_op = q.close()
    dequeued_t = q.dequeue()
    finished = []  # Needs to be a mutable type

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      finished.append(True)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    self.assertEqual(len(finished), 0)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
  """A second dequeue_many fails after close; the first one succeeds."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)
    enqueue_op.run()
    progress = []  # Must be mutable

    def dequeue():
      self.assertItemsEqual(elems, sess.run(dequeued_t))
      progress.append(1)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      progress.append(2)

    self.assertEqual(len(progress), 0)
    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    # Poll until the first dequeue_many has completed.
    for _ in range(100):
      time.sleep(0.01)
      if len(progress) == 1:
        break
    self.assertEqual(len(progress), 1)
    time.sleep(0.01)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
  """After close, dequeue_up_to returns the remaining partial batch."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)
    enqueue_op.run()
    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEquals(3, len(results))
      # Second call returns the single remaining element after close.
      results.extend(sess.run(dequeued_t))
      self.assertEquals(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
  """dequeue_up_to returns the remainder even below min_after_dequeue."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        capacity=10,
        min_after_dequeue=2,
        dtypes=dtypes_lib.float32,
        shapes=((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)
    enqueue_op.run()
    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEquals(3, len(results))
      # min_after_dequeue is 2, we ask for 3 elements, and we end up only
      # getting the remaining 1.
      results.extend(sess.run(dequeued_t))
      self.assertEquals(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
  """A failed dequeue_many returns its reserved elements to the queue."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(3)
    cleanup_dequeue_t = q.dequeue_many(q.size())
    enqueue_op.run()
    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEqual(len(results), 3)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      # While the last dequeue failed, we want to insure that it returns
      # any elements that it potentially reserved to dequeue. Thus the
      # next cleanup should return a single element.
      results.extend(sess.run(cleanup_dequeue_t))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
  """A blocked dequeue_many on an empty queue fails when the queue closes."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
  """A blocked dequeue_up_to on an empty queue fails when the queue closes."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testEnqueueToClosedQueue(self):
  """Enqueueing to a closed queue raises CancelledError."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
    enqueue_op = queue.enqueue((10.0,))
    enqueue_op.run()
    queue.close().run()
    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
  """enqueue_many on a closed queue raises CancelledError."""
  with self.test_session():
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    enqueue_op.run()
    close_op.run()
    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
  """Enqueue blocks at capacity and completes once a slot frees up."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()
    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    for _ in elems:
      results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0], results)
    # There wasn't room for 50.0 in the queue when the first element was
    # dequeued.
    self.assertNotEqual(50.0, results[0])
    thread.join()
def testBlockingEnqueueManyToFullQueue(self):
  """enqueue_many blocks at capacity and drains in as slots free up."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    dequeued_t = q.dequeue()
    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    for _ in elems:
      time.sleep(0.01)
      results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0, 60.0], results)
    # There wasn't room for 50.0 or 60.0 in the queue when the first
    # element was dequeued.
    self.assertNotEqual(50.0, results[0])
    self.assertNotEqual(60.0, results[0])
    # Similarly for 60.0 and the second element.
    self.assertNotEqual(60.0, results[1])
    thread.join()
  def testBlockingEnqueueToClosedQueue(self):
    """A pending enqueue finishes before close; a later enqueue is cancelled."""
    with self.test_session() as sess:
      # Queue is filled to capacity so the extra enqueue must block.
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      close_op = q.close()
      enqueue_op.run()
      def blocking_enqueue():
        # Expect the operation to succeed since it will complete
        # before the queue is closed.
        sess.run(blocking_enqueue_op)
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
          sess.run(blocking_enqueue_op)
      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()
      # The close_op should run after the first blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      def blocking_close():
        # close() itself blocks until all pending enqueues have completed.
        sess.run(close_op)
      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()
      # Wait for the close op to block before unblocking the enqueue.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      results = []
      # Dequeue to unblock the first blocking_enqueue_op, after which the
      # close will complete.
      results.append(dequeued_t.eval())
      self.assertTrue(results[0] in elems)
      thread2.join()
      thread1.join()
  def testBlockingEnqueueManyToClosedQueue(self):
    """A partially-applied enqueue_many drains before close cancels the rest."""
    with self.test_session() as sess:
      # Three of four slots are used; enqueue_many of two elements can place
      # only one before blocking.
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      size_t = q.size()
      enqueue_op.run()
      self.assertEqual(size_t.eval(), 3)
      def blocking_enqueue():
        # This will block until the dequeue after the close.
        sess.run(blocking_enqueue_op)
        # At this point the close operation will become unblocked, so the
        # next enqueue will fail.
        with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
          sess.run(blocking_enqueue_op)
      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeue.
      self.assertEqual(size_t.eval(), 4)
      def blocking_close():
        sess.run(close_op)
      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()
      # The close_op should run before the second blocking_enqueue_op
      # has started.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # Unblock the first blocking_enqueue_op in blocking_enqueue.
      q.dequeue().eval()
      thread2.join()
      thread1.join()
  def testSharedQueueSameSession(self):
    """Two queues with the same shared_name share underlying state."""
    with self.test_session():
      q1 = data_flow_ops.RandomShuffleQueue(
          1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
      q1.enqueue((10.0,)).run()
      # TensorFlow TestCase adds a default graph seed (=87654321). We check if
      # the seed computed from the default graph seed is reproduced.
      seed = 887634792
      q2 = data_flow_ops.RandomShuffleQueue(
          1,
          0,
          dtypes_lib.float32, ((),),
          shared_name="shared_queue",
          seed=seed)
      q1_size_t = q1.size()
      q2_size_t = q2.size()
      # Both handles observe the element enqueued via q1...
      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
      # ...and a dequeue via q2 drains the shared queue for both.
      self.assertEqual(q2.dequeue().eval(), 10.0)
      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)
      # The sharing is symmetric: enqueue via q2, dequeue via q1.
      q2.enqueue((20.0,)).run()
      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
      self.assertEqual(q1.dequeue().eval(), 20.0)
      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)
  def testSharedQueueSameSessionGraphSeedNone(self):
    """Sharing works when the second queue has no graph/op seed at all."""
    with self.test_session():
      q1 = data_flow_ops.RandomShuffleQueue(
          1,
          0,
          dtypes_lib.float32, ((),),
          shared_name="shared_queue",
          seed=98765432)
      q1.enqueue((10.0,)).run()
      # If both graph and op seeds are not provided, the default value must be
      # used, and in case a shared queue is already created, the second queue op
      # must accept any previous seed value.
      random_seed.set_random_seed(None)
      q2 = data_flow_ops.RandomShuffleQueue(
          1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
      q1_size_t = q1.size()
      q2_size_t = q2.size()
      # Both handles must see the shared element despite the seed mismatch.
      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
  def testIncompatibleSharedQueueErrors(self):
    """Opening a shared queue with mismatched attributes raises op errors.

    Each pair below creates a shared queue, then a second queue with the same
    shared_name but one incompatible attribute; running the second queue's op
    must fail with a message naming the mismatched attribute.
    """
    with self.test_session():
      # Mismatched capacity.
      q_a_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_a")
      q_a_2 = data_flow_ops.RandomShuffleQueue(
          15, 5, dtypes_lib.float32, shared_name="q_a")
      q_a_1.queue_ref.op.run()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.op.run()
      # Mismatched min_after_dequeue.
      q_b_1 = data_flow_ops.RandomShuffleQueue(
          10, 0, dtypes_lib.float32, shared_name="q_b")
      q_b_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_b")
      q_b_1.queue_ref.op.run()
      with self.assertRaisesOpError("min_after_dequeue"):
        q_b_2.queue_ref.op.run()
      # Mismatched component dtype.
      q_c_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_c")
      q_c_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.int32, shared_name="q_c")
      q_c_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_c_2.queue_ref.op.run()
      # Shapes specified only on the second queue.
      q_d_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_d")
      q_d_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.op.run()
      # Shapes specified only on the first queue.
      q_e_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_e")
      q_e_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.op.run()
      # Shapes specified on both, but differing.
      q_f_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
      q_f_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
      q_f_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_f_2.queue_ref.op.run()
      # Differing number of components.
      q_g_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_g")
      q_g_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
      q_g_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_g_2.queue_ref.op.run()
      # Differing explicit seeds.
      q_h_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
      q_h_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
      q_h_1.queue_ref.op.run()
      with self.assertRaisesOpError("random seeds"):
        q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels every kind of blocked queue operation."""
    with self.test_session() as sess:
      # An empty queue: all dequeue variants on it will block.
      q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
          (),))
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)
      dequeue_up_to_op = q_empty.dequeue_up_to(1)
      # A full queue: all enqueue variants on it will block.
      q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))
      # Each helper asserts its op fails with "was cancelled".
      threads = [
          self.checkedThread(
              self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(
              self._blockingDequeueMany, args=(sess, dequeue_many_op)),
          self.checkedThread(
              self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
          self.checkedThread(
              self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(
              self._blockingEnqueueMany, args=(sess, enqueue_many_op))
      ]
      for t in threads:
        t.start()
      # Give all five ops time to block before cancelling them.
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
  def testBigEnqueueMany(self):
    """enqueue_many larger than capacity completes only as space is freed."""
    with self.test_session() as sess:
      # Capacity 5, but ten elements are enqueued in one enqueue_many.
      q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()
      # Side-channel list: one entry before the enqueue, one after it
      # finishes, so len(enq_done) reveals whether enq is still blocked.
      enq_done = []
      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      # Three more dequeues still leave the enqueue blocked (elements 5..9
      # refill the queue as slots open).
      for _ in range(3):
        results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()
      # Drain the remaining five, checking the size shrinks one per dequeue.
      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)
      self.assertItemsEqual(elem, results)
  def testBigDequeueMany(self):
    """dequeue_many larger than capacity completes once enough is enqueued."""
    with self.test_session() as sess:
      # Capacity 2, but four elements are requested in one dequeue_many.
      q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)
      results = []
      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))
      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        # The dequeue must not have produced anything before all enqueues.
        self.assertEqual(len(results), 0)
        sess.run(enq)
      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertItemsEqual(elem, results)
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
jasonmccampbell/numpy-refactor-sprint
|
numpy/f2py/tests/test_return_character.py
|
59
|
3768
|
from numpy.testing import *
from numpy import array
from numpy.compat import asbytes
import util
class TestReturnCharacter(util.F2PyTest):
    """Check character return values of f2py-wrapped Fortran routines.

    Subclasses provide ``code`` (the Fortran source to build); this base
    class dispatches on the wrapped routine's name, which f2py places at
    the start of the generated docstring.
    """

    def check_function(self, t):
        # First token of the f2py-generated docstring is the routine name.
        tname = t.__doc__.split()[0]
        if tname in ['t0', 't1', 's0', 's1']:
            # Single-character results: integers are formatted, longer
            # strings/arrays are truncated to their first character.
            assert t(23) == asbytes('2')
            r = t('ab')
            # NOTE: repr() replaces the Python-2-only backtick syntax; it is
            # equivalent on Python 2 and required for Python 3 compatibility.
            assert r == asbytes('a'), repr(r)
            r = t(array('ab'))
            assert r == asbytes('a'), repr(r)
            r = t(array(77, 'u1'))
            assert r == asbytes('M'), repr(r)  # chr(77) == 'M'
            #assert_raises(ValueError, t, array([77,87]))
            #assert_raises(ValueError, t, array(77))
        elif tname in ['ts', 'ss']:
            assert t(23) == asbytes('23 '), repr(t(23))
            assert t('123456789abcdef') == asbytes('123456789a')
        elif tname in ['t5', 's5']:
            assert t(23) == asbytes('23 '), repr(t(23))
            assert t('ab') == asbytes('ab '), repr(t('ab'))
            assert t('123456789abcdef') == asbytes('12345')
        else:
            raise NotImplementedError
class TestF77ReturnCharacter(TestReturnCharacter):
    # Fixed-form Fortran 77 sources: function and subroutine variants
    # returning character, character*1, character*5 and assumed-length
    # character*(*) values, exercised via check_function().
    code = """
       function t0(value)
         character value
         character t0
         t0 = value
       end
       function t1(value)
         character*1 value
         character*1 t1
         t1 = value
       end
       function t5(value)
         character*5 value
         character*5 t5
         t5 = value
       end
       function ts(value)
         character*(*) value
         character*(*) ts
         ts = value
       end
       subroutine s0(t0,value)
         character value
         character t0
cf2py    intent(out) t0
         t0 = value
       end
       subroutine s1(t1,value)
         character*1 value
         character*1 t1
cf2py    intent(out) t1
         t1 = value
       end
       subroutine s5(t5,value)
         character*5 value
         character*5 t5
cf2py    intent(out) t5
         t5 = value
       end
       subroutine ss(ts,value)
         character*(*) value
         character*10 ts
cf2py    intent(out) ts
         ts = value
       end
    """
    @dec.slow
    def test_all(self):
        """Run check_function against every F77 routine (ts is excluded)."""
        for name in "t0,t1,t5,s0,s1,s5,ss".split(","):
            self.check_function(getattr(self.module, name))
class TestF90ReturnCharacter(TestReturnCharacter):
    # Free-form Fortran 90 versions of the same routines, wrapped in a
    # module; built with the .f90 suffix so f2py uses free-form parsing.
    suffix = ".f90"
    code = """
module f90_return_char
  contains
       function t0(value)
         character :: value
         character :: t0
         t0 = value
       end function t0
       function t1(value)
         character(len=1) :: value
         character(len=1) :: t1
         t1 = value
       end function t1
       function t5(value)
         character(len=5) :: value
         character(len=5) :: t5
         t5 = value
       end function t5
       function ts(value)
         character(len=*) :: value
         character(len=10) :: ts
         ts = value
       end function ts
       subroutine s0(t0,value)
         character :: value
         character :: t0
         !f2py intent(out) t0
         t0 = value
       end subroutine s0
       subroutine s1(t1,value)
         character(len=1) :: value
         character(len=1) :: t1
         !f2py intent(out) t1
         t1 = value
       end subroutine s1
       subroutine s5(t5,value)
         character(len=5) :: value
         character(len=5) :: t5
         !f2py intent(out) t5
         t5 = value
       end subroutine s5
       subroutine ss(ts,value)
         character(len=*) :: value
         character(len=10) :: ts
         !f2py intent(out) ts
         ts = value
       end subroutine ss
end module f90_return_char
    """
    @dec.slow
    def test_all(self):
        """Run check_function against every routine in the F90 module."""
        for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","):
            self.check_function(getattr(self.module.f90_return_char, name))
# Allow running this test module directly under nose.
if __name__ == "__main__":
    import nose
    nose.runmodule()
|
bsd-3-clause
|
wscullin/spack
|
var/spack/repos/builtin/packages/libvorbis/package.py
|
3
|
1890
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libvorbis(AutotoolsPackage):
    """Ogg Vorbis is a fully open, non-proprietary, patent-and-royalty-free,
    general-purpose compressed audio format for mid to high quality (8kHz-
    48.0kHz, 16+ bit, polyphonic) audio and music at fixed and variable
    bitrates from 16 to 128 kbps/channel."""

    homepage = "https://xiph.org/vorbis/"
    url = "http://downloads.xiph.org/releases/vorbis/libvorbis-1.3.5.tar.gz"

    # The second argument is the md5 checksum of the release tarball.
    version('1.3.5', '7220e089f3be3412a2317d6fde9e3944')

    depends_on('libogg')

    # pkg-config is only needed at build time to locate libogg.
    depends_on('pkg-config@0.9.0:', type='build')

    # `make check` crashes when run in parallel
    parallel = False
|
lgpl-2.1
|
mhotwagner/abackend
|
abackend-env/lib/python3.5/site-packages/psycopg2/_json.py
|
48
|
7836
|
"""Implementation of the JSON adaptation objects
This module exists to avoid a circular import problem: pyscopg2.extras depends
on psycopg2.extension, so I can't create the default JSON typecasters in
extensions importing register_json from extras.
"""
# psycopg/_json.py - Implementation of the JSON adaptation objects
#
# Copyright (C) 2012 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
from psycopg2._psycopg import ISQLQuote, QuotedString
from psycopg2._psycopg import new_type, new_array_type, register_type
# import the best json implementation available
if sys.version_info[:2] >= (2,6):
import json
else:
try:
import simplejson as json
except ImportError:
json = None
# Builtin type oids for json and json[], fixed since PostgreSQL 9.2.
JSON_OID = 114
JSONARRAY_OID = 199
# Builtin type oids for jsonb and jsonb[], fixed since PostgreSQL 9.4.
JSONB_OID = 3802
JSONBARRAY_OID = 3807
class Json(object):
    """`~psycopg2.extensions.ISQLQuote` adapter for the :sql:`json` type.

    Wraps any Python object accepted by the provided *dumps* callable.
    When *dumps* is omitted, the module-level json implementation is used
    (`!simplejson` on Python < 2.6); if no json module could be imported,
    `~psycopg2.extensions.ISQLQuote.getquoted()` raises `!ImportError`.
    """
    def __init__(self, adapted, dumps=None):
        self.adapted = adapted
        if dumps is None:
            # Fall back to whichever json module the package located at
            # import time; may legitimately be None on ancient Pythons.
            self._dumps = json.dumps if json is not None else None
        else:
            self._dumps = dumps

    def __conform__(self, proto):
        # Only the ISQLQuote adaptation protocol is supported.
        if proto is ISQLQuote:
            return self

    def dumps(self, obj):
        """Serialize *obj* in JSON format.

        Defaults to `!json.dumps()` or the *dumps* callable given to the
        constructor; override in a subclass to customize serialization.
        """
        serialize = self._dumps
        if serialize is None:
            raise ImportError(
                "json module not available: "
                "you should provide a dumps function")
        return serialize(obj)

    def getquoted(self):
        # Quote the serialized payload exactly like a string literal.
        return QuotedString(self.dumps(self.adapted)).getquoted()

    if sys.version_info < (3,):
        def __str__(self):
            return self.getquoted()
    else:
        def __str__(self):
            # getquoted is binary in Py3
            return self.getquoted().decode('ascii', 'replace')
def register_json(conn_or_curs=None, globally=False, loads=None,
        oid=None, array_oid=None, name='json'):
    """Create and register typecasters converting :sql:`json` type to Python objects.
    :param conn_or_curs: a connection or cursor used to find the :sql:`json`
        and :sql:`json[]` oids; the typecasters are registered in a scope
        limited to this object, unless *globally* is set to `!True`. It can be
        `!None` if the oids are provided
    :param globally: if `!False` register the typecasters only on
        *conn_or_curs*, otherwise register them globally
    :param loads: the function used to parse the data into a Python object. If
        `!None` use `!json.loads()`, where `!json` is the module chosen
        according to the Python version (see above)
    :param oid: the OID of the :sql:`json` type if known; If not, it will be
        queried on *conn_or_curs*
    :param array_oid: the OID of the :sql:`json[]` array type if known;
        if not, it will be queried on *conn_or_curs*
    :param name: the name of the data type to look for in *conn_or_curs*
    The connection or cursor passed to the function will be used to query the
    database and look for the OID of the :sql:`json` type (or an alternative
    type if *name* if provided). No query is performed if *oid* and *array_oid*
    are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.

    :return: the ``(JSON, JSONARRAY)`` typecaster pair that was registered.
    """
    if oid is None:
        # Discover the oids from the server when not supplied by the caller.
        oid, array_oid = _get_json_oids(conn_or_curs, name)
    JSON, JSONARRAY = _create_json_typecasters(
        oid, array_oid, loads=loads, name=name.upper())
    # Scope registration to conn_or_curs unless global registration was
    # requested (`not globally and conn_or_curs or None` is the py2 idiom).
    register_type(JSON, not globally and conn_or_curs or None)
    if JSONARRAY is not None:
        register_type(JSONARRAY, not globally and conn_or_curs or None)
    return JSON, JSONARRAY
def register_default_json(conn_or_curs=None, globally=False, loads=None):
    """Register :sql:`json` typecasters using the fixed PostgreSQL 9.2+ oids.

    Since PostgreSQL 9.2 :sql:`json` is a builtin type whose oid is known
    and fixed, so a customized *loads* function can be installed without
    querying the database. All parameters have the same meaning as in
    `register_json()`.
    """
    return register_json(
        conn_or_curs=conn_or_curs,
        globally=globally,
        loads=loads,
        oid=JSON_OID,
        array_oid=JSONARRAY_OID,
    )
def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
    """Register :sql:`jsonb` typecasters using the fixed PostgreSQL 9.4+ oids.

    As in `register_default_json()`, this installs a possibly customized
    *loads* function for the :sql:`jsonb` type at its known, fixed oid
    without querying the database. All parameters have the same meaning as
    in `register_json()`.
    """
    return register_json(
        conn_or_curs=conn_or_curs,
        globally=globally,
        loads=loads,
        oid=JSONB_OID,
        array_oid=JSONBARRAY_OID,
        name='jsonb',
    )
def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
    """Build the ``(JSON, JSONARRAY)`` typecaster pair for the given oids.

    *loads* defaults to the available json module's ``loads``; raises
    `!ImportError` when no json implementation could be imported.
    ``JSONARRAY`` is `!None` when *array_oid* is `!None`.
    """
    if loads is None:
        if json is None:
            raise ImportError("no json module available")
        loads = json.loads

    def typecast_json(s, cur):
        # SQL NULL arrives as None and stays None.
        return None if s is None else loads(s)

    JSON = new_type((oid, ), name, typecast_json)
    if array_oid is None:
        JSONARRAY = None
    else:
        JSONARRAY = new_array_type((array_oid, ), "%sARRAY" % name, JSON)
    return JSON, JSONARRAY
def _get_json_oids(conn_or_curs, name='json'):
    """Query the database for the oids of *name* and its array type.

    Returns the ``(oid, array_oid)`` row; ``array_oid`` is NULL on servers
    older than PostgreSQL 8.3, which lack ``pg_type.typarray``. Raises
    `!ProgrammingError` if the type does not exist.
    """
    # lazy imports
    from psycopg2.extensions import STATUS_IN_TRANSACTION
    from psycopg2.extras import _solve_conn_curs
    conn, curs = _solve_conn_curs(conn_or_curs)
    # Store the transaction status of the connection to revert it after use
    conn_status = conn.status
    # column typarray not available before PG 8.3
    typarray = conn.server_version >= 80300 and "typarray" or "NULL"
    # get the oid for the requested type (and its array type if available)
    curs.execute(
        "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
        % typarray, (name,))
    r = curs.fetchone()
    # revert the status of the connection as before the command
    if (conn_status != STATUS_IN_TRANSACTION and not conn.autocommit):
        conn.rollback()
    if not r:
        raise conn.ProgrammingError("%s data type not found" % name)
    return r
|
mit
|
indrajitr/ansible-modules-extras
|
notification/sns.py
|
44
|
5736
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: sns
short_description: Send Amazon Simple Notification Service (SNS) messages
description:
- The M(sns) module sends notifications to a topic on your Amazon SNS account
version_added: 1.6
author: "Michael J. Schultz (@mjschultz)"
options:
msg:
description:
- Default message to send.
required: true
aliases: [ "default" ]
subject:
description:
- Subject line for email delivery.
required: false
topic:
description:
- The topic you want to publish to.
required: true
email:
description:
- Message to send to email-only subscription
required: false
sqs:
description:
- Message to send to SQS-only subscription
required: false
sms:
description:
- Message to send to SMS-only subscription
required: false
http:
description:
- Message to send to HTTP-only subscription
required: false
https:
description:
- Message to send to HTTPS-only subscription
required: false
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key']
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key']
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
requirements: [ "boto" ]
"""
EXAMPLES = """
- name: Send default notification message via SNS
local_action:
module: sns
msg: "{{ inventory_hostname }} has completed the play."
subject: "Deploy complete!"
topic: "deploy"
- name: Send notification messages via SNS with short message for SMS
local_action:
module: sns
msg: "{{ inventory_hostname }} has completed the play."
sms: "deployed!"
subject: "Deploy complete!"
topic: "deploy"
"""
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto
import boto.ec2
import boto.sns
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def arn_topic_lookup(connection, short_topic):
    """Resolve a short SNS topic name to its full ARN.

    Scans every topic visible to *connection* and returns the first ARN
    whose final, colon-delimited component equals *short_topic*; returns
    None when nothing matches.
    """
    listing = connection.get_all_topics()
    topics = listing[u'ListTopicsResponse'][u'ListTopicsResult'][u'Topics']
    # Topic names cannot contain ':', so matching ':<name>' at the end of
    # the ARN matches the complete topic name, never a substring of one.
    suffix = ':{}'.format(short_topic)
    for entry in topics:
        arn = entry[u'TopicArn']
        if arn.endswith(suffix):
            return arn
    return None
def main():
    """Entry point: publish a message to an SNS topic.

    Builds the per-protocol message structure from the module parameters
    and publishes it with ``message_structure='json'`` so each subscription
    type receives its specific override (or the default message).
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            msg=dict(type='str', required=True, aliases=['default']),
            subject=dict(type='str', default=None),
            topic=dict(type='str', required=True),
            email=dict(type='str', default=None),
            sqs=dict(type='str', default=None),
            sms=dict(type='str', default=None),
            http=dict(type='str', default=None),
            https=dict(type='str', default=None),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    msg = module.params['msg']
    subject = module.params['subject']
    topic = module.params['topic']

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")
    try:
        connection = connect_to_aws(boto.sns, region, **aws_connect_params)
    # 'except ... as' is valid on Python 2.6+ and required on Python 3,
    # unlike the old 'except Exception, e' form.
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    # .publish() takes full ARN topic id, but I'm lazy and type shortnames
    # so do a lookup (topics cannot contain ':', so thats the decider)
    if ':' in topic:
        arn_topic = topic
    else:
        arn_topic = arn_topic_lookup(connection, topic)
    if not arn_topic:
        module.fail_json(msg='Could not find topic: {}'.format(topic))

    # Assemble the per-protocol message structure; only protocols with an
    # explicit override are included alongside the required default.
    dict_msg = {'default': msg}
    for protocol in ('email', 'sqs', 'sms', 'http', 'https'):
        if module.params[protocol]:
            dict_msg[protocol] = module.params[protocol]
    json_msg = json.dumps(dict_msg)
    try:
        connection.publish(topic=arn_topic, subject=subject,
                           message_structure='json', message=json_msg)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=str(e))

    module.exit_json(msg="OK")


if __name__ == '__main__':
    main()
|
gpl-3.0
|
egoldchain/egoldchain-master
|
test/functional/invalidtxrequest.py
|
50
|
2621
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):
    ''' Can either run this test as 1 node with expected answers, or two and compare them.
    Change the "outcome" variable from each TestInstance object to only do the comparison. '''
    def __init__(self):
        super().__init__()
        # A single node is enough when running with expected answers.
        self.num_nodes = 1
    def run_test(self):
        # Drive the comparison framework over the instances from get_tests().
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start()  # Start up network handling in another thread
        test.run()
    def get_tests(self):
        """Generator yielding TestInstances: setup blocks, then an invalid tx."""
        if self.tip is None:
            self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.block_time = int(time.time())+1
        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])
        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # Mine 100 further blocks so the first coinbase reaches maturity.
        test = TestInstance(sync_every_block=False)
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test
        # b'\x64' is OP_NOTIF
        # Transaction will be rejected with code 16 (REJECT_INVALID)
        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
        yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
        # TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
|
mit
|
simbs/edx-platform
|
openedx/core/djangoapps/credit/exceptions.py
|
60
|
2281
|
"""Exceptions raised by the credit API. """
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException
# TODO: Cleanup this mess! ECOM-2908
class CreditApiBadRequest(Exception):
    """Base error for credit API calls that failed because the caller's
    request was invalid or malformed (as opposed to an internal error).
    """
class InvalidCreditRequirements(CreditApiBadRequest):
    """Raised when the supplied requirement dictionary has an invalid format."""
class InvalidCreditCourse(CreditApiBadRequest):
    """Raised when the course is not configured for credit."""
class UserIsNotEligible(CreditApiBadRequest):
    """Raised when the user has not satisfied the eligibility requirements
    for credit.
    """
class CreditProviderNotConfigured(CreditApiBadRequest):
    """Raised when the requested credit provider is not configured correctly
    for the course.
    """
class RequestAlreadyCompleted(CreditApiBadRequest):
    """Raised when the user has already submitted a request and received a
    response from the credit provider.
    """
class CreditRequestNotFound(CreditApiBadRequest):
    """Raised when the referenced credit request does not exist."""
class InvalidCreditStatus(CreditApiBadRequest):
    """Raised when a credit status is neither "approved" nor "rejected"."""
class InvalidCreditRequest(APIException):
    """Base API error for invalid credit requests; rendered as HTTP 400."""
    status_code = status.HTTP_400_BAD_REQUEST
class UserNotEligibleException(InvalidCreditRequest):
    """Raised when a user does not qualify for credit in a given course."""

    def __init__(self, course_key, username):
        message = _('[{username}] is not eligible for credit for [{course_key}].')
        detail = message.format(username=username, course_key=course_key)
        super(UserNotEligibleException, self).__init__(detail)
class InvalidCourseKey(InvalidCreditRequest):
    """Raised when the supplied course key cannot be parsed."""

    def __init__(self, course_key):
        message = _('[{course_key}] is not a valid course key.')
        super(InvalidCourseKey, self).__init__(message.format(course_key=course_key))
|
agpl-3.0
|
Koulio/rietveld
|
third_party/oauth2client/file.py
|
253
|
3160
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class CredentialsFileSymbolicLinkError(Exception):
    """Raised when a credentials file turns out to be a symbolic link;
    symlinked credential files are rejected.
    """
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from a file."""
def __init__(self, filename):
self._filename = filename
self._lock = threading.Lock()
def _validate_file(self):
if os.path.islink(self._filename):
raise CredentialsFileSymbolicLinkError(
'File: %s is a symbolic link.' % self._filename)
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
Raises:
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
credentials = None
self._validate_file()
try:
f = open(self._filename, 'rb')
content = f.read()
f.close()
except IOError:
return credentials
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
Raises:
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
self._create_file_if_needed()
self._validate_file()
f = open(self._filename, 'wb')
f.write(credentials.to_json())
f.close()
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
os.unlink(self._filename)
|
apache-2.0
|
jiachenning/odoo
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertBracesToField.py
|
384
|
12556
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
import base64
from com.sun.star.task import XJobExecutor
# When run as a standalone script (not packaged as an OpenOffice extension),
# pull in the helper libraries and use hard-coded test connection globals.
if __name__<>"package":
    from lib.gui import *
    from LoginTest import *
    from lib.logreport import *
    from lib.rpc import *
    database="test"
    uid = 1
class ConvertBracesToField( unohelper.Base, XJobExecutor ):
    """OpenOffice job that converts ``[[ ... ]]`` report placeholders in the
    current document into DropDown text fields, then resolves each field's
    visible label from the OpenERP model metadata fetched over XML-RPC.
    """
    def __init__(self, ctx):
        # ctx: UNO component context supplied by OpenOffice.
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        LoginTest()
        self.logobj=Logger()
        if not loginstatus and __name__=="package":
            exit(1)
        # Connection credentials/session come from module-level globals.
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        self.aReportSyntex=[]
        # First turn the raw [[ ... ]] markers into DropDown fields, then
        # resolve their display labels.
        self.getBraces(self.aReportSyntex)
        self.setValue()
    def setValue(self):
        """Walk every DropDown text field in the document and replace its
        visible label with the server-provided field label ('|-.Label.-|'),
        or 'TTT' when the lookup fails.
        """
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo= doc.getDocumentInfo()
        count = 0
        # Patterns recognized inside a placeholder; each entry is
        # [regex, kind] where kind is "Field" or "RepeatIn".
        regexes = [
            ['[a-zA-Z0-9_]+\.[a-zA-Z0-9_.]+',"Field"],
            ['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
            ['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"]
            # ['\\[\\[ ([a-zA-Z0-9_]+\.[a-zA-Z1-9]) \\]\\]',"Field"],
            # ['\\[\\[ [a-zA-Z0-9_\.]+ and ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
            # ['\\[\\[ ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
            # ['\\[\\[ ([a-zA-Z0-9_\.]+) and .+? \\]\\]',"Field"],
            # ['\\[\\[ .+? or ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
            # ['\\[\\[ (.+?) and ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
            # ['\\[\\[ .+? % ([a-zA-Z0-9_\.]+) \\]\\]',"Field"]
        ]
        oFieldObject = []
        oRepeatInObjects = []
        saRepeatInList = []
        sHost = docinfo.getUserFieldValue(0)
        nCount = 0
        # Count the text fields currently in the document.
        oParEnum = doc.getTextFields().createEnumeration()
        while oParEnum.hasMoreElements():
            oPar = oParEnum.nextElement()
            nCount += 1
        getList(oRepeatInObjects,sHost,nCount)
        # Split each repeatIn entry "name(expr)" into [name, expr].
        for ro in oRepeatInObjects:
            if ro.find("(")<>-1:
                saRepeatInList.append( [ ro[:ro.find("(")], ro[ro.find("(")+1:ro.find(")")] ])
        try:
            oParEnum = doc.getTextFields().createEnumeration()
            while oParEnum.hasMoreElements():
                oPar = oParEnum.nextElement()
                if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                    for reg in regexes:
                        res=re.findall(reg[0],oPar.Items[1])
                        if len(res) <> 0:
                            if res[0][0] == "objects":
                                # Placeholder refers to the report's root
                                # recordset: label it with the model name.
                                sTemp = docinfo.getUserFieldValue(3)
                                sTemp = "|-." + sTemp[sTemp.rfind(".")+1:] + ".-|"
                                oPar.Items=(sTemp.encode("utf-8"),oPar.Items[1].replace(' ',""))
                                oPar.update()
                            elif type(res[0]) <> type(u''):
                                # Tuple match (RepeatIn pattern): resolve via
                                # the document's main model.
                                sObject = self.getRes(self.sock, docinfo.getUserFieldValue(3), res[0][0][res[0][0].find(".")+1:].replace(".","/"))
                                r = self.sock.execute(database, uid, self.password, docinfo.getUserFieldValue(3) , 'fields_get')
                                sExpr="|-." + r[res[0][0][res[0][0].rfind(".")+1:]]["string"] + ".-|"
                                oPar.Items=(sExpr.encode("utf-8"),oPar.Items[1].replace(' ',""))
                                oPar.update()
                            else:
                                # Plain field: find the repeatIn alias it is
                                # rooted at, then read one sample record.
                                obj = None
                                for rl in saRepeatInList:
                                    if rl[0] == res[0][:res[0].find(".")]:
                                        obj=rl[1]
                                try:
                                    sObject = self.getRes(self.sock, obj, res[0][res[0].find(".")+1:].replace(".","/"))
                                    r = self.sock.execute(database, uid, self.password, sObject , 'read',[1])
                                except Exception,e:
                                    # "TTT" is the sentinel label for lookups
                                    # that failed on the server side.
                                    r = "TTT"
                                    self.logobj.log_write('ConvertBracesToField', LOG_ERROR, str(e))
                                if len(r) <> 0:
                                    if r <> "TTT":
                                        if len(res)>1:
                                            sExpr=""
                                            print res
                                            if reg[1] == 'Field':
                                                for ires in res:
                                                    try:
                                                        sExpr=r[0][ires[ires.rfind(".")+1:]]
                                                        break
                                                    except Exception,e:
                                                        import traceback,sys
                                                        info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                        self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
                                            try:
                                                oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
                                                oPar.update()
                                            except:
                                                oPar.Items=(str(sExpr) ,oPar.Items[1])
                                                oPar.update()
                                                import traceback,sys
                                                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                self.logobj.log_write('ConvertBracesToField', LOG_ERROR, info)
                                        else:
                                            sExpr=r[0][res[0][res[0].rfind(".")+1:]]
                                            try:
                                                if sExpr:
                                                    oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
                                                    oPar.update()
                                                else:
                                                    # Empty value: show "/".
                                                    oPar.Items=(u"/",oPar.Items[1])
                                                    oPar.update()
                                            except:
                                                oPar.Items=(str(sExpr) ,oPar.Items[1])
                                                oPar.update()
                                                import traceback,sys
                                                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
                                    else:
                                        oPar.Items=(u""+r,oPar.Items[1])
                                        oPar.update()
                                else:
                                    oPar.Items=(u"TTT",oPar.Items[1])
                                    oPar.update()
        except:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
    def getRes(self, sock, sObject, sVar):
        """Follow a '/'-separated relation path starting at model `sObject`
        and return the model name that the final path segment belongs to.
        Only many2one links are traversed (recursively).
        """
        desktop=getDesktop()
        doc =desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        res = sock.execute(database, uid, self.password, sObject , 'fields_get')
        key = res.keys()
        key.sort()
        myval=None
        if not sVar.find("/")==-1:
            myval=sVar[:sVar.find("/")]
        else:
            myval=sVar
        for k in key:
            if (res[k]['type'] in ['many2one']) and k==myval:
                sObject = self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
        return sObject
    def getBraces(self, aReportSyntex=None):
        """Search the document for [[ ... ]] markers and replace each one with
        a DropDown text field; collected [field, kind] pairs are appended to
        `aReportSyntex`.
        """
        if aReportSyntex is None:
            aReportSyntex = []
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        aSearchString=[]
        aReplaceString=[]
        aRes=[]
        try:
            regexes = [
                ['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
                ['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"],
                ['\\[\\[ *.+? *\\]\\]', "Expression"]
            ]
            # First pass: regex search; unique single matches are replaced
            # inline, the rest are queued in aRes for the literal pass below.
            search = doc.createSearchDescriptor()
            search.SearchRegularExpression = True
            for reg in regexes:
                search.SearchString = reg[0]
                found = doc.findFirst( search )
                while found:
                    res=re.findall(reg[0],found.String)
                    print len(res)
                    if found.String not in [r[0] for r in aReportSyntex] and len(res) == 1 :
                        text=found.getText()
                        oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
                        if reg[1]<>"Expression":
                            oInputList.Items=(u""+found.String,u""+found.String)
                        else:
                            oInputList.Items=(u"?",u""+found.String)
                        aReportSyntex.append([oInputList,reg[1]])
                        text.insertTextContent(found,oInputList,False)
                        found.String =""
                    else:
                        aRes.append([res,reg[1]])
                    found = doc.findNext(found.End, search)
            # Second pass: plain-text search for the queued matches.
            search = doc.createSearchDescriptor()
            search.SearchRegularExpression = False
            for res in aRes:
                for r in res[0]:
                    search.SearchString=r
                    found=doc.findFirst(search)
                    while found:
                        text=found.getText()
                        oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
                        if res[1]<>"Expression":
                            oInputList.Items=(u""+found.String,u""+found.String)
                        else:
                            oInputList.Items=(u"?",u""+found.String)
                        aReportSyntex.append([oInputList,res[1]])
                        text.insertTextContent(found,oInputList,False)
                        found.String =""
                        found = doc.findNext(found.End, search)
        except:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
# Standalone run: execute the job immediately; packaged as an extension:
# register the job implementation with the UNO service manager.
if __name__<>"package":
    ConvertBracesToField(None)
else:
    g_ImplementationHelper.addImplementation( ConvertBracesToField, "org.openoffice.openerp.report.convertBF", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Belxjander/Kirito
|
Python-3.5.0-main/Lib/json/encoder.py
|
8
|
15978
|
"""Implementation of JSONEncoder
"""
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _json import encode_basestring as c_encode_basestring
except ImportError:
c_encode_basestring = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
# Characters that must be escaped inside JSON strings: control characters,
# backslash and double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ensure_ascii output: additionally escape everything outside
# printable ASCII.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(b'[\x80-\xff]')
# Map of characters to their JSON escape sequences.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    # Remaining control characters fall back to the generic \uXXXX form.
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
FLOAT_REPR = repr
def py_encode_basestring(s):
    """Return a JSON representation of a Python string."""
    # Swap each escapable character for its mapped escape sequence.
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'
# Prefer the C-accelerated escaper when _json is available.
encode_basestring = (c_encode_basestring or py_encode_basestring)
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string."""
    def replace(match):
        ch = match.group(0)
        try:
            return ESCAPE_DCT[ch]
        except KeyError:
            pass
        code = ord(ch)
        if code < 0x10000:
            return '\\u{0:04x}'.format(code)
            #return '\\u%04x' % (code,)
        # Non-BMP character: emit a UTF-16 surrogate pair.
        code -= 0x10000
        high = 0xd800 | ((code >> 10) & 0x3ff)
        low = 0xdc00 | (code & 0x3ff)
        return '\\u{0:04x}\\u{1:04x}'.format(high, low)
    return '"' + ESCAPE_ASCII.sub(replace, s) + '"'
# Prefer the C-accelerated ASCII escaper when _json is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str               | string        |
    +-------------------+---------------+
    | int, float        | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; __init__ may override them via *separators*.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
                 check_circular=True, allow_nan=True, sort_keys=False,
                 indent=None, separators=None, default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming non-ASCII characters escaped.  If
        ensure_ascii is false, the output can contain non-ASCII characters.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be an (item_separator, key_separator)
        tuple.  The default is (', ', ': ') if *indent* is ``None`` and
        (',', ': ') otherwise.  To get the most compact JSON representation,
        you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            # Pretty-printed output drops the space after the item comma.
            self.item_separator = ','
        if default is not None:
            self.default = default

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                # Let the base class default method raise the TypeError
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from json.encoder import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, str):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring

        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on the
            # internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text

        # The C accelerator handles only one-shot, non-indented encoding;
        # otherwise fall back to the pure-Python generator factory.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
    ):
    """Build and return the pure-Python ``_iterencode(o, indent_level)``
    generator used by JSONEncoder.iterencode when the C accelerator is
    unavailable or unsuitable.
    """
    if _indent is not None and not isinstance(_indent, str):
        _indent = ' ' * _indent

    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array representation of *lst*, chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item the pending prefix is the separator.
                buf = separator
            if isinstance(value, str):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, int):
                # Subclasses of int/float may override __str__, but we still
                # want to encode them as integers/floats in JSON. One example
                # within the standard library is IntEnum.
                yield buf + str(int(value))
            elif isinstance(value, float):
                # see comment above for int
                yield buf + _floatstr(float(value))
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object representation of *dct*, chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.items()
        for key, value in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                # see comment for int/float in _make_iterencode
                key = _floatstr(float(key))
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, int):
                # see comment for int/float in _make_iterencode
                key = str(int(key))
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, str):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, int):
                # see comment for int/float in _make_iterencode
                yield str(int(value))
            elif isinstance(value, float):
                # see comment for int/float in _make_iterencode
                yield _floatstr(float(value))
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher: scalars inline, containers via the helpers,
        # everything else through _default() and a recursive retry.
        if isinstance(o, str):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, int):
            # see comment for int/float in _make_iterencode
            yield str(int(o))
        elif isinstance(o, float):
            # see comment for int/float in _make_iterencode
            yield _floatstr(float(o))
        elif isinstance(o, (list, tuple)):
            yield from _iterencode_list(o, _current_indent_level)
        elif isinstance(o, dict):
            yield from _iterencode_dict(o, _current_indent_level)
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            yield from _iterencode(o, _current_indent_level)
            if markers is not None:
                del markers[markerid]
    return _iterencode
|
gpl-3.0
|
xcgd/account_voucher_sepa
|
account_sepa_purpose.py
|
1
|
1468
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 XCG Consulting
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class account_sepa_purpose(osv.Model):
    """Payment category purpose code for SEPA payments, as specified by
    ISO 20022.
    """

    _name = 'account.sepa.purpose'

    # Short ISO 20022 purpose code and its human-readable label.
    _columns = {
        'code': fields.char(size=8, string='Code', required=True),
        'name': fields.char(size=64, string='Name', required=True),
    }
|
agpl-3.0
|
minhphung171093/GreenERP
|
openerp/addons/base/tests/test_translate.py
|
10
|
6065
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
    # Unit tests for the PO-file quote()/unquote() helpers and for
    # xml_translate() term extraction from view XML.

    def test_quote_unquote(self):
        def test_string(str):
            quoted = quote(str)
            #print "\n1:", repr(str)
            #print "2:", repr(quoted)
            unquoted = unquote("".join(quoted.split('"\n"')))
            #print "3:", repr(unquoted)
            self.assertEquals(str, unquoted)
        test_string("""test \nall kinds\n \n o\r
 \\\\ nope\n\n"
 """)
        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment
        self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
 \\\\nope\n\n"
 """)

    def test_translate_xml_base(self):
        """ Test xml_translate() without formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah blah blah</h1>
                        Put some more text here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah', 'Put some more text here'])

    def test_translate_xml_inline1(self):
        """ Test xml_translate() with formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah <i>blah</i> blah</h1>
                        Put some <b>more text</b> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])

    def test_translate_xml_inline2(self):
        """ Test xml_translate() with formatting elements embedding other elements. """
        terms = []
        source = """<form string="Form stuff">
                        <b><h1>Blah <i>blah</i> blah</h1></b>
                        Put <em>some <b>more text</b></em> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])

    def test_translate_xml_inline3(self):
        """ Test xml_translate() with formatting elements without actual text. """
        terms = []
        source = """<form string="Form stuff">
                        <div>
                            <span class="before"/>
                            <h1>Blah blah blah</h1>
                            <span class="after">
                                <i class="hack"/>
                            </span>
                        </div>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah'])

    def test_translate_xml_t(self):
        """ Test xml_translate() with t-* attributes. """
        terms = []
        source = """<t t-name="stuff">
                        stuff before
                        <span t-field="o.name"/>
                        stuff after
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])

    def test_translate_xml_off(self):
        """ Test xml_translate() with attribute translate="off". """
        terms = []
        # NOTE(review): the attribute below is spelled "translation" while the
        # docstring says translate="off" — confirm which spelling
        # xml_translate actually honors; mismatch would make this test vacuous.
        source = """<div>
                        stuff before
                        <div translation="off">Do not translate this</div>
                        stuff after
                    </div>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])

    def test_translate_xml_attribute(self):
        """ Test xml_translate() with <attribute> elements. """
        terms = []
        source = """<field name="foo" position="attributes">
                        <attribute name="string">Translate this</attribute>
                        <attribute name="option">Do not translate this</attribute>
                    </field>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['Translate this'])

    def test_translate_xml_a(self):
        """ Test xml_translate() with <a> elements. """
        terms = []
        source = """<t t-name="stuff">
                        <ul class="nav navbar-nav">
                            <li>
                                <a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
                                    <span class="oe_menu_text">Blah</span>
                                </a>
                            </li>
                            <li class="dropdown" id="menu_more_container" style="display: none;">
                                <a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
                                <ul class="dropdown-menu" id="menu_more"/>
                            </li>
                        </ul>
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEquals(result, source)
        self.assertItemsEqual(terms,
            ['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
|
gpl-3.0
|
maartenq/ansible
|
lib/ansible/plugins/action/ironware.py
|
21
|
4380
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ironware.ironware import ironware_provider_spec
try:
    # Reuse the Display instance shared by the Ansible CLI when running
    # inside it, so verbosity settings are honoured.
    from __main__ import display
except ImportError:
    # Fallback for standalone use (e.g. tests): create a private Display.
    from ansible.utils.display import Display
    display = Display()
class ActionModule(_ActionModule):
    """Action plugin for ironware network modules.

    Ensures a persistent CLI connection to the device exists before the
    module executes: with ``connection=network_cli`` the connection is
    already managed by Ansible; with the legacy ``connection=local`` style
    it builds a network_cli play context from the ``provider`` dict and
    opens the persistent connection itself.  In both cases it then makes
    sure the device prompt is in enable mode (not config mode).
    """

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect
        socket_path = None
        if self._play_context.connection == 'network_cli':
            # network_cli manages the connection itself; a provider dict is
            # redundant here and is dropped with a warning.
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            # Legacy mode: derive a network_cli play context from the
            # provider dict, falling back to play-context values.
            provider = load_provider(ironware_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'ironware'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.become = provider['authorize'] or False
            if pc.become:
                # 'enable' escalation is how privileged mode is entered on
                # this platform.
                pc.become_method = 'enable'
                pc.become_pass = provider['auth_pass']
            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
            # An explicit provider timeout overrides the connection default.
            command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
            connection.set_options(direct={'persistent_command_timeout': command_timeout})
            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
        # make sure we are in the right cli context which should be
        # enable mode and not config module
        if socket_path is None:
            socket_path = self._connection.socket_path
        conn = Connection(socket_path)
        try:
            out = conn.get_prompt()
            # A prompt ending in ')#' indicates config mode; send 'exit'
            # until we are back at the enable-mode prompt.
            while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('exit')
                out = conn.get_prompt()
        except ConnectionError as exc:
            return {'failed': True, 'msg': to_text(exc)}
        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
|
gpl-3.0
|
saifrahmed/bokeh
|
examples/plotting/server/markers.py
|
42
|
1618
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from numpy.random import random
from bokeh.plotting import figure, show, output_server
def mscatter(p, x, y, typestr):
    """Draw one cluster of scatter glyphs of marker type *typestr* on *p*."""
    marker_style = dict(line_color="#6666ee", fill_color="#ee6666",
                        fill_alpha=0.5, size=12)
    p.scatter(x, y, marker=typestr, **marker_style)
def mtext(p, x, y, textstr):
    """Place a centered green caption *textstr* at (x, y) on figure *p*."""
    label_style = dict(text_color="#449944", text_align="center",
                       text_font_size="10pt")
    p.text(x, y, text=[textstr], **label_style)
# Publish the plot to the running plot server under the "markers" document.
output_server("markers")
p = figure(title="markers.py example")
# Each cluster is N random points, offset onto a 4x3 grid so every marker
# type gets its own cell, with a caption under each cluster.
N = 10
mscatter(p, random(N)+2, random(N)+1, "circle")
mscatter(p, random(N)+4, random(N)+1, "square")
mscatter(p, random(N)+6, random(N)+1, "triangle")
mscatter(p, random(N)+8, random(N)+1, "asterisk")
mscatter(p, random(N)+2, random(N)+4, "circle_x")
mscatter(p, random(N)+4, random(N)+4, "square_x")
mscatter(p, random(N)+6, random(N)+4, "inverted_triangle")
mscatter(p, random(N)+8, random(N)+4, "x")
mscatter(p, random(N)+2, random(N)+7, "circle_cross")
mscatter(p, random(N)+4, random(N)+7, "square_cross")
mscatter(p, random(N)+6, random(N)+7, "diamond")
mscatter(p, random(N)+8, random(N)+7, "cross")
mtext(p, [2.5], [0.5], "circle / o")
mtext(p, [4.5], [0.5], "square")
mtext(p, [6.5], [0.5], "triangle")
mtext(p, [8.5], [0.5], "asterisk / *")
mtext(p, [2.5], [3.5], "circle_x / ox")
mtext(p, [4.5], [3.5], "square_x")
mtext(p, [6.5], [3.5], "inverted_triangle")
mtext(p, [8.5], [3.5], "x")
mtext(p, [2.5], [6.5], "circle_cross / o+")
mtext(p, [4.5], [6.5], "square_cross")
mtext(p, [6.5], [6.5], "diamond")
mtext(p, [8.5], [6.5], "cross / +")
show(p)  # open a browser
|
bsd-3-clause
|
rainslytherin/ansible
|
plugins/inventory/collins.py
|
22
|
18049
|
#!/usr/bin/env python
"""
Collins external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
Collins is a hardware asset management system originally developed by
Tumblr for tracking new hardware as it built out its own datacenters. It
exposes a rich API for manipulating and querying one's hardware inventory,
which makes it an ideal 'single point of truth' for driving systems
automation like Ansible. Extensive documentation on Collins, including a quickstart,
API docs, and a full reference manual, can be found here:
http://tumblr.github.io/collins
This script adds support to Ansible for obtaining a dynamic inventory of
assets in your infrastructure, grouping them in Ansible by their useful attributes,
and binding all facts provided by Collins to each host so that they can be used to
drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
Cobbler inventory script.
To use it, copy it to your repo and pass -i <collins script> to the ansible or
ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
to /etc/ansible and this script to /etc/ansible/hosts.
Alongside the options set in collins.ini, there are several environment variables
that will be used instead of the configured values if they are set:
- COLLINS_USERNAME - specifies a username to use for Collins authentication
- COLLINS_PASSWORD - specifies a password to use for Collins authentication
- COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
this can be used to run Ansible automation against different asset classes than
server nodes, such as network switches and PDUs
- COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
<location of collins.py>/collins.ini
If errors are encountered during operation, this script will return an exit code of
255; otherwise, it will return an exit code of 0.
Collins attributes are accessible as variables in ansible via COLLINS['attribute_name'].
Tested against Ansible 1.8.2 and Collins 1.3.0.
"""
# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import base64
import ConfigParser
import logging
import os
import re
import sys
from time import time
import traceback
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
class CollinsDefaults(object):
    """Constants shared across the Collins inventory implementation."""

    # Template for the Collins asset-search endpoint; '%s' is the host URL.
    ASSETS_API_ENDPOINT = '%s/api/assets'
    # Built-in asset fields that get special handling during indexing.
    SPECIAL_ATTRIBUTES = set(('CREATED', 'DELETED', 'UPDATED', 'STATE'))
    # Format used for entries written to the configured log file.
    LOG_FORMAT = '%(asctime)-15s %(message)s'
class Error(Exception):
    """Base exception for errors raised by this inventory script."""
    pass
class MaxRetriesError(Error):
    """Raised when Collins queries keep failing after the configured
    number of retries (see find_assets)."""
    pass
class CollinsInventory(object):
    """ Builds an Ansible inventory (plus per-host facts) from the assets
        registered in a Collins asset database, with file-based caching. """

    def __init__(self):
        """ Constructs CollinsInventory object and reads all configuration. """
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()  # Details about hosts in the inventory
        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()
        logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
            filename=self.log_location)
        self.log = logging.getLogger('CollinsInventory')

    def _asset_get_attribute(self, asset, attrib):
        """ Returns a user-defined attribute from an asset if it exists; otherwise,
            returns None. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return asset['ATTRIBS'][attrib_block][attrib]
        return None

    def _asset_has_attribute(self, asset, attrib):
        """ Returns whether a user-defined attribute is present on an asset. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return True
        return False

    def run(self):
        """ Main execution path: refresh or load the cache, then print the
            requested JSON (host facts or full inventory) to stdout. """
        # Updates cache if cache is not present or has expired.
        successful = True
        if self.args.refresh_cache:
            successful = self.update_cache()
        elif not self.is_cache_valid():
            successful = self.update_cache()
        else:
            successful = self.load_inventory_from_cache()
            successful &= self.load_cache_from_cache()
        data_to_print = ""
        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
        else:  # default action with no options
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
        # print() with a single argument behaves identically on Python 2.
        print(data_to_print)
        return successful

    def find_assets(self, attributes=None, operation='AND'):
        """ Obtains Collins assets matching the provided attributes.

            'attributes' maps Collins attribute names to required values;
            'operation' is the CQL boolean operator joining them.
            Raises MaxRetriesError after too many consecutive failures. """
        # FIX: default used to be a shared mutable dict literal.
        if attributes is None:
            attributes = {}
        # Formats asset search query to locate assets matching attributes, using
        # the CQL search feature as described here:
        # http://tumblr.github.io/collins/recipes.html
        attributes_query = ['='.join(attr_pair)
                            for attr_pair in attributes.items()]
        query_parameters = {
            'details': ['True'],
            'operation': [operation],
            'query': attributes_query,
            'remoteLookup': [str(self.query_remote_dcs)],
            'size': [self.results_per_query],
            'type': [self.collins_asset_type],
        }
        assets = []
        cur_page = 0
        num_retries = 0
        # Locates all assets matching the provided query, exhausting pagination.
        while True:
            if num_retries == self.collins_max_retries:
                raise MaxRetriesError("Maximum of %s retries reached; giving up" %
                    self.collins_max_retries)
            query_parameters['page'] = cur_page
            query_url = "%s?%s" % (
                (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
                urllib.urlencode(query_parameters, doseq=True)
            )
            request = urllib2.Request(query_url)
            request.add_header('Authorization', self.basic_auth_header)
            try:
                response = urllib2.urlopen(request, timeout=self.collins_timeout_secs)
                json_response = json.loads(response.read())
                # Adds any assets found to the array of assets.
                assets += json_response['data']['Data']
                # If we've retrieved all of our assets, breaks out of the loop.
                if len(json_response['data']['Data']) == 0:
                    break
                cur_page += 1
                num_retries = 0
            except Exception:
                # FIX: narrowed from bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed by the retry loop.
                self.log.error("Error while communicating with Collins, retrying:\n%s",
                    traceback.format_exc())
                num_retries += 1
        return assets

    def is_cache_valid(self):
        """ Determines if the cache files have expired, or if it is still valid """
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_inventory):
                    return True
        return False

    def read_settings(self):
        """ Reads the settings from the collins.ini file """
        config_loc = os.getenv('COLLINS_CONFIG',
            os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
        config = ConfigParser.SafeConfigParser()
        # BUG FIX: read the resolved location.  Previously the default path
        # was always read, silently ignoring the COLLINS_CONFIG override
        # advertised in the module docstring.
        config.read(config_loc)
        self.collins_host = config.get('collins', 'host')
        self.collins_username = os.getenv('COLLINS_USERNAME',
            config.get('collins', 'username'))
        self.collins_password = os.getenv('COLLINS_PASSWORD',
            config.get('collins', 'password'))
        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE',
            config.get('collins', 'asset_type'))
        self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
        self.collins_max_retries = config.getint('collins', 'max_retries')
        self.results_per_query = config.getint('collins', 'results_per_query')
        self.ip_address_index = config.getint('collins', 'ip_address_index')
        self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
        self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
        cache_path = config.get('collins', 'cache_path')
        self.cache_path_cache = cache_path + \
            '/ansible-collins-%s.cache' % self.collins_asset_type
        self.cache_path_inventory = cache_path + \
            '/ansible-collins-%s.index' % self.collins_asset_type
        self.cache_max_age = config.getint('collins', 'cache_max_age')
        log_path = config.get('collins', 'log_path')
        self.log_location = log_path + '/ansible-collins.log'
        # Precomputed HTTP Basic auth header used for all Collins requests;
        # [:-1] strips encodestring()'s trailing newline.
        self.basic_auth_header = "Basic %s" % base64.encodestring(
            '%s:%s' % (self.collins_username, self.collins_password))[:-1]

    def parse_cli_args(self):
        """ Command line argument processing """
        parser = argparse.ArgumentParser(
            description='Produces an Ansible Inventory file based on Collins')
        parser.add_argument('--list',
            action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host',
            action='store', help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache',
            action='store_true', default=False,
            help='Force refresh of cache by making API requests to Collins '
                 '(default: False - use cache files)')
        parser.add_argument('--pretty',
            action='store_true', default=False, help='Pretty print all JSON output')
        self.args = parser.parse_args()

    def update_cache(self):
        """ Make calls to Collins and saves the output in a cache """
        self.cache = dict()
        self.inventory = dict()
        # Locates all server assets from Collins.
        try:
            server_assets = self.find_assets()
        except Exception:
            self.log.error("Error while locating assets from Collins:\n%s",
                traceback.format_exc())
            return False
        for asset in server_assets:
            # Determines the index to retrieve the asset's IP address either by an
            # attribute set on the Collins asset or the pre-configured value.
            if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
                ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
                try:
                    ip_index = int(ip_index)
                except (TypeError, ValueError):
                    self.log.error(
                        "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
                        ip_index)
                    # BUG FIX: fall back to the configured index; previously
                    # the non-integer value was kept and crashed the address
                    # arithmetic below.
                    ip_index = self.ip_address_index
            else:
                ip_index = self.ip_address_index
            asset['COLLINS'] = {}
            # Attempts to locate the asset's primary identifier (hostname or IP address),
            # which will be used to index the asset throughout the Ansible inventory.
            if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
                asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
            elif 'ADDRESSES' not in asset:
                self.log.warning("No IP addresses found for asset '%s', skipping",
                    asset)
                continue
            elif len(asset['ADDRESSES']) < ip_index + 1:
                self.log.warning(
                    "No IP address found at index %s for asset '%s', skipping",
                    ip_index, asset)
                continue
            else:
                asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
            # Adds an asset index to the Ansible inventory based upon unpacking
            # the name of the asset's current STATE from its dictionary.
            if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
                state_inventory_key = self.to_safe(
                    'STATE-%s' % asset['ASSET']['STATE']['NAME'])
                self.push(self.inventory, state_inventory_key, asset_identifier)
            # Indexes asset by all user-defined Collins attributes.
            if 'ATTRIBS' in asset:
                for attrib_block in asset['ATTRIBS'].keys():
                    for attrib in asset['ATTRIBS'][attrib_block].keys():
                        asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
                        attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
                        self.push(self.inventory, attrib_key, asset_identifier)
            # Indexes asset by all built-in Collins attributes.
            for attribute in asset['ASSET'].keys():
                if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
                    attribute_val = asset['ASSET'][attribute]
                    if attribute_val is not None:
                        attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
                        self.push(self.inventory, attrib_key, asset_identifier)
            # Indexes asset by hardware product information.
            if 'HARDWARE' in asset:
                if 'PRODUCT' in asset['HARDWARE']['BASE']:
                    product = asset['HARDWARE']['BASE']['PRODUCT']
                    if product:
                        product_key = self.to_safe(
                            'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
                        self.push(self.inventory, product_key, asset_identifier)
            # Indexing now complete, adds the host details to the asset cache.
            self.cache[asset_identifier] = asset
        try:
            self.write_to_cache(self.cache, self.cache_path_cache)
            self.write_to_cache(self.inventory, self.cache_path_inventory)
        except Exception:
            self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
            return False
        return True

    def push(self, dictionary, key, value):
        """ Adds a value to a list at a dictionary key, creating the list if it doesn't
            exist. """
        if key not in dictionary:
            dictionary[key] = []
        dictionary[key].append(value)

    def get_host_info(self):
        """ Get variables about a specific host. """
        if not self.cache or len(self.cache) == 0:
            # Need to load index from cache
            self.load_cache_from_cache()
        if self.args.host not in self.cache:
            # try updating the cache
            self.update_cache()
        if self.args.host not in self.cache:
            # host might not exist anymore
            return self.json_format_dict({}, self.args.pretty)
        return self.json_format_dict(self.cache[self.args.host], self.args.pretty)

    def load_inventory_from_cache(self):
        """ Reads the inventory from the cache file and sets self.inventory """
        try:
            # FIX: 'with' guarantees the handle is closed even on errors.
            with open(self.cache_path_inventory, 'r') as cache:
                self.inventory = json.loads(cache.read())
            return True
        except Exception:
            self.log.error("Error while loading inventory:\n%s",
                traceback.format_exc())
            self.inventory = {}
            return False

    def load_cache_from_cache(self):
        """ Reads the cache from the cache file and sets self.cache """
        try:
            with open(self.cache_path_cache, 'r') as cache:
                self.cache = json.loads(cache.read())
            return True
        except Exception:
            self.log.error("Error while loading host cache:\n%s",
                traceback.format_exc())
            self.cache = {}
            return False

    def write_to_cache(self, data, filename):
        """ Writes data in JSON format to a specified file. """
        json_data = self.json_format_dict(data, self.args.pretty)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """ Converts 'bad' characters in a string to underscores so they
            can be used as Ansible groups """
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Script entry point: emit the inventory (or single-host facts) as JSON and
# exit 0 on success, non-zero on failure.
# BUG FIX: 'if __name__ in "__main__"' was a substring test that only worked
# by accident; use equality.
if __name__ == '__main__':
    inventory = CollinsInventory()
    if inventory.run():
        sys.exit(0)
    else:
        sys.exit(-1)
|
gpl-3.0
|
f-prettyland/angr
|
tests/test_director.py
|
5
|
2232
|
import os
import sys
import logging
import nose.tools
import angr
from angr.sim_type import SimTypePointer, SimTypeChar
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_execute_address_brancher():
    """Director should deprioritize states that stray from an address goal."""
    binary = os.path.join(test_location, 'x86_64', 'brancher')
    project = angr.Project(binary, load_options={'auto_load_libs': False})
    manager = project.factory.simgr()
    # Set up the Director technique with a single execute-address goal.
    director = angr.exploration_techniques.Director(num_fallback_states=1)
    director.add_goal(angr.exploration_techniques.ExecuteAddressGoal(0x400594))
    manager.use_technique(director)
    manager.explore(find=(0x4005b4,))
    # At least one state must have been deprioritized along the way.
    nose.tools.assert_greater(len(manager.deprioritized), 0)
def test_call_function_brancher():
    """Director should satisfy a call-function goal and fire the callback."""
    class Captured(object):
        # Filled in by goal_reached_callback below.
        the_state = None
        the_goal = None

    def goal_reached_callback(goal, p, pg):  # pylint:disable=unused-argument
        Captured.the_state = p
        Captured.the_goal = goal

    binary = os.path.join(test_location, 'x86_64', 'brancher')
    project = angr.Project(binary, load_options={'auto_load_libs': False})
    manager = project.factory.simgr()
    # Director with a goal of calling puts(ptr) where the pointed value >= 20.
    director = angr.exploration_techniques.Director(
        cfg_keep_states=True, goal_satisfied_callback=goal_reached_callback,
        num_fallback_states=1
    )
    _ = project.analyses.CFG()
    puts_func = project.kb.functions.function(name='puts')
    goal = angr.exploration_techniques.CallFunctionGoal(puts_func, [(SimTypePointer(SimTypeChar()), ">=20")])
    director.add_goal(goal)
    manager.use_technique(director)
    manager.explore(find=(0x40059e,))
    nose.tools.assert_greater(len(manager.deprioritized), 0)
    nose.tools.assert_greater(len(manager.found), 0)
    nose.tools.assert_is_not(Captured.the_state, None)
    nose.tools.assert_is(Captured.the_goal, goal)
# Entry point: run one named test ("python test_director.py <name>" runs
# test_<name>), or every test_* function when no argument is given.
if __name__ == "__main__":
    logging.getLogger('angr.exploration_techniques.director').setLevel(logging.DEBUG)
    if len(sys.argv) > 1:
        globals()['test_' + sys.argv[1]]()
    else:
        g = globals().copy()
        # FIX: .items() instead of the Python-2-only .iteritems(); behaviour
        # is identical on Python 2 and keeps the script Python-3 portable.
        for k, v in g.items():
            if k.startswith("test_") and hasattr(v, '__call__'):
                v()
|
bsd-2-clause
|
chengdh/openerp-ktv
|
openerp/addons/base_module_doc_rst/wizard/tech_rst_guide.py
|
9
|
17321
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import netsvc
import base64
import tempfile
import tarfile
import httplib
import os
class RstDoc(object):
    """Renders the technical documentation of one module as an RST page.

    'module' is an ir.module.module browse record; 'objects' is the list of
    {'object': ..., 'fields': ...} dicts produced by the wizard below.
    """
    def __init__(self, module, objects):
        # Pre-compute every placeholder used by the %-templates in the
        # _write_* methods.
        self.dico = {
            'name': module.name,
            'shortdesc': module.shortdesc,
            'latest_version': module.latest_version,
            'website': module.website,
            'description': self._handle_text(module.description.strip() or 'None'),
            'report_list': self._handle_list_items(module.reports_by_module),
            'menu_list': self._handle_list_items(module.menus_by_module),
            'view_list': self._handle_list_items(module.views_by_module),
            'depends': module.dependencies_id,
            'quality_certified': bool(module.certificate) and 'yes' or 'no',
            'official_module': str(module.certificate)[:2] == '00' and 'yes' or 'no',
            'author': module.author,
            'quality_certified_label': self._quality_certified_label(module),
        }
        self.objects = objects
        self.module = module
    def _quality_certified_label(self, module):
        """Return the certification label derived from the certificate prefix."""
        label = ""
        certificate = module.certificate
        if certificate and len(certificate) > 1:
            if certificate[:2] == '00':
                # addons
                label = "(Official, Quality Certified)"
            elif certificate[:2] == '01':
                # extra addons
                label = "(Quality Certified)"
        return label
    def _handle_list_items(self, list_item_as_string):
        """Split a newline-separated string into a list of RST-escaped items."""
        list_item_as_string = list_item_as_string.strip()
        if list_item_as_string:
            # '*' would start RST emphasis, so escape it in item text.
            return [item.replace('*', '\*') for item in list_item_as_string.split('\n')]
        else:
            return []
    def _handle_text(self, txt):
        """Indent every line so the text nests inside an RST literal block."""
        lst = ['  %s' % line for line in txt.split('\n')]
        return '\n'.join(lst)
    def _get_download_links(self):
        """Probe openerp.com for downloadable zips of this module and return
        an RST bullet list of the versions that respond with HTTP 200."""
        def _is_connection_status_good(link):
            # HEAD request only; any error is logged and treated as "absent".
            server = "openerp.com"
            status_good = False
            try:
                conn = httplib.HTTPConnection(server)
                conn.request("HEAD", link)
                res = conn.getresponse()
                if res.status in (200, ):
                    status_good = True
            except (Exception, ), e:
                logger = netsvc.Logger()
                msg = "error connecting to server '%s' with link '%s'. Error message: %s" % (server, link, str(e))
                logger.notifyChannel("base_module_doc_rst", netsvc.LOG_ERROR, msg)
                status_good = False
            return status_good
        versions = ('4.2', '5.0', 'trunk')
        download_links = []
        for ver in versions:
            link = 'http://www.openerp.com/download/modules/%s/%s.zip' % (ver, self.dico['name'])
            if _is_connection_status_good(link):
                download_links.append(" * `%s <%s>`_" % (ver, link))
        if download_links:
            res = '\n'.join(download_links)
        else:
            res = "(No download links available)"
        return res
    def _write_header(self):
        """Build the RST page header: title, metadata field list, description
        and download links, filled from self.dico."""
        dico = self.dico
        title = "%s (*%s*)" % (dico['shortdesc'], dico['name'])
        title_underline = "=" * len(title)
        dico['title'] = title
        dico['title_underline'] = title_underline
        dico['download_links'] = self._get_download_links()
        sl = [
            "",
            ".. module:: %(name)s",
            "    :synopsis: %(shortdesc)s %(quality_certified_label)s",
            "    :noindex:",
            ".. ",
            "",
            ".. raw:: html",
            "",
            "      <br />",
            """    <link rel="stylesheet" href="../_static/hide_objects_in_sidebar.css" type="text/css" />""",
            "",
            """.. tip:: This module is part of the OpenERP software, the leading Open Source """,
            """    enterprise management system. If you want to discover OpenERP, check our """,
            """    `screencasts <http://openerp.tv>`_ or download """,
            """    `OpenERP <http://openerp.com>`_ directly.""",
            "",
            ".. raw:: html",
            "",
            """    <div class="js-kit-rating" title="" permalink="" standalone="yes" path="/%s"></div>""" % (dico['name'], ),
            """    <script src="http://js-kit.com/ratings.js"></script>""",
            "",
            "%(title)s",
            "%(title_underline)s",
            ":Module: %(name)s",
            ":Name: %(shortdesc)s",
            ":Version: %(latest_version)s",
            ":Author: %(author)s",
            ":Directory: %(name)s",
            ":Web: %(website)s",
            ":Official module: %(official_module)s",
            ":Quality certified: %(quality_certified)s",
            "",
            "Description",
            "-----------",
            "",
            "::",
            "",
            "%(description)s",
            "",
            "Download links",
            "--------------",
            "",
            "You can download this module as a zip file in the following version:",
            "",
            "%(download_links)s",
            "",
            ""]
        return '\n'.join(sl) % (dico)
    def _write_reports(self):
        """Render the "Reports" section (bullet list or "None")."""
        sl = ["",
              "Reports",
              "-------"]
        reports = self.dico['report_list']
        if reports:
            for report in reports:
                if report:
                    sl.append("")
                    sl.append(" * %s" % report)
        else:
            sl.extend(["", "None", ""])
        sl.append("")
        return '\n'.join(sl)
    def _write_menus(self):
        """Render the "Menus" section (bullet list or "None")."""
        sl = ["",
              "Menus",
              "-------",
              ""]
        menus = self.dico['menu_list']
        if menus:
            for menu in menus:
                if menu:
                    sl.append(" * %s" % menu)
        else:
            sl.extend(["", "None", ""])
        sl.append("")
        return '\n'.join(sl)
    def _write_views(self):
        """Render the "Views" section (bullet list or "None")."""
        sl = ["",
              "Views",
              "-----",
              ""]
        views = self.dico['view_list']
        if views:
            for view in views:
                if view:
                    sl.append(" * %s" % view)
        else:
            sl.extend(["", "None", ""])
        sl.append("")
        return '\n'.join(sl)
    def _write_depends(self):
        """Render the "Dependencies" section as :mod: cross-references."""
        sl = ["",
              "Dependencies",
              "------------",
              ""]
        depends = self.dico['depends']
        if depends:
            for dependency in depends:
                sl.append(" * :mod:`%s`" % (dependency.name))
        else:
            sl.extend(["", "None", ""])
        sl.append("")
        return '\n'.join(sl)
    def _write_objects(self):
        """Render one sub-section per object with an RST field list of its
        model fields (name, label, type, required/readonly flags, help)."""
        def write_field(field_def):
            # field_def is a (name, attributes-dict) pair from fields_get();
            # anything else is logged and skipped.
            if not isinstance(field_def, tuple):
                logger = netsvc.Logger()
                msg = "Error on Object %s: field_def: %s [type: %s]" % (obj_name.encode('utf8'), field_def.encode('utf8'), type(field_def))
                logger.notifyChannel("base_module_doc_rst", netsvc.LOG_ERROR, msg)
                return ""
            field_name = field_def[0]
            field_dict = field_def[1]
            field_required = field_dict.get('required', '') and ', required'
            field_readonly = field_dict.get('readonly', '') and ', readonly'
            field_help_s = field_dict.get('help', '')
            if field_help_s:
                field_help_s = "*%s*" % (field_help_s)
                field_help = '\n'.join(['  %s' % line.strip() for line in field_help_s.split('\n')])
            else:
                field_help = ''
            sl = ["",
                  ":%s: %s, %s%s%s" % (field_name, field_dict.get('string', 'Unknown'), field_dict['type'], field_required, field_readonly),
                  "",
                  field_help,
                  ]
            return '\n'.join(sl)
        sl = ["",
              "",
              "Objects",
              "-------"]
        if self.objects:
            for obj in self.objects:
                obj_name = obj['object'].name
                obj_model = obj['object'].model
                title = "Object: %s (%s)" % (obj_name, obj_model)
                slo = [
                    "",
                    title,
                    '#' * len(title),
                    "",
                    ]
                for field in obj['fields']:
                    slf = [
                        "",
                        write_field(field),
                        "",
                        ]
                    slo.extend(slf)
                sl.extend(slo)
        else:
            sl.extend(["", "None", ""])
        # Field labels/help may contain non-ASCII text; join as unicode.
        return u'\n'.join([a.decode('utf8') for a in sl])
    def _write_relationship_graph(self, module_name=False):
        """Render the "Relationship Graph" section referencing the PNG that
        the wizard packs into the tarball as <module>_module.png."""
        sl = ["",
              "Relationship Graph",
              "------------------",
              "",
              ".. figure:: %s_module.png" % (module_name, ),
              "   :scale: 50",
              "   :align: center",
              ""]
        sl.append("")
        return '\n'.join(sl)
    def write(self, module_name=False):
        """Assemble the full RST document; the graph section is appended only
        when a module_name (and hence a graph image) is available."""
        s = ''
        s += self._write_header()
        s += self._write_depends()
        s += self._write_reports()
        s += self._write_menus()
        s += self._write_views()
        s += self._write_objects()
        if module_name:
            s += self._write_relationship_graph(module_name)
        return s
class wizard_tech_guide_rst(osv.osv_memory):
    """Wizard that generates a gzipped tarball of RST technical guides
    (one .rst per selected module plus an index.rst) and exposes it as a
    downloadable binary field."""
    _name = "tech.guide.rst"
    _columns = {
        'rst_file': fields.binary('File', required=True, readonly=True),
    }
    def _generate(self, cr, uid, context):
        """Build the .tgz for the modules in context['active_ids'] and
        return it base64-encoded (used as the default of 'rst_file')."""
        module_model = self.pool.get('ir.module.module')
        module_ids = context['active_ids']
        module_index = []
        # create a temporary gzipped tarfile:
        tgz_tmp_filename = tempfile.mktemp('_rst_module_doc.tgz')
        try:
            tarf = tarfile.open(tgz_tmp_filename, 'w:gz')
            modules = module_model.browse(cr, uid, module_ids)
            for module in modules:
                index_dict = {
                    'name': module.name,
                    'shortdesc': module.shortdesc,
                }
                module_index.append(index_dict)
                objects = self._get_objects(cr, uid, module)
                module.test_views = self._get_views(cr, uid, module.id, context=context)
                rstdoc = RstDoc(module, objects)
                # Append Relationship Graph on rst
                graph_mod = False
                module_name = False
                if module.file_graph:
                    graph_mod = base64.decodestring(module.file_graph)
                else:
                    # No stored graph: ask the module model to produce one.
                    module_data = module_model.get_relation_graph(cr, uid, module.name, context=context)
                    if module_data['module_file']:
                        graph_mod = base64.decodestring(module_data['module_file'])
                if graph_mod:
                    module_name = module.name
                    try:
                        tmpdir = tempfile.mkdtemp()
                        tmp_file_graph = tempfile.NamedTemporaryFile()
                        tmp_file_graph.write(graph_mod)
                        tmp_file_graph.file.flush()
                        # The PNG name must match the .. figure:: reference
                        # written by RstDoc._write_relationship_graph().
                        tarf.add(tmp_file_graph.name, arcname= module.name + '_module.png')
                    finally:
                        tmp_file_graph.close()
                out = rstdoc.write(module_name)
                try:
                    tmp_file = tempfile.NamedTemporaryFile()
                    tmp_file.write(out.encode('utf8'))
                    tmp_file.file.flush() # write content to file
                    tarf.add(tmp_file.name, arcname=module.name + '.rst')
                finally:
                    tmp_file.close()
            # write index file:
            tmp_file = tempfile.NamedTemporaryFile()
            out = self._create_index(module_index)
            tmp_file.write(out.encode('utf8'))
            tmp_file.file.flush()
            tarf.add(tmp_file.name, arcname='index.rst')
        finally:
            tarf.close()
        f = open(tgz_tmp_filename, 'rb')
        out = f.read()
        f.close()
        # Best-effort cleanup of the temporary archive; failure only warns.
        if os.path.exists(tgz_tmp_filename):
            try:
                os.unlink(tgz_tmp_filename)
            except Exception, e:
                logger = netsvc.Logger()
                msg = "Temporary file %s could not be deleted. (%s)" % (tgz_tmp_filename, e)
                logger.notifyChannel("warning", netsvc.LOG_WARNING, msg)
        return base64.encodestring(out)
    def _get_views(self, cr, uid, module_id, context=None):
        """Collect, per module id, the menus, reports and views that the
        module's ir.model.data entries point at."""
        module_module_obj = self.pool.get('ir.module.module')
        model_data_obj = self.pool.get('ir.model.data')
        view_obj = self.pool.get('ir.ui.view')
        report_obj = self.pool.get('ir.actions.report.xml')
        menu_obj = self.pool.get('ir.ui.menu')
        res = {}
        mlist = module_module_obj.browse(cr, uid, [module_id], context=context)
        mnames = {}
        for m in mlist:
            mnames[m.name] = m.id
            res[m.id] = {
                'menus_by_module': [],
                'reports_by_module': [],
                'views_by_module': []
            }
        view_id = model_data_obj.search(cr, uid, [('module', 'in', mnames.keys()),
            ('model', 'in', ('ir.ui.view', 'ir.actions.report.xml', 'ir.ui.menu'))])
        for data_id in model_data_obj.browse(cr, uid, view_id, context):
            # We use try except, because views or menus may not exist
            try:
                key = data_id['model']
                if key == 'ir.ui.view':
                    v = view_obj.browse(cr, uid, data_id.res_id)
                    v_dict = {
                        'name': v.name,
                        'inherit': v.inherit_id,
                        'type': v.type}
                    res[mnames[data_id.module]]['views_by_module'].append(v_dict)
                elif key == 'ir.actions.report.xml':
                    res[mnames[data_id.module]]['reports_by_module'].append(report_obj.browse(cr, uid, data_id.res_id).name)
                elif key == 'ir.ui.menu':
                    res[mnames[data_id.module]]['menus_by_module'].append(menu_obj.browse(cr, uid, data_id.res_id).complete_name)
            except (KeyError, ):
                pass
        return res
    def _create_index(self, module_index):
        """Render index.rst: a toctree listing every generated module page."""
        sl = ["",
              ".. _module-technical-guide-link:",
              "",
              "Module Technical Guide: Introspection report on objects",
              "=======================================================",
              "",
              ".. toctree::",
              "    :maxdepth: 1",
              "",
              ]
        for mod in module_index:
            sl.append("    %s" % mod['name'])
        sl.append("")
        return '\n'.join(sl)
    def _get_objects(self, cr, uid, module):
        """Return [{'object': ir.model record, 'fields': fields_get items}]
        for every model the module defines."""
        res = []
        objects = self._object_find(cr, uid, module)
        for obj in objects:
            fields = self._fields_find(cr, uid, obj.model)
            dico = {
                'object': obj,
                'fields': fields
            }
            res.append(dico)
        return res
    def _object_find(self, cr, uid, module):
        """Browse the ir.model records registered by the given module."""
        ir_model_data = self.pool.get('ir.model.data')
        ids2 = ir_model_data.search(cr, uid, [('module', '=', module.name), ('model', '=', 'ir.model')])
        ids = []
        for mod in ir_model_data.browse(cr, uid, ids2):
            ids.append(mod.res_id)
        return self.pool.get('ir.model').browse(cr, uid, ids)
    def _fields_find(self, cr, uid, obj):
        """Return fields_get() items for model name 'obj', or "" (logged)
        when the model is not present in the registry."""
        modobj = self.pool.get(obj)
        if modobj:
            res = modobj.fields_get(cr, uid).items()
            return res
        else:
            logger = netsvc.Logger()
            msg = "Object %s not found" % (obj)
            logger.notifyChannel("base_module_doc_rst", netsvc.LOG_ERROR, msg)
            return ""
    # Default: generate the tarball as soon as the wizard record is created.
    _defaults = {
        'rst_file': _generate,
    }
wizard_tech_guide_rst()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
coinkeeper/2015-04-19_21-21_nautiluscoin
|
contrib/testgen/base58.py
|
4
|
2833
|
'''
Nautiluscoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Minimal stand-in for the PyCrypto ``SHA256`` module interface.

    Only ``SHA256.new`` is used below; it maps straight to ``hashlib.sha256``.
    """
    # callable returning a fresh sha256 hash object
    new = hashlib.sha256
# On Python 3 iterating over bytes already yields ints, so shadow the
# builtins for the rest of this module: ord() becomes the identity and
# chr() packs an int into a one-byte bytes object.  b58encode/b58decode
# below rely on these redefinitions when given bytes input.
if str != bytes:
    # Python 3.x
    def ord(c):
        # c is already an int when iterating over bytes
        return c
    def chr(n):
        # build a 1-byte bytes object from the int n
        return bytes( (n,) )
# Base58 alphabet (Bitcoin flavour: no 0, O, I or l to avoid confusion).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """Encode *v*, a string of bytes, to base58.

    Leading zero bytes of the input are preserved as leading '1' characters
    (the usual base58check leading-zero compression).
    """
    # Interpret v as one big-endian integer.
    acc = 0
    for byte in v:
        acc = acc * 256 + ord(byte)
    # Peel off base-58 digits, least significant first.
    digits = []
    while acc >= __b58base:
        acc, rem = divmod(acc, __b58base)
        digits.append(__b58chars[rem])
    digits.append(__b58chars[acc])
    encoded = ''.join(reversed(digits))
    # Leading 0-bytes in the input become leading '1's in the output.
    pad = 0
    for byte in v:
        if byte != '\0':
            break
        pad += 1
    return __b58chars[0] * pad + encoded
def b58decode(v, length = None):
    """Decode the base58 string *v* into a string of bytes.

    If *length* is given and the decoded result is not exactly that many
    bytes long, ``None`` is returned instead.
    """
    # Interpret v as a big-endian base-58 integer (Horner's scheme).
    acc = 0
    for ch in v:
        acc = acc * __b58base + __b58chars.find(ch)
    # Emit base-256 digits, least significant first.
    out = bytes()
    while acc >= 256:
        acc, rem = divmod(acc, 256)
        out = chr(rem) + out
    out = chr(acc) + out
    # Leading '1' characters represent leading zero bytes.
    pad = 0
    for ch in v:
        if ch != __b58chars[0]:
            break
        pad += 1
    out = chr(0) * pad + out
    if length is not None and len(out) != length:
        return None
    return out
def checksum(v):
    """Return the first 4 bytes of double-SHA256 of *v* (32-bit checksum)."""
    inner = hashlib.sha256(v).digest()
    return hashlib.sha256(inner).digest()[:4]
def b58encode_chk(v):
    """Base58-encode *v* with its 4-byte double-SHA256 checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58check string: verify and strip the 4-byte checksum.

    Returns the payload bytes, or ``None`` if decoding fails or the
    checksum does not match.
    """
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, check = decoded[:-4], decoded[-4:]
    # Compute the double-SHA256 checksum once (the original computed it
    # twice, once into an unused local).
    if check == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Self-test (from http://gitorious.org/nautiluscoin/python-base58.git).
    # Use == rather than "is": identity comparison against an int literal
    # only works by accident of CPython small-int caching and raises a
    # SyntaxWarning on modern Pythons.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    # Round-trip: decoding with the expected length restores the input.
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
mit
|
immenz/pyload
|
module/plugins/hoster/FilepostCom.py
|
2
|
4945
|
# -*- coding: utf-8 -*-
import re
from time import time
from module.common.json_layer import json_loads
from module.plugins.internal.CaptchaService import ReCaptcha
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class FilepostCom(SimpleHoster):
    __name__ = "FilepostCom"
    __type__ = "hoster"
    __version__ = "0.34"
    __pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp\.io)/(?P<ID>[^/]+)'
    __description__ = """Filepost.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)</a>\' class="inp_text"/>'
    OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
    PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'

    RECAPTCHA_PATTERN = r'Captcha.init\({\s*key:\s*\'(.+?)\''
    FLP_TOKEN_PATTERN = r'set_store_options\({token: \'(.+?)\''

    def handleFree(self, pyfile):
        """Free-user download: obtain the token, honour the wait time, then
        solve either the file password or the recaptcha to get the link."""
        m = re.search(self.FLP_TOKEN_PATTERN, self.html)
        if m is None:
            self.error(_("Token"))
        flp_token = m.group(1)

        m = re.search(self.RECAPTCHA_PATTERN, self.html)
        if m is None:
            self.error(_("Captcha key"))
        captcha_key = m.group(1)

        # Get wait time
        get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
        post_dict = {'action': 'set_download', 'token': flp_token, 'code': self.info['pattern']['ID']}
        wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
        if wait_time > 0:
            self.wait(wait_time)

        post_dict = {"token": flp_token, "code": self.info['pattern']['ID'], "file_pass": ''}

        if 'var is_pass_exists = true;' in self.html:
            # Solve password.
            # BUGFIX: this branch previously referenced the undefined name
            # `file_pass` (NameError) and stored the result in `self.link`
            # while the final self.download() used `download_url` (a second
            # NameError); use one consistent local variable.
            password = self.getPassword()
            if password:
                self.logInfo(_("Password protected link, trying ") + password)
                get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
                post_dict['file_pass'] = password
                download_url = self.getJsonResponse(get_dict, post_dict, 'link')
                if not download_url:
                    self.fail(_("Incorrect password"))
            else:
                self.fail(_("No password found"))
        else:
            # Solve recaptcha: up to 5 attempts, the first one without a
            # challenge response (the site sometimes grants the link directly).
            recaptcha = ReCaptcha(self)
            for i in xrange(5):
                get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
                if i:
                    post_dict['recaptcha_response_field'], post_dict['recaptcha_challenge_field'] = recaptcha.challenge(
                        captcha_key)
                    self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
                        captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
                download_url = self.getJsonResponse(get_dict, post_dict, 'link')
                if download_url:
                    if i:
                        self.correctCaptcha()
                    break
                elif i:
                    self.invalidCaptcha()
            else:
                self.fail(_("Invalid captcha"))

        # Download
        self.download(download_url)

    def getJsonResponse(self, get_dict, post_dict, field):
        """POST to the getActions endpoint and pull *field* out of the
        JSON answer; retries on download_delay, returns None on captcha or
        password errors so the caller can retry."""
        res = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
        self.logDebug(res)
        if 'js' not in res:
            self.error(_("JSON %s 1") % field)
        # i changed js_answer to res['js'] since js_answer is nowhere set.
        # i don't know the JSON-HTTP specs in detail, but the previous author
        # accessed res['js']['error'] as well as js_answer['error'].
        # see the two lines commented out with "# ~?".
        if 'error' in res['js']:
            if res['js']['error'] == 'download_delay':
                self.retry(wait_time=res['js']['params']['next_download'])
                # ~? self.retry(wait_time=js_answer['params']['next_download'])
            elif 'Wrong file password' in res['js']['error'] \
                    or 'You entered a wrong CAPTCHA code' in res['js']['error'] \
                    or 'CAPTCHA Code nicht korrekt' in res['js']['error']:
                return None
            elif 'CAPTCHA' in res['js']['error']:
                self.logDebug("Error response is unknown, but mentions CAPTCHA")
                return None
            else:
                self.fail(res['js']['error'])
        if 'answer' not in res['js'] or field not in res['js']['answer']:
            self.error(_("JSON %s 2") % field)
        return res['js']['answer'][field]


getInfo = create_getInfo(FilepostCom)
|
gpl-3.0
|
guschmue/tensorflow
|
tensorflow/contrib/slim/nets.py
|
191
|
1609
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-Slim Nets.
## Standard Networks.
@@alexnet_v2
@@inception_v1
@@inception_v1_base
@@inception_v2
@@inception_v2_base
@@inception_v3
@@inception_v3_base
@@overfeat
@@vgg_a
@@vgg_16
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,
# Collapse nets into a single namespace.
from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.contrib.slim.python.slim.nets import overfeat
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
from tensorflow.contrib.slim.python.slim.nets import vgg
from tensorflow.python.util.all_util import make_all
# pylint: enable=unused-import
__all__ = make_all(__name__)
|
apache-2.0
|
mpvoss/RickAndMortyWeatherTweets
|
env/lib/python3.5/site-packages/pyowm/webapi25/stationparser.py
|
1
|
2291
|
"""
Module containing a concrete implementation for JSONParser abstract class,
returning a Station instance
"""
import json
import time
from pyowm.webapi25 import station
from pyowm.webapi25 import weather
from pyowm.abstractions import jsonparser
from pyowm.exceptions import parse_response_error, api_response_error
class StationParser(jsonparser.JSONParser):
    """
    Concrete *JSONParser* implementation building a *Station* instance
    out of raw JSON data coming from OWM web API responses.
    """

    def parse_JSON(self, JSON_string):
        """
        Parses a *Station* instance out of raw JSON data. Only certain
        properties of the data are used: if these properties are not found or
        cannot be parsed, an error is issued.

        :param JSON_string: a raw JSON string
        :type JSON_string: str
        :returns: a *Station* instance or ``None`` if no data is available
        :raises: *ParseResponseError* if it is impossible to find or parse the
            data needed to build the result, *APIResponseError* if the JSON
            string embeds an HTTP status error (this is an OWM web API 2.5 bug)
        """
        d = json.loads(JSON_string)
        try:
            # Hoist the nested dicts once instead of re-indexing for each key.
            station_data = d['station']
            name = station_data['name']
            station_ID = station_data['id']
            station_type = station_data['type']
            status = station_data['status']
            coords = station_data['coord']
            lat = coords['lat']
            # The API is inconsistent about the longitude key name.
            if 'lon' in coords:
                lon = coords['lon']
            elif 'lng' in coords:
                lon = coords['lng']
            else:
                lon = None
            # 'distance' is optional; absent means unknown.
            distance = d.get('distance')
        except KeyError:
            # (was "except KeyError as e" with e never used)
            error_msg = ''.join((__name__, ': unable to read JSON data', ))
            raise parse_response_error.ParseResponseError(error_msg)
        else:
            if 'last' in d:
                last_weather = weather.weather_from_dictionary(d['last'])
            else:
                last_weather = None
            return station.Station(name, station_ID, station_type, status,
                                   lat, lon, distance, last_weather)
|
mit
|
Leoniela/nipype
|
nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py
|
9
|
1107
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import PowerSpectrum
def test_PowerSpectrum_inputs():
    # AUTO-GENERATED expectations (tools/checkspecs.py): the metadata each
    # input trait of fsl.utils.PowerSpectrum must carry.  Regenerate rather
    # than hand-edit when the interface changes.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=0,
    ),
    out_file=dict(argstr='%s',
    genfile=True,
    hash_files=False,
    position=1,
    ),
    output_type=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = PowerSpectrum.input_spec()
    # Yield one assert_equal per (trait, metakey, value) triple (nose-style
    # generator test).
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_PowerSpectrum_outputs():
    # AUTO-GENERATED expectations: output traits of fsl.utils.PowerSpectrum.
    output_map = dict(out_file=dict(),
    )
    outputs = PowerSpectrum.output_spec()
    # Yield one assert_equal per (trait, metakey, value) triple.
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
sda2b/youtube-dl
|
youtube_dl/extractor/hotnewhiphop.py
|
129
|
2306
|
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
HEADRequest,
)
class HotNewHipHopIE(InfoExtractor):
    _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html'
    _TEST = {
        'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html',
        'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96',
        'info_dict': {
            'id': '1435540',
            'ext': 'mp3',
            'title': 'Freddie Gibbs - Lay It Down'
        }
    }

    def _real_extract(self, url):
        """Resolve the page's base64-encoded media path to a direct URL,
        falling back to the embedded (YouTube) player when absent."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        encoded_url = self._search_regex(
            r'data-path="(.*?)"', webpage, 'video URL', default=None)
        if encoded_url is None:
            # No direct media on the page: hand off to the embedded player.
            embed_url = self._search_regex(
                r'"contentUrl" content="(.*?)"', webpage, 'content URL')
            return self.url_result(embed_url, ie='Youtube')

        # The site requires a media-key handshake before the redirect works.
        reqdata = compat_urllib_parse.urlencode([
            ('mediaType', 's'),
            ('mediaId', video_id),
        ])
        request = compat_urllib_request.Request(
            'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata)
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        mkd = self._download_json(
            request, video_id, note='Requesting media key',
            errnote='Could not download media key')
        if 'mediaKey' not in mkd:
            raise ExtractorError('Did not get a media key')

        # Follow the decoded redirect with a HEAD request to find the final URL.
        redirect_url = base64.b64decode(encoded_url).decode('utf-8')
        resolved = self._request_webpage(
            HEADRequest(redirect_url), video_id,
            note='Resolving final URL', errnote='Could not resolve final URL')
        video_url = resolved.geturl()
        if video_url.endswith('.html'):
            raise ExtractorError('Redirect failed')

        return {
            'id': video_id,
            'url': video_url,
            'title': self._og_search_title(webpage).strip(),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
|
unlicense
|
RealTimeWeb/wikisite
|
MoinMoin/formatter/text_html.py
|
1
|
51473
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - "text/html+css" Formatter
@copyright: 2000-2004 by Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
import os.path, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin.formatter import FormatterBase
from MoinMoin import wikiutil, i18n
from MoinMoin.Page import Page
from MoinMoin.action import AttachFile
from MoinMoin.support.python_compatibility import set
# insert IDs into output wherever they occur
# warning: breaks toggle line numbers javascript
_id_debug = False

line_anchors = True
prettyprint = False

# These are the HTML elements that we treat as block elements.
_blocks = set(['dd', 'div', 'dl', 'dt', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
               'hr', 'li', 'ol', 'p', 'pre', 'table', 'tbody', 'td', 'tfoot', 'th',
               'thead', 'tr', 'ul', 'blockquote', ])

# These are the HTML elements which are typically only used with
# an opening tag without a separate closing tag.  We do not
# include 'script' or 'style' because sometimes they do have
# content, and also IE has a parsing bug with those two elements (only)
# when they don't have a closing tag even if valid XHTML.
_self_closing_tags = set(['area', 'base', 'br', 'col', 'frame', 'hr', 'img',
                          'input', 'isindex', 'link', 'meta', 'param'])

# We only open those tags and let the browser auto-close them:
_auto_closing_tags = set(['p'])

# These are the elements which generally should cause an increase in the
# indention level in the html souce code.
_indenting_tags = set(['ol', 'ul', 'dl', 'li', 'dt', 'dd', 'tr', 'td'])

# These are the elements that discard any whitespace they contain as
# immediate child nodes.
# BUGFIX: 'map' 'menu' was missing a comma, so implicit string literal
# concatenation put the bogus element 'mapmenu' in the set instead of
# 'map' and 'menu'.
_space_eating_tags = set(['colgroup', 'dl', 'frameset', 'head', 'map', 'menu',
                          'ol', 'optgroup', 'select', 'table', 'tbody', 'tfoot',
                          'thead', 'tr', 'ul'])

# These are standard HTML attributes which are typically used without any
# value; e.g., as boolean flags indicated by their presence.
_html_attribute_boolflags = set(['compact', 'disabled', 'ismap', 'nohref',
                                 'noresize', 'noshade', 'nowrap', 'readonly',
                                 'selected', 'wrap'])

# These are all the standard HTML attributes that are allowed on any element.
_common_attributes = set(['accesskey', 'class', 'dir', 'disabled', 'id', 'lang',
                          'style', 'tabindex', 'title'])
def rewrite_attribute_name(name, default_namespace='html'):
    """
    Takes an attribute name and tries to make it HTML correct.

    This function takes an attribute name as a string, as it may be
    passed in to a formatting method using a keyword-argument syntax,
    and tries to convert it into a real attribute name.  This is
    necessary because some attributes may conflict with Python
    reserved words or variable syntax (such as 'for', 'class', or
    'z-index'); and also to help with backwards compatibility with
    older versions of MoinMoin where different names may have been
    used (such as 'content_id' or 'css').

    Returns a tuple of strings: (namespace, attribute).

    Namespaces: The default namespace is always assumed to be 'html',
    unless the input string contains a colon or a double-underscore;
    in which case the first such occurance is assumed to separate the
    namespace prefix from name.  So, for example, to get the HTML
    attribute 'for' (as on a <label> element), you can pass in the
    string 'html__for'.

    Hyphens: To better support hyphens (which are not allowed in Python
    variable names), all occurances of two underscores will be replaced
    with a hyphen.  If you use this, then you must also provide a
    namespace since the first occurance of '__' separates a namespace
    from the name.

    Special cases: Within the 'html' namespace, mainly to preserve
    backwards compatibility, these exceptions ars recognized:
    'content_type', 'content_id', 'css_class', and 'css'.
    Additionally all html attributes starting with 'on' are forced to
    lower-case.  Also the string 'xmlns' is recognized as having
    no namespace.

    Examples:
        'id' -> ('html', 'id')
        'css_class' -> ('html', 'class')
        'content_id' -> ('html', 'id')
        'content_type' -> ('html', 'type')
        'html__for' -> ('html', 'for')
        'xml__space' -> ('xml', 'space')
        '__z__index' -> ('', 'z-index')  (empty prefix renders unprefixed)
        '__http__equiv' -> ('', 'http-equiv')
        'onChange' -> ('html', 'onchange')
        'xmlns' -> ('', 'xmlns')
        'xmlns__abc' -> ('xmlns', 'abc')

    (In actuality we only deal with namespace prefixes, not any real
    namespace URI...we only care about the syntax not the meanings.)
    """
    # Handle any namespaces (just in case someday we support XHTML)
    if ':' in name:
        ns, name = name.split(':', 1)
    elif '__' in name:
        # NOTE(review): a leading '__' yields an empty namespace prefix, not
        # the default one; downstream _formatAttributes renders such
        # attributes unprefixed, which is equivalent in plain HTML output.
        ns, name = name.split('__', 1)
    elif name == 'xmlns':
        ns = ''
    else:
        ns = default_namespace
    # BUGFIX: str.replace returns a new string (str is immutable); the
    # result was previously discarded, so '__' was never rewritten to '-'.
    name = name.replace('__', '-')
    if ns == 'html':
        # We have an HTML attribute, fix according to DTD
        if name == 'content_type': # MIME type such as in <a> and <link> elements
            name = 'type'
        elif name == 'content_id': # moin historical convention
            name = 'id'
        elif name in ('css_class', 'css'): # to avoid python word 'class'
            name = 'class'
        elif name.startswith('on'): # event handler hook
            name = name.lower()
    return ns, name
def extend_attribute_dictionary(attributedict, ns, name, value):
    """Add one attribute to *attributedict*, merging values where sensible.

    The dictionary is keyed by (namespace, attrname) tuples.  A *value* of
    None (not just '') deletes the attribute.  Within the html namespace,
    'class' and 'style' values are appended to an existing value
    (space-/semicolon-separated), and boolean-flag attributes are
    normalised to attr="attr" as XHTML prescribes.
    """
    key = (ns, name)
    if value is None:
        # None means: drop the attribute entirely if present.
        attributedict.pop(key, None)
        return
    if ns == 'html' and key in attributedict:
        if name == 'class':
            # CSS classes accumulate as a space-separated list.
            value = '%s %s' % (attributedict[key], value)
        elif name == 'style':
            # CSS rules accumulate as a semicolon-separated list.
            value = '%s; %s' % (attributedict[key], value)
        elif name in _html_attribute_boolflags:
            # Boolean flags must carry a value: use the attribute's own name.
            value = name
    attributedict[key] = value
class Formatter(FormatterBase):
"""
Send HTML data.
"""
hardspace = ' '
indentspace = ' '
    def __init__(self, request, **kw):
        """Initialise the HTML formatter state.

        @param request: the current request object
        @keyword is_included: True when formatting a page included in another
        @keyword no_magic: disable automatic closing of <p> tags
        """
        FormatterBase.__init__(self, request, **kw)
        # current indentation depth of the emitted HTML source
        self._indent_level = 0
        self._in_code = 0 # used by text_gedit
        self._in_code_area = 0
        self._in_code_line = 0
        self._code_area_js = 0
        self._code_area_state = ['', 0, -1, -1, 0]
        # code format string. id - code block id, num - line number.
        # Caution: upon changing, also check line numbers hide/show js.
        self._code_id_format = "%(id)s_%(num)d"
        self._show_section_numbers = None
        # tracks an already-emitted pagelink close (see pagelink())
        self.pagelink_preclosed = False
        self._is_included = kw.get('is_included', False)
        self.request = request
        self.cfg = request.cfg
        self.no_magic = kw.get('no_magic', False) # disabled tag auto closing
        # per-request stack of heading counters for section numbering
        if not hasattr(request, '_fmt_hd_counters'):
            request._fmt_hd_counters = []
# Primitive formatter functions #####################################
# all other methods should use these to format tags. This keeps the
# code clean and handle pathological cases like unclosed p and
# inline tags.
def _langAttr(self, lang=None):
""" Return lang and dir attribute
(INTERNAL USE BY HTML FORMATTER ONLY!)
Must be used on all block elements - div, p, table, etc.
@param lang: if defined, will return attributes for lang. if not
defined, will return attributes only if the current lang is
different from the content lang.
@rtype: dict
@return: language attributes
"""
if not lang:
lang = self.request.current_lang
# Actions that generate content in user language should change
# the content lang from the default defined in cfg.
if lang == self.request.content_lang:
# lang is inherited from content div
return {}
#attr = {'xml:lang': lang, 'lang': lang, 'dir': i18n.getDirection(lang),}
attr = {'lang': lang, 'dir': i18n.getDirection(lang), }
return attr
def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):
""" Return HTML attributes formatted as a single string. (INTERNAL USE BY HTML FORMATTER ONLY!)
@param attr: dict containing keys and values
@param allowed_attrs: A list of allowable attribute names
@param kw: other arbitrary attributes expressed as keyword arguments.
@rtype: string
@return: formated attributes or empty string
The attributes and their values can either be given in the
'attr' dictionary, or as extra keyword arguments. They are
both merged together. See the function
rewrite_attribute_name() for special notes on how to name
attributes.
Setting a value to None rather than a string (or string
coercible) will remove that attribute from the list.
If the list of allowed_attrs is provided, then an error is
raised if an HTML attribute is encountered that is not in that
list (or is not a common attribute which is always allowed or
is not in another XML namespace using the double-underscore
syntax).
"""
# Merge the attr dict and kw dict into a single attributes
# dictionary (rewriting any attribute names, extracting
# namespaces, and merging some values like css classes).
attributes = {} # dict of key=(namespace,name): value=attribute_value
if attr:
for a, v in attr.items():
a_ns, a_name = rewrite_attribute_name(a)
extend_attribute_dictionary(attributes, a_ns, a_name, v)
if kw:
for a, v in kw.items():
a_ns, a_name = rewrite_attribute_name(a)
extend_attribute_dictionary(attributes, a_ns, a_name, v)
# Add title attribute if missing, but it has an alt.
if ('html', 'alt') in attributes and ('html', 'title') not in attributes:
attributes[('html', 'title')] = attributes[('html', 'alt')]
# Force both lang and xml:lang to be present and identical if
# either exists. The lang takes precedence over xml:lang if
# both exist.
#if ('html', 'lang') in attributes:
# attributes[('xml', 'lang')] = attributes[('html', 'lang')]
#elif ('xml', 'lang') in attributes:
# attributes[('html', 'lang')] = attributes[('xml', 'lang')]
# Check all the HTML attributes to see if they are known and
# allowed. Ignore attributes if in non-HTML namespaces.
if allowed_attrs:
for name in [key[1] for key in attributes if key[0] == 'html']:
if name in _common_attributes or name in allowed_attrs:
pass
elif name.startswith('on'):
pass # Too many event handlers to enumerate, just let them all pass.
else:
# Unknown or unallowed attribute.
err = 'Illegal HTML attribute "%s" passed to formatter' % name
raise ValueError(err)
# Finally, format them all as a single string.
if attributes:
# Construct a formatted string containing all attributes
# with their values escaped. Any html:* namespace
# attributes drop the namespace prefix. We build this by
# separating the attributes into three categories:
#
# * Those without any namespace (should only be xmlns attributes)
# * Those in the HTML namespace (we drop the html: prefix for these)
# * Those in any other non-HTML namespace, including xml:
xmlnslist = ['%s="%s"' % (k[1], wikiutil.escape(v, 1))
for k, v in attributes.items() if not k[0]]
htmllist = ['%s="%s"' % (k[1], wikiutil.escape(v, 1))
for k, v in attributes.items() if k[0] == 'html']
otherlist = ['%s:%s="%s"' % (k[0], k[1], wikiutil.escape(v, 1))
for k, v in attributes.items() if k[0] and k[0] != 'html']
# Join all these lists together in a space-separated string. Also
# prefix the whole thing with a space too.
htmllist.sort()
otherlist.sort()
all = [''] + xmlnslist + htmllist + otherlist
return ' '.join(all)
return ''
    def _open(self, tag, newline=False, attr=None, allowed_attrs=None,
              is_unique=False, **kw):
        """ Open a tag with optional attributes (INTERNAL USE BY HTML FORMATTER ONLY!)

        @param tag: html tag, string
        @param newline: render tag so following data is on a separate line
        @param attr: dict with tag attributes
        @param allowed_attrs: list of allowed attributes for this element
        @param is_unique: ID is already unique
        @param kw: arbitrary attributes and values
        @rtype: string ?
        @return: open tag with attributes as a string
        """
        # If it is self-closing, then don't expect a closing tag later on.
        is_self_closing = (tag in _self_closing_tags) and ' /' or ''
        # make ID unique
        id = None
        if not is_unique:
            # Rewrite any id given in attr or kw through the request's id
            # cache so repeated ids on one page stay distinct.
            if attr and 'id' in attr:
                id = self.make_id_unique(attr['id'])
                id = self.qualify_id(id)
                attr['id'] = id
            if 'id' in kw:
                id = self.make_id_unique(kw['id'])
                id = self.qualify_id(id)
                kw['id'] = id
        else:
            # Caller guarantees uniqueness; just remember the id for debug.
            if attr and 'id' in attr:
                id = attr['id']
            if 'id' in kw:
                id = kw['id']
        if tag in _blocks:
            # Block elements
            result = []
            # Add language attributes, but let caller overide the default
            attributes = self._langAttr()
            if attr:
                attributes.update(attr)
            # Format
            attributes = self._formatAttributes(attributes, allowed_attrs=allowed_attrs, **kw)
            result.append('<%s%s%s>' % (tag, attributes, is_self_closing))
            if newline:
                result.append(self._newline())
            if _id_debug and id:
                # Debug aid: show the generated id right after the tag.
                result.append('(%s) ' % id)
            tagstr = ''.join(result)
        else:
            # Inline elements
            tagstr = '<%s%s%s>' % (tag,
                                   self._formatAttributes(attr, allowed_attrs, **kw),
                                   is_self_closing)
        return tagstr
def _close(self, tag, newline=False):
""" Close tag (INTERNAL USE BY HTML FORMATTER ONLY!)
@param tag: html tag, string
@param newline: render tag so following data is on a separate line
@rtype: string
@return: closing tag as a string
"""
if tag in _self_closing_tags or (tag in _auto_closing_tags and not self.no_magic):
# This tag was already closed
tagstr = ''
elif tag in _blocks:
# Block elements
result = []
if newline:
result.append(self._newline())
result.append('</%s>' % (tag))
tagstr = ''.join(result)
else:
# Inline elements
tagstr = '</%s>' % tag
if newline:
tagstr += self._newline()
return tagstr
# Public methods ###################################################
def startContent(self, content_id='content', newline=True, **kw):
""" Start page content div.
A link anchor is provided at the beginning of the div, with
an id of 'top' (modified by the request ID cache).
"""
if hasattr(self, 'page'):
self.request.uid_generator.begin(self.page.page_name)
result = []
# Use the content language
attr = self._langAttr(self.request.content_lang)
attr['id'] = content_id
result.append(self._open('div', newline=False, attr=attr,
allowed_attrs=['align'], **kw))
result.append(self.anchordef('top'))
if newline:
result.append('\n')
return ''.join(result)
def endContent(self, newline=True):
""" Close page content div.
A link anchor is provided at the end of the div, with
an id of 'bottom' (modified by the request ID cache).
"""
result = []
result.append(self.anchordef('bottom'))
result.append(self._close('div', newline=newline))
if hasattr(self, 'page'):
self.request.uid_generator.end()
return ''.join(result)
def lang(self, on, lang_name):
""" Insert text with specific lang and direction.
Enclose within span tag if lang_name is different from
the current lang
"""
tag = 'span'
if lang_name != self.request.current_lang:
# Enclose text in span using lang attributes
if on:
attr = self._langAttr(lang=lang_name)
return self._open(tag, attr=attr)
return self._close(tag)
# Direction did not change, no need for span
return ''
# Links ##############################################################
    def pagelink(self, on, pagename='', page=None, **kw):
        """ Link to a page.

        formatter.text_python will use an optimized call with a page!=None
        parameter. DO NOT USE THIS YOURSELF OR IT WILL BREAK.
        See wikiutil.link_tag() for possible keyword parameters.
        """
        FormatterBase.pagelink(self, on, pagename, page, **kw)
        # 'generated' is meta information for the base class only.
        if 'generated' in kw:
            del kw['generated']
        if page is None:
            page = Page(self.request, pagename, formatter=self)
        if self.request.user.show_nonexist_qm and on and not page.exists():
            # User wants missing pages marked with a '?': emit the complete
            # open + '?' + close sequence now and remember that, so the
            # matching off-call below returns nothing.
            self.pagelink_preclosed = True
            return (page.link_to(self.request, on=1, **kw) +
                    self.text("?") +
                    page.link_to(self.request, on=0, **kw))
        elif not on and self.pagelink_preclosed:
            # Closing a link that was already closed by the branch above.
            self.pagelink_preclosed = False
            return ""
        else:
            return page.link_to(self.request, on=on, **kw)
def interwikilink(self, on, interwiki='', pagename='', **kw):
"""
@keyword title: override using the interwiki wikiname as title
"""
querystr = kw.get('querystr', {})
wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_interwiki(self.request, interwiki, pagename)
wikiurl = wikiutil.mapURL(self.request, wikiurl)
if wikitag == 'Self': # for own wiki, do simple links
wikitail = wikiutil.url_unquote(wikitail)
try: # XXX this is the only place where we access self.page - do we need it? Crashes silently on actions!
pagename = wikiutil.AbsPageName(self.page.page_name, wikitail)
except:
pagename = wikitail
return self.pagelink(on, pagename, **kw)
else: # return InterWiki hyperlink
if on:
href = wikiutil.join_wiki(wikiurl, wikitail)
if querystr:
separator = ('?', '&')['?' in href]
href = '%s%s%s' % (href, separator, wikiutil.makeQueryString(querystr))
anchor = kw.get('anchor')
if anchor:
href = '%s#%s' % (href, self.sanitize_to_id(anchor))
if wikitag_bad:
html_class = 'badinterwiki'
else:
html_class = 'interwiki'
title = kw.get('title', wikitag)
return self.url(1, href, title=title, css=html_class) # interwiki links with umlauts
else:
return self.url(0)
    def url(self, on, url=None, css=None, do_escape=None, **kw):
        """
        Inserts an <a> element (you can give any A tag attributes as kw args).

        @param on: 1 to start the link, 0 to end the link (no other arguments are needed when on==0).
        @param url: the URL to link to; will go through Wiki URL mapping.
        @param css: a space-separated list of CSS classes
        @param do_escape: DEPRECATED and not used any more, please remove it from your code!
            We will remove this parameter in moin 1.8 (it used to filter url
            param through wikiutil.escape, but text_html formatter's _open
            will do it again, so this just leads to double escaping now).
        """
        if do_escape is not None:
            # Parameter kept only to warn legacy callers; it has no effect.
            if do_escape:
                logging.warning("Deprecation warning: MoinMoin.formatter.text_html.url being called with do_escape=1/True parameter, please review caller.")
            else:
                logging.warning("Deprecation warning: MoinMoin.formatter.text_html.url being called with do_escape=0/False parameter, please remove it from the caller.")
        if on:
            attrs = self._langAttr()
            # Handle the URL mapping
            if url is None and 'href' in kw:
                # Accept href= as an alias for the url parameter.
                url = kw['href']
                del kw['href']
            if url is not None:
                url = wikiutil.mapURL(self.request, url)
                attrs['href'] = url
            if css:
                attrs['class'] = css
            markup = self._open('a', attr=attrs, **kw)
        else:
            markup = self._close('a')
        return markup
def anchordef(self, id):
"""Inserts an invisible element used as a link target.
Inserts an empty <span> element with an id attribute, used as an anchor
for link references. We use <span></span> rather than <span/>
for browser portability.
"""
# Don't add newlines, \n, as it will break pre and
# line-numbered code sections (from line_achordef() method).
#return '<a id="%s"></a>' % (id, ) # do not use - this breaks PRE sections for IE
id = self.make_id_unique(id)
id = self.qualify_id(id)
return '<span class="anchor" id="%s"></span>' % id
def line_anchordef(self, lineno):
if line_anchors:
return self.anchordef("line-%d" % lineno)
else:
return ''
def anchorlink(self, on, name='', **kw):
"""Insert an <a> link pointing to an anchor on the same page.
Call once with on=1 to start the link, and a second time with
on=0 to end it. No other arguments are needed on the second
call.
The name argument should be the same as the id provided to the
anchordef() method, or some other elment. It should NOT start
with '#' as that will be added automatically.
The id argument, if provided, is instead the id of this link
itself and not of the target element the link references.
"""
attrs = self._langAttr()
if name:
name = self.sanitize_to_id(name)
attrs['href'] = '#' + self.qualify_id(name)
if 'href' in kw:
del kw['href']
if on:
str = self._open('a', attr=attrs, **kw)
else:
str = self._close('a')
return str
def line_anchorlink(self, on, lineno=0):
if line_anchors:
return self.anchorlink(on, name="line-%d" % lineno)
else:
return ''
# Attachments ######################################################
    def attachment_link(self, on, url=None, querystr=None, **kw):
        """Open/close a link to an attachment.

        If the attachment exists, links to it (action from querystr['do'],
        default 'view'); otherwise links to the upload form for it.

        @param on: 1/True=start link, 0/False=end link
        @param url: filename.ext or PageName/filename.ext
        @param querystr: dict of extra query arguments (new in 1.6)
        """
        assert on in (0, 1, False, True) # make sure we get called the new way, not like the 1.5 api was
        _ = self.request.getText
        if querystr is None:
            querystr = {}
        assert isinstance(querystr, dict) # new in 1.6, only support dicts
        if 'do' not in querystr:
            querystr['do'] = 'view'
        if on:
            # resolve url relative to the current page into (page, file)
            pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
            #logging.debug("attachment_link: url %s pagename %s filename %s" % (url, pagename, filename))
            fname = wikiutil.taintfilename(filename)
            if AttachFile.exists(self.request, pagename, fname):
                target = AttachFile.getAttachUrl(pagename, fname, self.request, do=querystr['do'])
                if not 'title' in kw:
                    kw['title'] = "attachment:%s" % url
                kw['css'] = 'attachment'
            else:
                # missing attachment: link to the upload form instead
                target = AttachFile.getAttachUrl(pagename, fname, self.request, do='upload_form')
                kw['title'] = _('Upload new attachment "%(filename)s"') % {'filename': fname}
                kw['css'] = 'attachment nonexistent'
            return self.url(on, target, **kw)
        else:
            return self.url(on)
    def attachment_image(self, url, **kw):
        """Render an attached image inline with <img>, or an upload link if it is missing.

        @param url: filename.ext or PageName/filename.ext of the image attachment
        """
        _ = self.request.getText
        pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
        fname = wikiutil.taintfilename(filename)
        exists = AttachFile.exists(self.request, pagename, fname)
        if exists:
            kw['css'] = 'attachment'
            # addts=1 appends a timestamp so browsers re-fetch updated images
            kw['src'] = AttachFile.getAttachUrl(pagename, fname, self.request, addts=1)
            title = _('Inlined image: %(url)s') % {'url': self.text(url)}
            if not 'title' in kw:
                kw['title'] = title
            # alt is required for images:
            if not 'alt' in kw:
                kw['alt'] = kw['title']
            return self.image(**kw)
        else:
            # missing image: show the attach icon linking to the upload form
            title = _('Upload new attachment "%(filename)s"') % {'filename': fname}
            img = self.icon('attachimg')
            css = 'nonexistent'
            target = AttachFile.getAttachUrl(pagename, fname, self.request, do='upload_form')
            return self.url(1, target, css=css, title=title) + img + self.url(0)
    def attachment_drawing(self, url, text, **kw):
        """Render an attached drawing via the responsible action plugin.

        Falls back to returning the raw url text when no drawing action /
        plugin is available for this attachment.
        """
        # ToDo try to move this to a better place e.g. __init__
        try:
            drawing_action = AttachFile.get_action(self.request, url, do='modify')
            assert drawing_action is not None
            # the action plugin provides its own attachment_drawing renderer
            attachment_drawing = wikiutil.importPlugin(self.request.cfg, 'action',
                                                       drawing_action, 'attachment_drawing')
            return attachment_drawing(self, url, text, **kw)
        except (wikiutil.PluginMissingError, wikiutil.PluginAttributeError, AssertionError):
            return url
# Text ##############################################################
def _text(self, text):
text = wikiutil.escape(text)
if self._in_code:
text = text.replace(' ', self.hardspace)
return text
# Inline ###########################################################
def strong(self, on, **kw):
"""Creates an HTML <strong> element.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'strong'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def emphasis(self, on, **kw):
"""Creates an HTML <em> element.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'em'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def underline(self, on, **kw):
"""Creates a text span for underlining (css class "u").
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'span'
if on:
return self._open(tag, attr={'class': 'u'}, allowed_attrs=[], **kw)
return self._close(tag)
def highlight(self, on, **kw):
"""Creates a text span for highlighting (css class "highlight").
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'strong'
if on:
return self._open(tag, attr={'class': 'highlight'}, allowed_attrs=[], **kw)
return self._close(tag)
def sup(self, on, **kw):
"""Creates a <sup> element for superscript text.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'sup'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def sub(self, on, **kw):
"""Creates a <sub> element for subscript text.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'sub'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def strike(self, on, **kw):
"""Creates a text span for line-through (strikeout) text (css class 'strike').
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
# This does not use <strike> because has been deprecated in standard HTML.
tag = 'span'
if on:
return self._open(tag, attr={'class': 'strike'},
allowed_attrs=[], **kw)
return self._close(tag)
def code(self, on, **kw):
"""Creates a <tt> element for inline code or monospaced text.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
Any text within this section will have spaces converted to
non-break spaces.
"""
tag = 'tt'
self._in_code = on
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def small(self, on, **kw):
"""Creates a <small> element for smaller font.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'small'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def big(self, on, **kw):
"""Creates a <big> element for larger font.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'big'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
# Block elements ####################################################
def preformatted(self, on, **kw):
"""Creates a preformatted text region, with a <pre> element.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
FormatterBase.preformatted(self, on)
tag = 'pre'
if on:
return self._open(tag, newline=1, **kw)
return self._close(tag)
# Use by code area
_toggleLineNumbersScript = """
<script type="text/javascript">
function isnumbered(obj) {
return obj.childNodes.length && obj.firstChild.childNodes.length && obj.firstChild.firstChild.className == 'LineNumber';
}
function nformat(num,chrs,add) {
var nlen = Math.max(0,chrs-(''+num).length), res = '';
while (nlen>0) { res += ' '; nlen-- }
return res+num+add;
}
function addnumber(did, nstart, nstep) {
var c = document.getElementById(did), l = c.firstChild, n = 1;
if (!isnumbered(c)) {
if (typeof nstart == 'undefined') nstart = 1;
if (typeof nstep == 'undefined') nstep = 1;
var n = nstart;
while (l != null) {
if (l.tagName == 'SPAN') {
var s = document.createElement('SPAN');
var a = document.createElement('A');
s.className = 'LineNumber';
a.appendChild(document.createTextNode(nformat(n,4,'')));
a.href = '#' + did + '_' + n;
s.appendChild(a);
s.appendChild(document.createTextNode(' '));
n += nstep;
if (l.childNodes.length) {
l.insertBefore(s, l.firstChild);
}
else {
l.appendChild(s);
}
}
l = l.nextSibling;
}
}
return false;
}
function remnumber(did) {
var c = document.getElementById(did), l = c.firstChild;
if (isnumbered(c)) {
while (l != null) {
if (l.tagName == 'SPAN' && l.firstChild.className == 'LineNumber') l.removeChild(l.firstChild);
l = l.nextSibling;
}
}
return false;
}
function togglenumber(did, nstart, nstep) {
var c = document.getElementById(did);
if (isnumbered(c)) {
remnumber(did);
} else {
addnumber(did,nstart,nstep);
}
return false;
}
</script>
"""
    def code_area(self, on, code_id, code_type='code', show=0, start=-1, step=-1, msg=None):
        """Creates a formatted code region, with line numbering.

        This region is formatted as a <div> with a <pre> inside it. The
        code_id argument is assigned to the 'id' of the div element, and
        must be unique within the document. The show, start, and step are
        used for line numbering.

        Note this is not like most formatter methods, it can not take any
        extra keyword arguments.

        Call once with on=1 to start the region, and a second time
        with on=0 to end it.

        the msg string is not escaped
        """
        _ = self.request.getText
        res = []
        if on:
            code_id = self.sanitize_to_id('CA-%s' % code_id)
            ci = self.qualify_id(self.make_id_unique(code_id))
            # Open a code area
            self._in_code_area = 1
            self._in_code_line = 0
            # id in here no longer used
            # state: [unused, show, start, step, current_line, unique_id]
            self._code_area_state = [None, show, start, step, start, ci]
            if msg:
                attr = {'class': 'codemsg'}
                res.append(self._open('div', attr={'class': 'codemsg'}))
                res.append(msg)
                res.append(self._close('div'))
            # Open the code div - using left to right always!
            attr = {'class': 'codearea', 'lang': 'en', 'dir': 'ltr'}
            res.append(self._open('div', attr=attr))
            # Add the script only in the first code area on the page
            if self._code_area_js == 0 and self._code_area_state[1] >= 0:
                res.append(self._toggleLineNumbersScript)
                self._code_area_js = 1
            # Add line number link, but only for JavaScript enabled browsers.
            if self._code_area_state[1] >= 0:
                toggleLineNumbersLink = r'''
<script type="text/javascript">
document.write('<a href="#" onclick="return togglenumber(\'%s\', %d, %d);" \
class="codenumbers">%s<\/a>');
</script>
''' % (ci, self._code_area_state[2], self._code_area_state[3],
       _("Toggle line numbers"))
                res.append(toggleLineNumbersLink)
            # Open pre - using left to right always!
            attr = {'id': ci, 'lang': 'en', 'dir': 'ltr'}
            res.append(self._open('pre', newline=True, attr=attr, is_unique=True))
        else:
            # Close code area
            res = []
            if self._in_code_line:
                # close a dangling open code line first
                res.append(self.code_line(0))
            res.append(self._close('pre'))
            res.append(self._close('div'))
            # Update state
            self._in_code_area = 0
        return ''.join(res)
    def code_line(self, on):
        """Open (on=1) or close (on=0) one line inside a code area.

        Opening while a line is already open implicitly closes it first.
        When line numbering is enabled, each opened line is prefixed with
        its number (a link) and an anchor span, and the line counter is
        advanced by the configured step.
        """
        res = ''
        if not on or (on and self._in_code_line):
            res += '</span>\n'
        if on:
            res += '<span class="line">'
            if self._code_area_state[1] > 0:
                # _code_area_state: [unused, show, start, step, current, id]
                res += ('<span class="LineNumber"><a href="#%(fmt)s">%%(num)4d</a> </span><span class="LineAnchor" id="%(fmt)s"></span>' % {'fmt': self._code_id_format, }) % {
                       'id': self._code_area_state[5],
                       'num': self._code_area_state[4],
                       }
                self._code_area_state[4] += self._code_area_state[3]
        self._in_code_line = on != 0
        return res
def code_token(self, on, tok_type):
return ['<span class="%s">' % tok_type, '</span>'][not on]
# Paragraphs, Lines, Rules ###########################################
def _indent_spaces(self):
"""Returns space(s) for indenting the html source so list nesting is easy to read.
Note that this mostly works, but because of caching may not always be accurate."""
if prettyprint:
return self.indentspace * self._indent_level
else:
return ''
def _newline(self):
"""Returns the whitespace for starting a new html source line, properly indented."""
if prettyprint:
return '\n' + self._indent_spaces()
else:
return ''
def linebreak(self, preformatted=1):
"""Creates a line break in the HTML output.
If preformatted is true a <br> element is inserted, otherwise
the linebreak will only be visible in the HTML source.
"""
if self._in_code_area:
preformatted = 1
return ['\n', '<br>\n'][not preformatted] + self._indent_spaces()
def paragraph(self, on, **kw):
"""Creates a paragraph with a <p> element.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
if self._terse:
return ''
FormatterBase.paragraph(self, on)
tag = 'p'
if on:
tagstr = self._open(tag, **kw)
else:
tagstr = self._close(tag)
return tagstr
def rule(self, size=None, **kw):
"""Creates a horizontal rule with an <hr> element.
If size is a number in the range [1..6], the CSS class of the rule
is set to 'hr1' through 'hr6'. The intent is that the larger the
size number the thicker or bolder the rule will be.
"""
if size and 1 <= size <= 6:
# Add hr class: hr1 - hr6
return self._open('hr', newline=1, attr={'class': 'hr%d' % size}, **kw)
return self._open('hr', newline=1, **kw)
# Images / Transclusion ##############################################
    def icon(self, type):
        # Icon rendering is delegated to the active theme.
        return self.request.theme.make_icon(type)
    # smileys render exactly like theme icons
    smiley = icon
def image(self, src=None, **kw):
"""Creates an inline image with an <img> element.
The src argument must be the URL to the image file.
"""
if src:
kw['src'] = src
return self._open('img', **kw)
def transclusion(self, on, **kw):
"""Transcludes (includes/embeds) another object."""
if on:
return self._open('object',
allowed_attrs=['archive', 'classid', 'codebase',
'codetype', 'data', 'declare',
'height', 'name', 'standby',
'type', 'width', ],
**kw)
else:
return self._close('object')
def transclusion_param(self, **kw):
"""Give a parameter to a transcluded object."""
return self._open('param',
allowed_attrs=['name', 'type', 'value', 'valuetype', ],
**kw)
# Lists ##############################################################
def number_list(self, on, type=None, start=None, **kw):
"""Creates an HTML ordered list, <ol> element.
The 'type' if specified can be any legal numbered
list-style-type, such as 'decimal','lower-roman', etc.
The 'start' argument if specified gives the numeric value of
the first list item (default is 1).
Call once with on=1 to start the list, and a second time
with on=0 to end it.
"""
tag = 'ol'
if on:
attr = {}
if type is not None:
attr['type'] = type
if start is not None:
attr['start'] = start
tagstr = self._open(tag, newline=1, attr=attr, **kw)
else:
tagstr = self._close(tag, newline=1)
return tagstr
def bullet_list(self, on, **kw):
"""Creates an HTML ordered list, <ul> element.
The 'type' if specified can be any legal unnumbered
list-style-type, such as 'disc','square', etc.
Call once with on=1 to start the list, and a second time
with on=0 to end it.
"""
tag = 'ul'
if on:
tagstr = self._open(tag, newline=1, **kw)
else:
tagstr = self._close(tag, newline=1)
return tagstr
def listitem(self, on, **kw):
"""Adds a list item, <li> element, to a previously opened
bullet or number list.
Call once with on=1 to start the region, and a second time
with on=0 to end it.
"""
tag = 'li'
if on:
tagstr = self._open(tag, newline=1, **kw)
else:
tagstr = self._close(tag, newline=1)
return tagstr
def definition_list(self, on, **kw):
"""Creates an HTML definition list, <dl> element.
Call once with on=1 to start the list, and a second time
with on=0 to end it.
"""
tag = 'dl'
if on:
tagstr = self._open(tag, newline=1, **kw)
else:
tagstr = self._close(tag, newline=1)
return tagstr
def definition_term(self, on, **kw):
"""Adds a new term to a definition list, HTML element <dt>.
Call once with on=1 to start the term, and a second time
with on=0 to end it.
"""
tag = 'dt'
if on:
tagstr = self._open(tag, newline=1, **kw)
else:
tagstr = self._close(tag, newline=0)
return tagstr
def definition_desc(self, on, **kw):
"""Gives the definition to a definition item, HTML element <dd>.
Call once with on=1 to start the definition, and a second time
with on=0 to end it.
"""
tag = 'dd'
if on:
tagstr = self._open(tag, newline=1, **kw)
else:
tagstr = self._close(tag, newline=0)
return tagstr
    def heading(self, on, depth, **kw):
        """Open (on=1) or close (on=0) an <h{depth}> heading.

        Maintains per-request heading counters to produce section numbers
        ("1.2.3. ") when enabled via config or the 'section-numbers' pragma.
        """
        # remember depth of first heading, and adapt counting depth accordingly
        if not self._base_depth:
            self._base_depth = depth
        count_depth = max(depth - (self._base_depth - 1), 1)
        # check numbering, possibly changing the default
        if self._show_section_numbers is None:
            self._show_section_numbers = self.cfg.show_section_numbers
            numbering = self.request.getPragma('section-numbers', '').lower()
            if numbering in ['0', 'off']:
                self._show_section_numbers = 0
            elif numbering in ['1', 'on']:
                self._show_section_numbers = 1
            elif numbering in ['2', '3', '4', '5', '6']:
                # explicit base level for section number display
                self._show_section_numbers = int(numbering)
        heading_depth = depth
        # closing tag, with empty line after, to make source more readable
        if not on:
            return self._close('h%d' % heading_depth) + '\n'
        # create section number
        number = ''
        if self._show_section_numbers:
            # count headings on all levels
            self.request._fmt_hd_counters = self.request._fmt_hd_counters[:count_depth]
            while len(self.request._fmt_hd_counters) < count_depth:
                self.request._fmt_hd_counters.append(0)
            self.request._fmt_hd_counters[-1] = self.request._fmt_hd_counters[-1] + 1
            number = '.'.join([str(x) for x in self.request._fmt_hd_counters[self._show_section_numbers-1:]])
            if number: number += ". "
        # Add space before heading, easier to check source code
        result = '\n' + self._open('h%d' % heading_depth, **kw)
        # optional top/bottom navigation icons inside the heading
        if self.request.user.show_topbottom:
            result += "%s%s%s%s%s%s" % (
                self.anchorlink(1, "bottom"), self.icon('bottom'), self.anchorlink(0),
                self.anchorlink(1, "top"), self.icon('top'), self.anchorlink(0))
        return "%s%s" % (result, number)
# Tables #############################################################
_allowed_table_attrs = {
'table': ['class', 'id', 'style'],
'row': ['class', 'id', 'style'],
'': ['colspan', 'rowspan', 'class', 'id', 'style', 'abbr'],
}
def _checkTableAttr(self, attrs, prefix):
""" Check table attributes
Convert from wikitable attributes to html 4 attributes.
@param attrs: attribute dict
@param prefix: used in wiki table attributes
@rtype: dict
@return: valid table attributes
"""
if not attrs:
return {}
result = {}
s = [] # we collect synthesized style in s
for key, val in attrs.items():
# Ignore keys that don't start with prefix
if prefix and key[:len(prefix)] != prefix:
continue
key = key[len(prefix):]
val = val.strip('"')
# remove invalid attrs from dict and synthesize style
if key == 'width':
s.append("width: %s" % val)
elif key == 'height':
s.append("height: %s" % val)
elif key == 'bgcolor':
s.append("background-color: %s" % val)
elif key == 'align':
s.append("text-align: %s" % val)
elif key == 'valign':
s.append("vertical-align: %s" % val)
# Ignore unknown keys
if key not in self._allowed_table_attrs[prefix]:
continue
result[key] = val
st = result.get('style', '').split(';')
st = '; '.join(st + s)
st = st.strip(';')
st = st.strip()
if not st:
try:
del result['style'] # avoid empty style attr
except:
pass
else:
result['style'] = st
#logging.debug("_checkTableAttr returns %r" % result)
return result
    def table(self, on, attrs=None, **kw):
        """ Create table

        @param on: start table
        @param attrs: table attributes (wiki-style; filtered/converted)
        @rtype: string
        @return start or end tag of a table
        """
        result = []
        if on:
            # Open div to get correct alignment with table width smaller
            # than 100%
            result.append(self._open('div', newline=1))
            # Open table
            if not attrs:
                attrs = {}
            else:
                attrs = self._checkTableAttr(attrs, 'table')
            result.append(self._open('table', newline=1, attr=attrs,
                                     allowed_attrs=self._allowed_table_attrs['table'],
                                     **kw))
            result.append(self._open('tbody', newline=1))
        else:
            # Close tbody, table, and then div
            result.append(self._close('tbody'))
            result.append(self._close('table'))
            result.append(self._close('div'))
        return ''.join(result)
def table_row(self, on, attrs=None, **kw):
tag = 'tr'
if on:
if not attrs:
attrs = {}
else:
attrs = self._checkTableAttr(attrs, 'row')
return self._open(tag, newline=1, attr=attrs,
allowed_attrs=self._allowed_table_attrs['row'],
**kw)
return self._close(tag) + '\n'
def table_cell(self, on, attrs=None, **kw):
tag = 'td'
if on:
if not attrs:
attrs = {}
else:
attrs = self._checkTableAttr(attrs, '')
return ' ' + self._open(tag, attr=attrs,
allowed_attrs=self._allowed_table_attrs[''],
**kw)
return self._close(tag) + '\n'
def text(self, text, **kw):
txt = FormatterBase.text(self, text, **kw)
if kw:
return self._open('span', **kw) + txt + self._close('span')
return txt
def escapedText(self, text, **kw):
txt = wikiutil.escape(text)
if kw:
return self._open('span', **kw) + txt + self._close('span')
return txt
    def rawHTML(self, markup):
        # Pass pre-rendered HTML through unchanged (caller must supply safe markup).
        return markup
def sysmsg(self, on, **kw):
tag = 'div'
if on:
return self._open(tag, attr={'class': 'message'}, **kw)
return self._close(tag)
def div(self, on, **kw):
css_class = kw.get('css_class')
# the display of comment class divs depends on a user setting:
if css_class and 'comment' in css_class.split():
style = kw.get('style')
display = self.request.user.show_comments and "display:''" or "display:none"
if not style:
style = display
else:
style += "; %s" % display
kw['style'] = style
tag = 'div'
if on:
return self._open(tag, **kw)
return self._close(tag)
def span(self, on, **kw):
css_class = kw.get('css_class')
# the display of comment class spans depends on a user setting:
if css_class and 'comment' in css_class.split():
style = kw.get('style')
display = self.request.user.show_comments and "display:''" or "display:none"
if not style:
style = display
else:
style += "; %s" % display
kw['style'] = style
tag = 'span'
if on:
return self._open(tag, **kw)
return self._close(tag)
    def sanitize_to_id(self, text):
        # Derive a valid HTML id / anchor name from arbitrary text (delegates to wikiutil).
        return wikiutil.anchor_name_from_text(text)
|
apache-2.0
|
petewarden/tensorflow
|
tensorflow/python/debug/lib/check_numerics_callback_test.py
|
9
|
17920
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class LimitStringLengthTest(test_util.TensorFlowTestCase):
  """Tests for check_numerics_callback.limit_string_length()."""
  def testLimitStringLengthWithExplicitLimit(self):
    # Strings with length <= max_len pass through unchanged; longer strings
    # keep only the trailing max_len characters, prefixed with "...".
    self.assertEqual(
        check_numerics_callback.limit_string_length("", max_len=2), "")
    self.assertEqual(
        check_numerics_callback.limit_string_length("e", max_len=2), "e")
    self.assertEqual(
        check_numerics_callback.limit_string_length("de", max_len=2), "de")
    self.assertEqual(
        check_numerics_callback.limit_string_length("abcde", max_len=2),
        "...de")
  def testLimitStringLengthWithNoLimit(self):
    # max_len=None disables truncation entirely.
    self.assertEqual(check_numerics_callback.limit_string_length(
        "A" * 100 + "B", max_len=None), "A" * 100 + "B")
    self.assertEqual(
        check_numerics_callback.limit_string_length("", max_len=None), "")
  def testLimitStringLengthWithDefaultLimit(self):
    # Default limit keeps the trailing 50 characters (51-char input below).
    self.assertEqual(
        check_numerics_callback.limit_string_length("A" * 50 + "B"),
        "..." + "A" * 49 + "B")
class CheckNumericsCallbackTest(test_util.TensorFlowTestCase):
  """Tests that enable_check_numerics() leaves healthy computations untouched."""
  def tearDown(self):
    # Always unregister the callback so state doesn't leak between tests.
    check_numerics_callback.disable_check_numerics()
    super(CheckNumericsCallbackTest, self).tearDown()
  def testCallingDisableCheckNumericsWithoutEnablingFirstIsTolerated(self):
    # disable without a prior enable must not raise
    check_numerics_callback.disable_check_numerics()
  def testNoCatchEagerOpExecution(self):
    """Test running multiple steps of eager execution without Inf/NaN."""
    check_numerics_callback.enable_check_numerics()
    x = constant_op.constant([2.0, 3.0])
    y = constant_op.constant([1.0, 0.0])
    # (x+y)*(x-y) is finite everywhere here, so no error is expected
    self.assertAllClose((x + y) * (x - y), [3.0, 9.0])
  @test_util.run_in_graph_and_eager_modes
  def testDatasetMapHealthyResults(self):
    # The callback must not interfere with tf.data map functions that
    # produce only finite values.
    check_numerics_callback.enable_check_numerics()
    tensor = constant_op.constant(
        [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
    def map_fn(x):
      return math_ops.log(math_ops.square(x) + 1)
    dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(2).map(
        map_fn)
    @def_function.function
    def get_batches():
      iterator = iter(dataset)
      return [next(iterator), next(iterator)]
    batches = self.evaluate(get_batches())
    self.assertLen(batches, 2)
    self.assertAllClose(batches[0], np.log([1.25, 2]))
    self.assertAllClose(batches[1], np.log([3.25, 5]))
  @test_util.run_in_graph_and_eager_modes
  def testGraphModeUsesCorrectPathLengthAndStackHeightLimits(self):
    # The limits passed to enable_check_numerics() must be forwarded to
    # get_check_numerics_error_message (verified via a mock).
    check_numerics_callback.enable_check_numerics(
        stack_height_limit=123, path_length_limit=1200)
    @def_function.function
    def add_fn(x, y):
      return x + y
    fake_get_check_numerics_error_message = test.mock.MagicMock(
        return_value="dummy_message")
    with test.mock.patch.object(check_numerics_callback,
                                "get_check_numerics_error_message",
                                fake_get_check_numerics_error_message):
      x = constant_op.constant(2.0)
      y = constant_op.constant(3.0)
      self.assertAllClose(self.evaluate(add_fn(x, y)), 5.0)
      (_, call_kwargs) = fake_get_check_numerics_error_message.call_args
      self.assertEqual(call_kwargs["stack_height_limit"], 123)
      self.assertEqual(call_kwargs["path_length_limit"], 1200)
class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase):
"""Test for cases in which enable_check_numerics() catches infs or nans."""
def tearDown(self):
check_numerics_callback.disable_check_numerics()
super(CheckNumericsCallbackUnhealthyTest, self).tearDown()
def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func):
caught = None
try:
func()
except errors.InvalidArgumentError as error:
caught = error
self.assertTrue(caught, "Failed to catch expected InvalidArgumentError")
return caught.message
def testCatchEagerOpFloat32Inf(self):
"""Test catching Infinity in eager op execution: float32."""
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testEnableCheckNumericsIsIdempotent(self):
"""Two calls to enable_check_numerics() have same effect as one call."""
check_numerics_callback.enable_check_numerics()
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testCatchEagerOpFloat16NaN(self):
"""Test catching Infinity in eager op execution: float16."""
check_numerics_callback.enable_check_numerics()
def log1p(x):
y = 1.0 + x
return math_ops.log(y)
x = constant_op.constant([[-1.0]], dtype=dtypes.float16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: log1p(x))
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float16", message))
self.assertIn("shape: (1, 1)\n", message)
self.assertIn("# of -Inf elements: 1\n", message)
self.assertTrue(re.search(r"Input tensor.*0\.", message))
@test_util.run_in_graph_and_eager_modes
def testCatchFunctionOpInfFloat64(self):
"""Test catching infinites generated in a FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def divide_sum_with_diff(x, y):
w1 = x + y
w2 = x - y
u = w1 / w2
return u * 2.0
x = constant_op.constant(2.0, dtype=dtypes.float64)
y = constant_op.constant(2.0, dtype=dtypes.float64)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(divide_sum_with_diff(x, y)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float64", message))
self.assertIn("shape: ()\n", message)
self.assertIn("Input tensors (2):", message)
# Check that the correct input ops are printed.
self.assertTrue(re.search(r"0:.*Tensor.*add:0", message))
self.assertTrue(re.search(r"1:.*Tensor.*sub:0", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("u = w1 / w2", message)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla(
"TODO(b/141100809): XLA has no way to assert inside of a kernel.")
def testControlFlowGraphWithNaNBFloat16(self):
"""Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def my_conditional(x):
if math_ops.less(math_ops.reduce_sum(x), 0.0):
return math_ops.log(x)
else:
return math_ops.log(-x)
x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(my_conditional(x)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*bfloat16", message))
self.assertIn("shape: (3,)\n", message)
# Check that the correct input op is printed.
self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("return math_ops.log(-x)", message)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla(
    "There is a small inconsistency in the step at which overflow happens: "
    "128 (without XLA) and 127 (with XLA).")
@test_util.disable_tfrt("b/177261532: TFRT cannot detect overflow yet.")
def testOverflowInTfFunction(self):
  """Test catching Infinity caused by overflow in a tf.function with while."""
  check_numerics_callback.enable_check_numerics()

  @def_function.function
  def accumulation_function(counter, lim, accum):
    # Doubles `accum` on every iteration; float32 overflows to +inf long
    # before `lim` iterations are reached.
    while math_ops.less(counter, lim):
      accum.assign(accum * 2.0)
      counter.assign_add(1)

  counter = variables.Variable(0, dtype=dtypes.int32)
  # Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the
  # 1000-step limit is sufficient.
  lim = constant_op.constant(1000, dtype=dtypes.int32)
  accum = variables.Variable(1.0)

  if not context.executing_eagerly():
    self.evaluate([counter.initializer, accum.initializer])

  message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
      lambda: self.evaluate(accumulation_function(counter, lim, accum)))

  # The checker fires on the step that overflowed, leaving counter at 128.
  self.assertAllClose(self.evaluate(counter), 128)
  # Check the content of the error message.
  # The overflow to +Infinity happens during the `* 2.0` operation.
  self.assertTrue(re.search(r"graph op.*\"Mul\"", message))
  self.assertTrue(re.search(r"dtype.*float32", message))
  self.assertIn("shape: ()\n", message)
  # Check that the correct input op is printed.
  self.assertIn("Input tensors (2):", message)
  # Check that the correct input ops are printed.
  self.assertTrue(re.search(r"0:.*Tensor.*ReadVariableOp:0", message))
  self.assertTrue(re.search(r"1:.*Tensor.*mul/y:0", message))
  # Check that the correct line for op creation is printed.
  self.assertTrue(re.search(r"Stack trace of op's creation", message))
  # Must match the source line inside accumulation_function() verbatim.
  self.assertIn("accum.assign(accum * 2.0)", message)
@test_util.run_in_graph_and_eager_modes
def testNanInConstIsCaptured(self):
  """A NaN baked directly into a Const op is caught by the callback."""
  check_numerics_callback.enable_check_numerics()
  v = variables.Variable(3.0, dtype=dtypes.float32)

  @def_function.function
  def add_a_bad_constant(x):
    # The constant itself is NaN, so the Const op (not the Add) trips the
    # numerics checker.
    c = constant_op.constant(np.nan)
    return x + c

  if not context.executing_eagerly():
    self.evaluate(v.initializer)
  message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
      lambda: self.evaluate(add_a_bad_constant(v)))
  self.assertTrue(re.search(r"graph op.*\"Const\"", message))
  self.assertTrue(re.search(r"dtype:.*float32", message))
  self.assertTrue(re.search(r"shape:.*\(\)", message))
  # The reported graph name comes from the tf.function's name, so the
  # inner function name above is load-bearing.
  self.assertTrue(re.search(r"Graph name:.*add_a_bad_constant", message))
@test_util.run_in_graph_and_eager_modes
def testCatchInfinityInDatasetMapFunction(self):
  """Test that callback catches NaN in a tf.dataset map function."""
  check_numerics_callback.enable_check_numerics()

  def generate_nan(x):
    """Intentionally generates NaNs by taking log of negative number."""
    casted_x = math_ops.cast(x, dtypes.float32)
    return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x

  dataset = dataset_ops.Dataset.range(10).map(generate_nan)
  iterator = dataset_ops.make_one_shot_iterator(dataset)

  message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
      lambda: self.evaluate(iterator.get_next()))

  # Check the content of the error message.
  self.assertTrue(re.search(r"graph op.*\"Log\"", message))
  self.assertTrue(re.search(r"dtype.*float32", message))
  self.assertIn("shape: (2, 2)\n", message)
  self.assertTrue(re.search(r"Input tensor.*Tensor.*Log/x:0", message))
  # The "-> |" marker quotes the offending source line verbatim, so the
  # string below must match the line in generate_nan() exactly.
  self.assertIn(
      "-> | return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x",
      message)
@test_util.run_in_graph_and_eager_modes
def testCustomGradientWithNaNWithTfFunction(self):
  """Test that callback catches NaN in a gradient function during backprop."""
  check_numerics_callback.enable_check_numerics()

  @custom_gradient.custom_gradient
  def func_with_bad_grad(x):
    output = math_ops.sin(x)

    @def_function.function
    def grad(dy):
      # `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.
      return math_ops.log(-dy)

    return output, grad

  x = constant_op.constant(-2.0, dtype=dtypes.float16)

  def f(x):
    return func_with_bad_grad(x)

  message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
      lambda: gradient_checker_v2.compute_gradient(f, [x]))

  # Check the content of the error message.
  self.assertTrue(re.search(r"graph op.*\"Log\"", message))
  self.assertTrue(re.search(r"dtype.*float16", message))
  if context.executing_eagerly():
    # The detailed checks below are only asserted in eager mode.
    self.assertIn("shape: ()\n", message)
    self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg:0", message))
    # Must match the source line of grad()'s body verbatim.
    self.assertIn("-> | return math_ops.log(-dy)", message)
@test_util.run_in_graph_and_eager_modes
def testNestedFunctionGradientCall(self):
  """Catching inf in the inner nested tf.function during backprop."""
  check_numerics_callback.enable_check_numerics()

  # A value just below 1.0 puts asin()'s gradient near its singularity.
  x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)

  @def_function.function
  def asinp1(x):
    # asin()'s gradient overflows at the value close to 1.0.
    return math_ops.asin(x) + 1.0

  @def_function.function
  def loss(x):
    return math_ops.square(asinp1(x))

  with backprop.GradientTape() as tape:
    tape.watch(x)
    y = loss(x)

  message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
      lambda: self.evaluate(tape.gradient(y, x)))
  # Check the content of the error message.
  # Assume the op Reciprocal or Xdivy is used in the gradient function for
  # asin().
  self.assertTrue((re.search(r"graph op.*\"Reciprocal\"", message) or
                   re.search(r"graph op.*\"Xdivy\"", message)))
  self.assertTrue(re.search(r"dtype.*float32", message))
def testEagerModeUsesCorrectPathLengthAndStackHeightLimits(self):
  """Limits passed to enable_check_numerics reach the message formatter."""
  check_numerics_callback.enable_check_numerics(
      stack_height_limit=123, path_length_limit=1200)
  # Stub out the formatter so we can inspect the kwargs it was called with.
  fake_get_check_numerics_error_message = test.mock.MagicMock(
      return_value="dummy_message")
  with test.mock.patch.object(check_numerics_callback,
                              "get_check_numerics_error_message",
                              fake_get_check_numerics_error_message):
    x = constant_op.constant(2.0)
    y = constant_op.constant(0.0)
    self._assertRaisesInvalidArgumentErrorAndGetMessage(
        lambda: x / y)  # Expected to generate an inf.
    (_, call_kwargs) = fake_get_check_numerics_error_message.call_args
    # The formatter must receive exactly the limits configured above.
    self.assertEqual(call_kwargs["stack_height_limit"], 123)
    self.assertEqual(call_kwargs["path_length_limit"], 1200)
@test_util.run_in_graph_and_eager_modes
def testExpectedNaNOpOutputs(self):
  """Test calling operations with benign NaN output."""
  check_numerics_callback.enable_check_numerics()

  # Empty input tensor
  x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])
  scale = constant_op.constant([1], dtype=dtypes.float32)
  offset = constant_op.constant([1], dtype=dtypes.float32)

  # Calling fused_batch_norm with an empty input should output a NaN in the
  # latter four outputs without triggering the check_numerics callback
  batch_norm_res = gen_nn_ops._fused_batch_norm(
      x=x, scale=scale, offset=offset, mean=[], variance=[])
  _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)
  # The NaNs are expected and must NOT have raised; just confirm they exist.
  self.assertTrue(np.isnan(batch_mean.squeeze()))
  self.assertTrue(np.isnan(batch_variance.squeeze()))
if __name__ == "__main__":
  # The tests above exercise eager-mode behavior, so enable it up front.
  ops.enable_eager_execution()
  googletest.main()
|
apache-2.0
|
NetApp/manila
|
manila/share/drivers/huawei/v3/rpcapi.py
|
1
|
1665
|
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging as messaging
from manila import rpc
from manila.share import utils
class HuaweiV3API(object):
    """Client side of the huawei V3 rpc API.

    API version history:

        1.0 - Initial version.
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self):
        # All huawei V3 traffic flows over one dedicated topic.
        self.topic = 'huawei_v3'
        rpc_target = messaging.Target(topic=self.topic,
                                      version=self.BASE_RPC_API_VERSION)
        self.client = rpc.get_client(rpc_target, version_cap='1.0')

    def create_replica_pair(self, context, host, local_share_info,
                            remote_device_wwn, remote_fs_id):
        """Synchronously ask the manager on `host` to create a replica pair."""
        server = utils.extract_host(host)
        cctxt = self.client.prepare(server=server, version='1.0')
        return cctxt.call(context, 'create_replica_pair',
                          local_share_info=local_share_info,
                          remote_device_wwn=remote_device_wwn,
                          remote_fs_id=remote_fs_id)
|
apache-2.0
|
dkubiak789/OpenUpgrade
|
addons/website_certification/certification.py
|
385
|
1789
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class certification_type(osv.Model):
    # Lookup table of certification kinds; records are listed alphabetically.
    _name = 'certification.type'
    _order = 'name ASC'
    _columns = {
        # Display name of the certification type.
        'name': fields.char("Certification Type", required=True)
    }
class certification_certification(osv.Model):
    # A certification earned by a partner; records are listed newest-first.
    _name = 'certification.certification'
    _order = 'certification_date DESC'
    _columns = {
        # Partner who holds the certification.
        'partner_id': fields.many2one('res.partner', string="Partner", required=True),
        # Which kind of certification this is (see certification.type).
        'type_id': fields.many2one('certification.type', string="Certification", required=True),
        'certification_date': fields.date("Certification Date", required=True),
        # Free-form score text (e.g. a grade or percentage).
        'certification_score': fields.char("Certification Score", required=True),
        # When set, the score is not displayed on the public website page.
        'certification_hidden_score': fields.boolean("Hide score on website?")
    }
|
agpl-3.0
|
blakfeld/ansible
|
lib/ansible/galaxy/api.py
|
82
|
5143
|
#!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import json
from urllib2 import urlopen, quote as urlquote
from urlparse import urlparse
from ansible.errors import AnsibleError
class GalaxyAPI(object):
    ''' This class is meant to be used as a API client for an Ansible Galaxy server '''

    SUPPORTED_VERSIONS = ['v1']

    def __init__(self, galaxy, api_server):
        """Validate the server URL, probe its API version and set the base URL.

        :param galaxy: Galaxy context object providing `display` for output.
        :param api_server: root URL of the Galaxy server (no trailing /api).
        :raises AnsibleError: on a malformed URL, unreachable server, or an
            unsupported API version.
        """
        self.galaxy = galaxy
        try:
            urlparse(api_server, scheme='https')
        except Exception:
            # A bare `except:` here would also trap SystemExit and
            # KeyboardInterrupt; Exception is the widest we want.
            raise AnsibleError("Invalid server API url passed: %s" % api_server)
        server_version = self.get_server_api_version('%s/api/' % (api_server))
        if not server_version:
            raise AnsibleError("Could not retrieve server API version: %s" % api_server)

        if server_version in self.SUPPORTED_VERSIONS:
            self.baseurl = '%s/api/%s' % (api_server, server_version)
            self.version = server_version  # for future use
            self.galaxy.display.vvvvv("Base API: %s" % self.baseurl)
        else:
            raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)

    def get_server_api_version(self, api_server):
        """
        Fetches the Galaxy API current version to ensure
        the API server is up and reachable.
        """
        # TODO: fix galaxy server which returns current_version path (/api/v1)
        #  vs actual version (v1); also should set baseurl using
        #  supported_versions which has path. Until then the version is
        #  hard-coded; restore the network probe (json.load(urlopen(api_server))
        #  -> data.get("current_version", 'v1')) once the server is fixed.
        return 'v1'

    def lookup_role_by_name(self, role_name, notify=True):
        """
        Find a role by name.

        :param role_name: qualified role name in "username.rolename" form.
        :param notify: when True, print a progress line for the download.
        :returns: the first matching role dict, or None when not found or on
            a connection error.
        :raises AnsibleError: when the role name is not in the expected form.
        """
        role_name = urlquote(role_name)

        try:
            parts = role_name.split(".")
            user_name = ".".join(parts[0:-1])
            role_name = parts[-1]
            if notify:
                self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
        except Exception:
            raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)

        url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
        self.galaxy.display.vvvv("- %s" % (url))
        try:
            data = json.load(urlopen(url))
            if len(data["results"]) != 0:
                return data["results"][0]
        except Exception:
            # TODO: report on connection/availability errors
            pass
        return None

    def fetch_role_related(self, related, role_id):
        """
        Fetch the list of related items for the given role.
        The url comes from the 'related' field of the role.

        Follows the server's pagination ('next' links) until exhausted.
        Returns None on any fetch/parse failure.
        """
        try:
            url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
            data = json.load(urlopen(url))
            results = data['results']
            done = (data.get('next', None) is None)
            while not done:
                url = '%s%s' % (self.baseurl, data['next'])
                self.galaxy.display.display(url)
                data = json.load(urlopen(url))
                results += data['results']
                done = (data.get('next', None) is None)
            return results
        except Exception:
            return None

    def get_list(self, what):
        """
        Fetch the list of items specified.

        Follows pagination; raises AnsibleError when the download fails.
        """
        try:
            # NOTE(review): '?page_size' carries no value, so the server falls
            # back to its default page size — possibly meant '?page_size=N'.
            url = '%s/%s/?page_size' % (self.baseurl, what)
            data = json.load(urlopen(url))
            if "results" in data:
                results = data['results']
            else:
                results = data
            done = True
            if "next" in data:
                done = (data.get('next', None) is None)
            while not done:
                url = '%s%s' % (self.baseurl, data['next'])
                self.galaxy.display.display(url)
                data = json.load(urlopen(url))
                results += data['results']
                done = (data.get('next', None) is None)
            return results
        except Exception as error:
            raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
|
gpl-3.0
|
efortuna/AndroidSDKClone
|
ndk/prebuilt/linux-x86_64/lib/python2.7/distutils/tests/test_build_ext.py
|
11
|
20233
|
import sys
import os
from StringIO import StringIO
import textwrap
from distutils.core import Extension, Distribution
from distutils.command.build_ext import build_ext
from distutils import sysconfig
from distutils.tests import support
from distutils.errors import (DistutilsSetupError, CompileError,
DistutilsPlatformError)
import unittest
from test import test_support
# http://bugs.python.org/issue4373
# Don't load the xx module more than once.
ALREADY_TESTED = False
class BuildExtTestCase(support.TempdirManager,
                       support.LoggingSilencer,
                       unittest.TestCase):
    # Exercises distutils' build_ext command: real compilation of the xx
    # sample module, option finalization, output-path computation, and the
    # OS X deployment-target checks.

    def setUp(self):
        super(BuildExtTestCase, self).setUp()
        self.tmp_dir = self.mkdtemp()
        self.xx_created = False
        sys.path.append(self.tmp_dir)
        self.addCleanup(sys.path.remove, self.tmp_dir)
        if sys.version > "2.6":
            # site.USER_BASE exists from 2.6 on; point it (and build_ext's
            # module-level copy) at a scratch dir for test_user_site.
            import site
            self.old_user_base = site.USER_BASE
            site.USER_BASE = self.mkdtemp()
            from distutils.command import build_ext
            build_ext.USER_BASE = site.USER_BASE

    def tearDown(self):
        if self.xx_created:
            test_support.unload('xx')
            # XXX on Windows the test leaves a directory
            # with xx module in TEMP
        super(BuildExtTestCase, self).tearDown()

    def test_build_ext(self):
        # Compiles the stdlib sample extension 'xxmodule.c' for real and
        # imports the result; guarded by ALREADY_TESTED because the module
        # can only be loaded once per process.
        global ALREADY_TESTED
        support.copy_xxmodule_c(self.tmp_dir)
        self.xx_created = True
        xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
        xx_ext = Extension('xx', [xx_c])
        dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
        dist.package_dir = self.tmp_dir
        cmd = build_ext(dist)
        support.fixup_build_ext(cmd)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        old_stdout = sys.stdout
        if not test_support.verbose:
            # silence compiler output
            sys.stdout = StringIO()
        try:
            cmd.ensure_finalized()
            #Broken after issue 7712(r78136) : add a temp_cwd context manager to test_support ...
            #Without current working dir: "...cannot find -lpython27"
            #NOTE: [py3k svn r85559] First (uncontroversial) part of issue 9807, barry.warsaw, 2010-10-16 :
            # new _fixup_command is bogus, so we will use own work-around
            cmd.library_dirs.insert(0, test_support.SAVEDCWD)
            cmd.run()
        finally:
            sys.stdout = old_stdout

        if ALREADY_TESTED:
            return
        else:
            ALREADY_TESTED = True

        import xx

        for attr in ('error', 'foo', 'new', 'roj'):
            self.assertTrue(hasattr(xx, attr))

        self.assertEqual(xx.foo(2, 5), 7)
        self.assertEqual(xx.foo(13,15), 28)
        self.assertEqual(xx.new().demo(), None)
        if test_support.HAVE_DOCSTRINGS:
            doc = 'This is a template module just for instruction.'
            self.assertEqual(xx.__doc__, doc)
        self.assertTrue(isinstance(xx.Null(), xx.Null))
        self.assertTrue(isinstance(xx.Str(), xx.Str))

    def test_solaris_enable_shared(self):
        # Finalizing under a faked 'sunos' platform with Py_ENABLE_SHARED
        # set must contribute at least one library dir.
        dist = Distribution({'name': 'xx'})
        cmd = build_ext(dist)
        old = sys.platform

        sys.platform = 'sunos' # fooling finalize_options
        from distutils.sysconfig import _config_vars
        old_var = _config_vars.get('Py_ENABLE_SHARED')
        _config_vars['Py_ENABLE_SHARED'] = 1
        try:
            cmd.ensure_finalized()
        finally:
            sys.platform = old
            if old_var is None:
                del _config_vars['Py_ENABLE_SHARED']
            else:
                _config_vars['Py_ENABLE_SHARED'] = old_var

        # make sure we get some library dirs under solaris
        self.assertTrue(len(cmd.library_dirs) > 0)

    def test_user_site(self):
        # With --user set, lib/include under site.USER_BASE (redirected in
        # setUp) must appear in the search paths.
        # site.USER_SITE was introduced in 2.6
        if sys.version < '2.6':
            return

        import site
        dist = Distribution({'name': 'xx'})
        cmd = build_ext(dist)

        # making sure the user option is there
        options = [name for name, short, label in
                   cmd.user_options]
        self.assertIn('user', options)

        # setting a value
        cmd.user = 1

        # setting user based lib and include
        lib = os.path.join(site.USER_BASE, 'lib')
        incl = os.path.join(site.USER_BASE, 'include')
        os.mkdir(lib)
        os.mkdir(incl)

        cmd.ensure_finalized()

        # see if include_dirs and library_dirs were set
        self.assertIn(lib, cmd.library_dirs)
        self.assertIn(lib, cmd.rpath)
        self.assertIn(incl, cmd.include_dirs)

    def test_finalize_options(self):
        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        modules = [Extension('foo', ['xxx'])]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.finalize_options()

        py_include = sysconfig.get_python_inc()
        self.assertTrue(py_include in cmd.include_dirs)

        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        self.assertTrue(plat_py_include in cmd.include_dirs)

        # make sure cmd.libraries is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.libraries = 'my_lib, other_lib lastlib'
        cmd.finalize_options()
        self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib'])

        # make sure cmd.library_dirs is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep
        cmd.finalize_options()
        self.assertIn('my_lib_dir', cmd.library_dirs)
        self.assertIn('other_lib_dir', cmd.library_dirs)

        # make sure rpath is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.rpath = 'one%stwo' % os.pathsep
        cmd.finalize_options()
        self.assertEqual(cmd.rpath, ['one', 'two'])

        # XXX more tests to perform for win32

        # make sure define is turned into 2-tuples
        # strings if they are ','-separated strings
        cmd = build_ext(dist)
        cmd.define = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])

        # make sure undef is turned into a list of
        # strings if they are ','-separated strings
        cmd = build_ext(dist)
        cmd.undef = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.undef, ['one', 'two'])

        # make sure swig_opts is turned into a list
        cmd = build_ext(dist)
        cmd.swig_opts = None
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, [])

        cmd = build_ext(dist)
        cmd.swig_opts = '1 2'
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, ['1', '2'])

    def test_check_extensions_list(self):
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.finalize_options()

        #'extensions' option must be a list of Extension instances
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, 'foo')

        # each element of 'ext_modules' option must be an
        # Extension instance or 2-tuple
        exts = [('bar', 'foo', 'bar'), 'foo']
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # first element of each tuple in 'ext_modules'
        # must be the extension name (a string) and match
        # a python dotted-separated name
        exts = [('foo-bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # second element of each tuple in 'ext_modules'
        # must be a ary (build info)
        exts = [('foo.bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # ok this one should pass
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                             'some': 'bar'})]
        cmd.check_extensions_list(exts)
        ext = exts[0]
        self.assertTrue(isinstance(ext, Extension))

        # check_extensions_list adds in ext the values passed
        # when they are in ('include_dirs', 'library_dirs', 'libraries'
        # 'extra_objects', 'extra_compile_args', 'extra_link_args')
        self.assertEqual(ext.libraries, 'foo')
        self.assertTrue(not hasattr(ext, 'some'))

        # 'macros' element of build info dict must be 1- or 2-tuple
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                             'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        exts[0][1]['macros'] = [('1', '2'), ('3',)]
        cmd.check_extensions_list(exts)
        self.assertEqual(exts[0].undef_macros, ['3'])
        self.assertEqual(exts[0].define_macros, [('1', '2')])

    def test_get_source_files(self):
        modules = [Extension('foo', ['xxx'])]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        self.assertEqual(cmd.get_source_files(), ['xxx'])

    def test_compiler_option(self):
        # cmd.compiler is an option and
        # should not be overriden by a compiler instance
        # when the command is run
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.compiler = 'unix'
        cmd.ensure_finalized()
        cmd.run()
        self.assertEqual(cmd.compiler, 'unix')

    def test_get_outputs(self):
        # Builds a tiny real extension and checks get_outputs() both with
        # and without --inplace, plus get_ext_fullpath() package handling.
        tmp_dir = self.mkdtemp()
        c_file = os.path.join(tmp_dir, 'foo.c')
        self.write_file(c_file, 'void initfoo(void) {};\n')
        ext = Extension('foo', [c_file])
        dist = Distribution({'name': 'xx',
                             'ext_modules': [ext]})
        cmd = build_ext(dist)
        support.fixup_build_ext(cmd)
        cmd.ensure_finalized()
        self.assertEqual(len(cmd.get_outputs()), 1)

        cmd.build_lib = os.path.join(self.tmp_dir, 'build')
        cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')

        # issue #5977 : distutils build_ext.get_outputs
        # returns wrong result with --inplace
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        #Without current working dir: "...cannot find -lpython27"
        #NOTE: After issue #7712(r78136) test cannot use old_wd !
        #cmd.library_dirs.insert(0, old_wd)
        #NOTE: [py3k svn r85559] First (uncontroversial) part of issue 9807, barry.warsaw, 2010-10-16 :
        # new _fixup_command is bogus, so we will use own work-around
        cmd.library_dirs.insert(0, test_support.SAVEDCWD)
        os.chdir(other_tmp_dir)
        try:
            cmd.inplace = 1
            cmd.run()
            so_file = cmd.get_outputs()[0]
        finally:
            os.chdir(old_wd)
        self.assertTrue(os.path.exists(so_file))
        self.assertEqual(os.path.splitext(so_file)[-1],
                         sysconfig.get_config_var('SO'))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, other_tmp_dir)

        cmd.compiler = None
        cmd.inplace = 0
        cmd.run()
        so_file = cmd.get_outputs()[0]
        self.assertTrue(os.path.exists(so_file))
        self.assertEqual(os.path.splitext(so_file)[-1],
                         sysconfig.get_config_var('SO'))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, cmd.build_lib)

        # inplace = 0, cmd.package = 'bar'
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {'': 'bar'}
        path = cmd.get_ext_fullpath('foo')
        # checking that the last directory is the build_dir
        path = os.path.split(path)[0]
        self.assertEqual(path, cmd.build_lib)

        # inplace = 1, cmd.package = 'bar'
        cmd.inplace = 1
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        os.chdir(other_tmp_dir)
        try:
            path = cmd.get_ext_fullpath('foo')
        finally:
            os.chdir(old_wd)
        # checking that the last directory is bar
        path = os.path.split(path)[0]
        lastdir = os.path.split(path)[-1]
        self.assertEqual(lastdir, 'bar')

    def test_ext_fullpath(self):
        ext = sysconfig.get_config_vars()['SO']
        # building lxml.etree inplace
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.inplace = 1
        cmd.distribution.package_dir = {'': 'src'}
        cmd.distribution.packages = ['lxml', 'lxml.html']
        curdir = os.getcwd()
        wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building lxml.etree not inplace
        cmd.inplace = 0
        cmd.build_lib = os.path.join(curdir, 'tmpdir')
        wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap not inplace
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {}
        cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
                              'portmap' + ext)
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap inplace
        cmd.inplace = 1
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
        self.assertEqual(wanted, path)

    def test_build_ext_inplace(self):
        etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
        etree_ext = Extension('lxml.etree', [etree_c])
        dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        cmd.inplace = 1
        cmd.distribution.package_dir = {'': 'src'}
        cmd.distribution.packages = ['lxml', 'lxml.html']
        curdir = os.getcwd()
        ext = sysconfig.get_config_var("SO")
        wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

    def test_setuptools_compat(self):
        import distutils.core, distutils.extension, distutils.command.build_ext
        saved_ext = distutils.extension.Extension
        try:
            # on some platforms, it loads the deprecated "dl" module
            test_support.import_module('setuptools_build_ext', deprecated=True)

            # theses import patch Distutils' Extension class
            from setuptools_build_ext import build_ext as setuptools_build_ext
            from setuptools_extension import Extension

            etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
            etree_ext = Extension('lxml.etree', [etree_c])
            dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
            cmd = setuptools_build_ext(dist)
            cmd.ensure_finalized()
            cmd.inplace = 1
            cmd.distribution.package_dir = {'': 'src'}
            cmd.distribution.packages = ['lxml', 'lxml.html']
            curdir = os.getcwd()
            ext = sysconfig.get_config_var("SO")
            wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
            path = cmd.get_ext_fullpath('lxml.etree')
            self.assertEqual(wanted, path)
        finally:
            # restoring Distutils' Extension class otherwise its broken
            distutils.extension.Extension = saved_ext
            distutils.core.Extension = saved_ext
            distutils.command.build_ext.Extension = saved_ext

    def test_build_ext_path_with_os_sep(self):
        dist = Distribution({'name': 'UpdateManager'})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        ext = sysconfig.get_config_var("SO")
        ext_name = os.path.join('UpdateManager', 'fdsend')
        ext_path = cmd.get_ext_fullpath(ext_name)
        wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext)
        self.assertEqual(ext_path, wanted)

    def test_build_ext_path_cross_platform(self):
        if sys.platform != 'win32':
            return
        dist = Distribution({'name': 'UpdateManager'})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        ext = sysconfig.get_config_var("SO")
        # this needs to work even under win32
        ext_name = 'UpdateManager/fdsend'
        ext_path = cmd.get_ext_fullpath(ext_name)
        wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext)
        self.assertEqual(ext_path, wanted)

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_default(self):
        # Issue 9516: Test that, in the absence of the environment variable,
        # an extension module is compiled with the same deployment target as
        #  the interpreter.
        self._try_compile_deployment_target('==', None)

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_too_low(self):
        # Issue 9516: Test that an extension module is not allowed to be
        # compiled with a deployment target less than that of the interpreter.
        self.assertRaises(DistutilsPlatformError,
            self._try_compile_deployment_target, '>', '10.1')

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_higher_ok(self):
        # Issue 9516: Test that an extension module can be compiled with a
        # deployment target higher than that of the interpreter: the ext
        # module may depend on some newer OS feature.
        deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        if deptarget:
            # increment the minor version number (i.e. 10.6 -> 10.7)
            deptarget = [int(x) for x in deptarget.split('.')]
            deptarget[-1] += 1
            deptarget = '.'.join(str(i) for i in deptarget)
            self._try_compile_deployment_target('<', deptarget)

    def _try_compile_deployment_target(self, operator, target):
        # Compiles a module whose preprocessor test compares the requested
        # deployment target against MAC_OS_X_VERSION_MIN_REQUIRED using
        # `operator`; a CompileError therefore means the target was wrong.
        orig_environ = os.environ
        os.environ = orig_environ.copy()
        self.addCleanup(setattr, os, 'environ', orig_environ)

        if target is None:
            if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
                del os.environ['MACOSX_DEPLOYMENT_TARGET']
        else:
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = target

        deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')

        with open(deptarget_c, 'w') as fp:
            fp.write(textwrap.dedent('''\
                #include <AvailabilityMacros.h>
                int dummy;
                #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED
                #else
                #error "Unexpected target"
                #endif
            ''' % operator))

        # get the deployment target that the interpreter was built with
        target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        target = tuple(map(int, target.split('.')))
        target = '%02d%01d0' % target
        deptarget_ext = Extension(
            'deptarget',
            [deptarget_c],
            extra_compile_args=['-DTARGET=%s'%(target,)],
        )
        dist = Distribution({
            'name': 'deptarget',
            'ext_modules': [deptarget_ext]
        })
        dist.package_dir = self.tmp_dir
        cmd = build_ext(dist)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        try:
            cmd.ensure_finalized()
            cmd.run()
        except CompileError:
            self.fail("Wrong deployment target during compilation")
def test_suite():
    """Return the suite of all BuildExtTestCase tests (regrtest entry point)."""
    return unittest.makeSuite(BuildExtTestCase)

if __name__ == '__main__':
    test_support.run_unittest(test_suite())
|
apache-2.0
|
svn2github/exiv2_
|
website/bin/gen.py
|
2
|
1779
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Settings
# Directory containing one file per placeholder variable (files named __*__).
vardir = "./var"
# strftime format used for the __last_modified__ substitution.
date_format = "%d-%b-%Y"
# ----------------------------------------------------------------------
# functions
def usage():
    """Print the command-line help text."""
    help_text = """Usage: gen.py file.in [...]
Substitute placeholders in input files with content
"""
    print(help_text)
def gen_html(file):
    """Replace variables in the file with their content.

    Reads `file`, substitutes every placeholder listed in the module-level
    `vars` with the content of the matching file in `vardir`, stamps the
    __last_modified__ date, and returns the resulting text.
    """
    # Use context managers so file handles are closed promptly instead of
    # leaking until garbage collection.
    with open(file) as f:
        text = f.read()
    for var in vars:
        with open(vardir + "/" + var) as vf:
            vartext = vf.read()
        if var == "__navbar__":
            # Mark the link for the current file as active
            file2 = re.sub(r'.*/(.*).in', r'\1', file)
            vartext = re.sub(r'<li><a href="('+file2+')', r'<li class="active"><a href="\1', vartext)
        text = text.replace(var, vartext)
    text = last_modified(text)
    return text
def last_modified(text):
    """Substitute variable __last_modified__ with the current date."""
    today = time.strftime(date_format, time.localtime())
    return text.replace("__last_modified__", today)
# ----------------------------------------------------------------------
# main
import sys
import os
import re
import time

# Check command line arguments
if len(sys.argv) == 1:
    usage()
    sys.exit()

# The input files from the command line.
# (renamed from `input`/`file` to avoid shadowing builtins)
input_files = sys.argv[1:]

# Get a list of all variables (files in the form __*__) from vardir
vars = sorted(v for v in os.listdir(vardir) if re.match("^__.*__$", v))

# Substitute variables in all input files; each x.in is written out as x.
print("Substituting variables {0}".format(vars))
for infile in input_files:
    print("Processing {0}...".format(infile))
    text = gen_html(infile)
    outfile = infile.replace(".in", "")
    # Close the output handle deterministically instead of leaking it.
    with open(outfile, 'w') as out:
        out.write(text)
|
gpl-2.0
|
kgraney/msm-kernel
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: perf passes through optional [comm] and [interval].
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None           # if set, only count syscalls made by this comm
default_interval = 3      # seconds between display refreshes
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    # Both [comm] and [interval] were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # Single argument: an integer is an interval, anything else a comm.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id counters (autodict is provided by the perf Core helpers).
syscalls = autodict()
def trace_begin():
    # Refresh the display on a background thread (Python 2 `thread`
    # module) so event processing continues uninterrupted.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Perf callback: count one entry per raw sys_enter event."""
    if for_comm is not None:
        # Restrict counting to the requested command name.
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict entry is not yet an int.
        syscalls[id] = 1
def print_syscall_totals(interval):
    """Periodically clear the screen and print per-syscall counts.

    Runs forever on a background thread; counters are reset after each
    refresh so every interval shows fresh totals.
    """
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),
        # Sort by count descending (count first in the sort key).
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name() may not resolve this id; skip it.
                pass
        syscalls.clear()
        time.sleep(interval)
|
gpl-2.0
|
a-pertsev/flask-security
|
tests/test_configuration.py
|
21
|
1379
|
# -*- coding: utf-8 -*-
"""
test_configuration
~~~~~~~~~~~~~~~~~~
Basic configuration tests
"""
import base64
import pytest
from utils import authenticate, logout
@pytest.mark.settings(
    logout_url='/custom_logout',
    login_url='/custom_login',
    post_login_view='/post_login',
    post_logout_view='/post_logout',
    default_http_auth_realm='Custom Realm')
def test_view_configuration(client):
    """Custom login/logout URLs, post-view redirects and the HTTP auth
    realm configured above must all be honored."""
    response = client.get('/custom_login')
    assert b"<h1>Login</h1>" in response.data

    response = authenticate(client, endpoint='/custom_login')
    assert 'location' in response.headers
    assert response.headers['Location'] == 'http://localhost/post_login'

    response = logout(client, endpoint='/custom_logout')
    assert 'location' in response.headers
    assert response.headers['Location'] == 'http://localhost/post_logout'

    # b64encode returns bytes; decode it so the header value is not
    # rendered as "Basic b'...'" under Python 3.
    credentials = base64.b64encode(b"joe@lp.com:bogus").decode('ascii')
    response = client.get('/http', headers={
        'Authorization': 'Basic %s' % credentials
    })
    assert b'<h1>Unauthorized</h1>' in response.data
    assert 'WWW-Authenticate' in response.headers
    assert 'Basic realm="Custom Realm"' == response.headers['WWW-Authenticate']
@pytest.mark.settings(login_user_template='custom_security/login_user.html')
def test_template_configuration(client):
    """The custom login template configured above should be rendered."""
    response = client.get('/login')
    assert b'CUSTOM LOGIN USER' in response.data
|
mit
|
381426068/MissionPlanner
|
Lib/gzip.py
|
50
|
18735
|
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]

# Gzip member-header flag bits (RFC 1952).
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16

# Internal markers for the direction a GzipFile was opened in.
READ, WRITE = 1, 2
def write32u(output, value):
    """Write *value* to *output* as a 32-bit little-endian unsigned int.

    The L format writes the bit pattern correctly whether signed
    or unsigned.
    """
    packed = struct.pack("<L", value)
    output.write(packed)
def read32(input):
    """Read 4 bytes from *input* as a little-endian unsigned integer."""
    (value,) = struct.unpack("<I", input.read(4))
    return value
def open(filename, mode="rb", compresslevel=9):
    """Shorthand for GzipFile(filename, mode, compresslevel).

    The filename argument is required; mode defaults to 'rb'
    and compresslevel defaults to 9.
    """
    gzip_file = GzipFile(filename, mode, compresslevel)
    return gzip_file
class GzipFile(io.BufferedIOBase):
    """The GzipFile class simulates most of the methods of a file object with
    the exception of the readinto() and truncate() methods.
    """

    # Set when the constructor opened the file itself, so close() knows
    # to close it too.
    myfileobj = None
    # Upper bound on a single raw read from the underlying file.
    max_read_chunk = 10 * 1024 * 1024 # 10Mb

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file. It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written. The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 1 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing. All gzip compressed streams
        are required to contain a timestamp. If omitted or None, the
        current time is used. This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it. The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().
        """

        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            # Borrow the name from the file object when it has one.
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Raw deflate stream (negative wbits): the gzip wrapper is
            # written by _write_gzip_header()/close() instead.
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError, "Mode " + mode + " not supported"

        self.fileobj = fileobj
        self.offset = 0          # position in the *uncompressed* stream
        self.mtime = mtime

        if self.mode == WRITE:
            self._write_gzip_header()

    @property
    def filename(self):
        """Deprecated alias for .name (with '.gz' appended in write mode)."""
        import warnings
        warnings.warn("use the name attribute", DeprecationWarning, 2)
        if self.mode == WRITE and self.name[-3:] != ".gz":
            return self.name + ".gz"
        return self.name

    def __repr__(self):
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'

    def _check_closed(self):
        """Raises a ValueError if the underlying file object has been closed.
        """
        if self.closed:
            raise ValueError('I/O operation on closed file.')

    def _init_write(self, filename):
        """Reset per-stream write state: name, running CRC and sizes."""
        self.name = filename
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
        self.writebuf = []
        self.bufsize = 0

    def _write_gzip_header(self):
        """Emit an RFC 1952 member header to the underlying file."""
        self.fileobj.write('\037\213')             # magic header
        self.fileobj.write('\010')                 # compression method
        fname = os.path.basename(self.name)
        if fname.endswith(".gz"):
            fname = fname[:-3]
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags))
        mtime = self.mtime
        if mtime is None:
            mtime = time.time()
        write32u(self.fileobj, long(mtime))
        self.fileobj.write('\002')                 # extra flags byte
        self.fileobj.write('\377')                 # OS byte: unknown
        if fname:
            self.fileobj.write(fname + '\000')

    def _init_read(self):
        """Reset per-stream read state: running CRC and size counters."""
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0

    def _read_gzip_header(self):
        """Parse and validate one member header; raises IOError if bad."""
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord( self.fileobj.read(1) )
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord( self.fileobj.read(1) )
        self.mtime = read32(self.fileobj)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(2)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC

    def write(self,data):
        """Compress *data* onto the stream; returns the number of bytes."""
        self._check_closed()
        if self.mode != WRITE:
            import errno
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"

        # Convert data type if called by io.BufferedWriter.
        if isinstance(data, memoryview):
            data = data.tobytes()

        if len(data) > 0:
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
            self.fileobj.write( self.compress.compress(data) )
            self.offset += len(data)

        return len(data)

    def read(self, size=-1):
        """Read up to *size* uncompressed bytes (-1 = read everything)."""
        self._check_closed()
        if self.mode != READ:
            import errno
            raise IOError(errno.EBADF, "read() on write-only GzipFile object")

        if self.extrasize <= 0 and self.fileobj is None:
            return ''

        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                # Keep decompressing in exponentially growing chunks
                # until the stream is exhausted.
                while True:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize

        # Serve the request from the internal buffer.
        offset = self.offset - self.extrastart
        chunk = self.extrabuf[offset: offset + size]
        self.extrasize = self.extrasize - size

        self.offset += size
        return chunk

    def _unread(self, buf):
        """Push *buf* back so the next read() returns it again."""
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)

    def _read(self, size=1024):
        """Decompress one chunk into the buffer; EOFError at stream end."""
        if self.fileobj is None:
            raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError, "Reached EOF"
            else:
                self.fileobj.seek( pos ) # Return to original position

            self._init_read()
            self._read_gzip_header()
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.

        if buf == "":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data( uncompress )
            raise EOFError, 'Reached EOF'

        uncompress = self.decompress.decompress(buf)
        self._add_read_data( uncompress )

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True

    def _add_read_data(self, data):
        """Append decompressed *data* to the buffer and update CRC/size."""
        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
        offset = self.offset - self.extrastart
        self.extrabuf = self.extrabuf[offset:] + data
        self.extrasize = self.extrasize + len(data)
        self.extrastart = self.offset
        self.size = self.size + len(data)

    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check the that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)  # may exceed 2GB
        if crc32 != self.crc:
            raise IOError("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self.crc)))
        elif isize != (self.size & 0xffffffffL):
            raise IOError, "Incorrect length of data produced"

        # Gzip files can be padded with zeroes and still have archives.
        # Consume all zero bytes and set the file position to the first
        # non-zero byte. See http://www.gzip.org/#faq8
        c = "\x00"
        while c == "\x00":
            c = self.fileobj.read(1)
        if c:
            self.fileobj.seek(-1, 1)

    @property
    def closed(self):
        # Dropping the fileobj reference is how close() marks us closed.
        return self.fileobj is None

    def close(self):
        """Finish the stream (write mode: trailer CRC/size) and detach."""
        if self.fileobj is None:
            return
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32u(self.fileobj, self.crc)
            # self.size may exceed 2GB, or even 4GB
            write32u(self.fileobj, self.size & 0xffffffffL)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            # Only close the underlying file if we opened it ourselves.
            self.myfileobj.close()
            self.myfileobj = None

    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
        """Flush pending compressed data through to the underlying file."""
        self._check_closed()
        if self.mode == WRITE:
            # Ensure the compressor's buffer is flushed
            self.fileobj.write(self.compress.flush(zlib_mode))
            self.fileobj.flush()

    def fileno(self):
        """Invoke the underlying file object's fileno() method.

        This will raise AttributeError if the underlying file object
        doesn't support fileno().
        """
        return self.fileobj.fileno()

    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = True
        self.extrabuf = ""
        self.extrasize = 0
        self.extrastart = 0
        self.offset = 0

    def readable(self):
        return self.mode == READ

    def writable(self):
        return self.mode == WRITE

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        """Seek in the uncompressed stream (whence: 0=absolute, 1=relative)."""
        if whence:
            if whence == 1:
                offset = self.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            # Forward seek while writing: pad with zero bytes.
            count = offset - self.offset
            for i in range(count // 1024):
                self.write(1024 * '\0')
            self.write((count % 1024) * '\0')
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            # Forward seek while reading: read and discard.
            count = offset - self.offset
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)

        return self.offset

    def readline(self, size=-1):
        """Read one line (up to *size* bytes when size >= 0)."""
        if size < 0:
            # Shortcut common case - newline found in buffer.
            offset = self.offset - self.extrastart
            i = self.extrabuf.find('\n', offset) + 1
            if i > 0:
                self.extrasize -= i - offset
                self.offset += i - offset
                return self.extrabuf[offset: i]

            size = sys.maxint
            readsize = self.min_readsize
        else:
            readsize = size
        bufs = []
        while size != 0:
            c = self.read(readsize)
            i = c.find('\n')

            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if (size <= i) or (i == -1 and len(c) > size):
                i = size - 1

            if i >= 0 or c == '':
                bufs.append(c[:i + 1])    # Add portion of last chunk
                self._unread(c[i + 1:])   # Push back rest of chunk
                break

            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)
        if readsize > self.min_readsize:
            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
        return ''.join(bufs) # Return resulting line
def _test():
    # Act like gzip; with -d, act like gunzip.
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    if decompress:
        args = args[1:]
    if not args:
        # No file arguments: filter stdin to stdout.
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != ".gz":
                    print "filename doesn't end in .gz:", repr(arg)
                    continue
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            if arg == "-":
                f = sys.stdin
                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
            else:
                f = __builtin__.open(arg, "rb")
                g = open(arg + ".gz", "wb")
        # Pump source to destination in 1 KiB chunks.
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        # Never close the standard streams.
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()

if __name__ == '__main__':
    _test()
|
gpl-3.0
|
shawger/s-kape
|
lib/requests/packages/urllib3/util/url.py
|
713
|
5879
|
from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Fix: this was the typo ``slots = ()``, which silently left every
    # instance with a ``__dict__``. ``__slots__ = ()`` keeps Url
    # instances lightweight, matching the namedtuple contract.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize the path so it always begins with '/'.
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None

    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx = found
            best_delim = delim

    if best_idx is None:
        # No delimiter occurred at all.
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # Keep the brackets: everything through ']' is the host.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            # Not an IPv6 literal, so the part before ':' is the host.
            host = _host

        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
|
gpl-3.0
|
TheMadCoder1/Spambot
|
commands/data.py
|
2
|
1974
|
from classes import *
from helper_functions import *
def method(servers_and_threads, arguments):
    """Print all stored data for one spam-server account.

    The account email is taken from `arguments` when supplied, otherwise
    the user is prompted to pick one interactively.
    """
    print # Line break
    # Argument Check
    if len(arguments) != 0:
        email = arguments[0]
        if not helpers_for_commands.email_is_valid(email):
            print "You did not enter a valid email."
            return
        elif email not in servers_and_threads.keys():
            print "Email %s is not an existing email." % (email)
            return
    else:
        # Get email if no email is supplied as argument
        print "Existing emails:"
        for email_index, email in enumerate(servers_and_threads.keys()):
            print "[%d] - %s" % (email_index+1, email)
        print # Line break
        try:
            email_num = int(raw_input("What is the number of the email you would like to see the data of? "))
        except ValueError:
            print "You did not enter a number."
        else:
            # 1-based menu index -> dict key.
            if email_num > 0 and email_num <= len(servers_and_threads):
                email = servers_and_threads.keys()[email_num-1]
            else:
                print "Invalid number entered."
                return
    server = servers_and_threads[email]["Server"]
    print "Email: %s" % (server.email)
    print "Password: %s" % (server.password)
    print "Status: %s" % (server.status)
    print "Total messages sent: %s" % (server.messages_sent)
    print "Targets: %s" % (", ".join(server.targets))
    # Prints messages - linebreaks may be present so each is printed on a separate line
    print "Messages:"
    for index, message in enumerate(server.messages):
        print "%d - %s" % (index, message)
    print # Line break

# Uses the Command class to set up your command in a predefined format and makes it accessible in memory
command_object = command.Command("data", "Get all the data of a specified email", " <email>", 1, method) # " <email>" needs space at the beginning to be printed correctly in documentation
|
mit
|
rogersprates/word2vec-financial-sentiment
|
pmi/pmi.py
|
1
|
3966
|
#BRUNO IOCHINS GRISCI
import json
import sys
import math
import os
def create_vocabulary(news):
    """Return the set of all distinct tokens across every news item.

    news: dict mapping id -> {"text": [tokens...], "label": ...}
    """
    # Build the set incrementally instead of repeatedly concatenating
    # lists (the old approach was O(total_tokens**2) before the final
    # set() conversion).
    vocabulary = set()
    for ide in news:
        vocabulary.update(news[ide]["text"])
    return vocabulary
def count_labels(news):
    """Count items labeled "positive" vs. everything else, as floats."""
    n_positive = float(sum(1 for ide in news
                           if news[ide]["label"] == "positive"))
    n_negative = float(len(news)) - n_positive
    return n_positive, n_negative
def count_words(news, vocabulary):
    """Tally per-word counts under each label plus the total token count.

    Returns (positive_counts, negative_counts, total_words); both dicts
    are keyed by every word of *vocabulary* (missing words stay 0.0).
    """
    positive_counts = dict.fromkeys(vocabulary, 0.0)
    negative_counts = dict.fromkeys(vocabulary, 0.0)
    total_words = 0.0
    for ide in news:
        item = news[ide]
        # Anything not labeled "positive" counts as negative.
        target = positive_counts if item["label"] == "positive" else negative_counts
        for token in item["text"]:
            target[token] += 1.0
        total_words += len(item["text"])
    return positive_counts, negative_counts, total_words
def compute_pmi(p_word, p_label, p_word_label):
    """Pointwise mutual information: log2(p(w,l) / (p(w) * p(l))).

    Returns 0.0 when the joint probability is zero (log undefined).
    Also returns 0.0 when p_word * p_label is zero — the original
    performed the division before checking it, raising
    ZeroDivisionError for zero marginals.
    """
    denominator = p_word * p_label
    if denominator == 0.0 or p_word_label == 0.0:
        return 0.0
    return math.log(p_word_label / denominator, 2.0)
def pmi(path_input,path_output):
    """Rank vocabulary words by PMI with each label and dump to JSON.

    path_input: JSON training set {id: {"text": [...], "label": ...}}.
    path_output: JSON file receiving {"positive": [(word, pmi), ...],
    "negative": [...]}, each list sorted by descending PMI.
    """
    #news_file_path = os.getcwd() +'/../files/training_with_duplicates.json'
    news_file = open(path_input, "r")
    training_set = json.load(news_file)
    news_file.close()
    #print(training_set)
    vocabulary = create_vocabulary(training_set)
    #print(vocabulary)
    n_positive_news, n_negative_news = count_labels(training_set)
    #print(n_positive_news, n_negative_news)
    positive_words, negative_words, total_words = count_words(training_set, vocabulary)
    #print(positive_words)
    #print(negative_words)
    #print(total_words)
    # Totals of per-label word counts, used as joint-probability denominators.
    total_positive_words = 0.0
    for word in positive_words:
        total_positive_words += positive_words[word]
    total_negative_words = 0.0
    for word in negative_words:
        total_negative_words += negative_words[word]
    terms = {"positive":[], "negative":[]}
    for word in vocabulary:
        positive_pmi = compute_pmi((positive_words[word] + negative_words[word]) / total_words, n_positive_news / len(training_set), positive_words[word] / total_positive_words)
        terms["positive"].append((word, positive_pmi))
        negative_pmi = compute_pmi((positive_words[word] + negative_words[word]) / total_words, n_negative_news / len(training_set), negative_words[word] / total_negative_words)
        terms["negative"].append((word, negative_pmi))
    # Highest-PMI terms first.
    terms["positive"].sort(key=lambda tup: tup[1], reverse=True)
    terms["negative"].sort(key=lambda tup: tup[1], reverse=True)
    terms_file = open(path_output, "w")
    json.dump(terms, terms_file)
    terms_file.close()
def pmi_daily():
    """Compute PMI term rankings for both daily training sets."""
    #train data
    path_datatrain_json_1=os.getcwd() +'/../files/training_with_duplicates.json'
    path_datatrain_json_2=os.getcwd() +'/../files/training_without_duplicates.json'
    #output kterms
    pathoutput_k_terms1 = os.getcwd() +'/../files/terms01.json'
    pathoutput_k_terms2 = os.getcwd() +'/../files/terms02.json'
    # Each dataset only needs processing once; the original invoked pmi()
    # twice per file, doubling runtime for identical output.
    pmi(path_datatrain_json_1,pathoutput_k_terms1)
    pmi(path_datatrain_json_2,pathoutput_k_terms2)
def pmi_weekly():
    """Compute PMI term rankings for both weekly training sets."""
    #train data
    path_datatrain_json_1=os.getcwd() +'/../files2/weekly_with_duplicates_training.json'
    path_datatrain_json_2=os.getcwd() +'/../files2/weekly_without_duplicates_training.json'
    #output kterms
    pathoutput_k_terms1 = os.getcwd() +'/../files2/terms01.json'
    pathoutput_k_terms2 = os.getcwd() +'/../files2/terms02.json'
    # Each dataset only needs processing once; the original invoked pmi()
    # twice per file, doubling runtime for identical output.
    pmi(path_datatrain_json_1,pathoutput_k_terms1)
    pmi(path_datatrain_json_2,pathoutput_k_terms2)
|
mit
|
sameeptandon/sail-car-log
|
mapping/pipeline/ldr_to_h5.py
|
1
|
2958
|
import os
import argparse
import numpy as np
import h5py
from Q50_config import LoadParameters
from GPSReader import GPSReader
from GPSTransforms import IMUTransforms
from LidarTransforms import loadLDR
from pipeline_config import LANE_FILTER, PARAMS_TO_LOAD, OPT_POS_FILE
from LidarIntegrator import transform_points_in_sweep
'''
Essentially just pieces from LidarIntegrator except avoids
storing the data for all time steps in memory
Writes full point clouds for scan matching later
'''
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert ldr files to h5 files containing points')
    parser.add_argument('gps_file')
    parser.add_argument('ldr_file')
    parser.add_argument('h5_file')
    parser.add_argument('--no_transform', action='store_true')
    args = parser.parse_args()

    if not args.no_transform:
        # Load the (optimized) IMU pose track used to put points into a
        # common frame.
        gps_reader = GPSReader(args.gps_file)
        gps_data = gps_reader.getNumericData()
        #imu_transforms = IMUTransforms(gps_data)
        imu_transforms = np.load(OPT_POS_FILE)['data']
        params = LoadParameters(PARAMS_TO_LOAD)

    # FIXME Assuming that ldr file named after frame num
    fnum = int(os.path.splitext(os.path.basename(args.ldr_file))[0])

    data = loadLDR(args.ldr_file)
    if data.shape[0] == 0:
        print '%d data empty' % fnum
        raise

    # Filter
    # Distance of each return from the sensor origin.
    dist = np.sqrt(np.sum(data[:, 0:3] ** 2, axis=1))
    if LANE_FILTER:
        # Keep bright returns in a narrow lateral/height band
        # (presumably tuned for lane markings — see pipeline_config).
        data_filter_mask = (dist > 3) & \
                           (data[:, 3] > 40) & \
                           (np.abs(data[:, 1]) < 2.2) & \
                           (np.abs(data[:, 1]) > 1.2) & \
                           (data[:, 2] < -1.8) & \
                           (data[:, 2] > -2.5)
    else:
        data_filter_mask = (dist > 3) & \
                           (data[:, 2] > -5)
    filtered_data = data[data_filter_mask, :]
    if filtered_data.shape[0] == 0:
        print '%d data empty after filtering' % fnum
        # FIXME, hack, just include a single point
        data = data[0:1, :]
        #raise
    else:
        data = filtered_data

    pts = data[:, 0:3].transpose()
    if not args.no_transform:
        # Transform data into IMU frame at time t
        pts = np.vstack((pts, np.ones((1, pts.shape[1]))))
        T_from_l_to_i = params['lidar']['T_from_l_to_i']
        pts = np.dot(T_from_l_to_i, pts)
        # Microseconds till end of the sweep
        # TODO Switch everything to use transform_points_by_times
        times = data[:, 5]
        transform_points_in_sweep(pts, times, fnum, imu_transforms)
    pts = pts.transpose()

    # for exporting purposes
    # Columns: x, y, z, intensity, frame number.
    pts_copy = np.array(pts[:, 0:3])
    pts_copy = np.column_stack((pts_copy, np.array(data[:, 3])))
    pts_copy = np.column_stack((pts_copy, fnum*np.ones((pts.shape[0], 1))))

    h5f = h5py.File(args.h5_file, 'w')
    dset = h5f.create_dataset('points', pts_copy.shape, dtype='f')
    dset[...] = pts_copy
    h5f.close()
|
bsd-2-clause
|
mnahm5/django-estore
|
Lib/site-packages/boto/route53/domains/exceptions.py
|
151
|
1482
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
# Service error classes for the Route 53 Domains API. Each maps one
# server-side error type onto a distinct Python exception.

class DuplicateRequest(BotoServerError):
    """Service reported a DuplicateRequest error."""
    pass


class DomainLimitExceeded(BotoServerError):
    """Service reported a DomainLimitExceeded error."""
    pass


class InvalidInput(BotoServerError):
    """Service reported an InvalidInput error."""
    pass


class OperationLimitExceeded(BotoServerError):
    """Service reported an OperationLimitExceeded error."""
    pass


class UnsupportedTLD(BotoServerError):
    """Service reported an UnsupportedTLD error."""
    pass


class TLDRulesViolation(BotoServerError):
    """Service reported a TLDRulesViolation error."""
    pass
|
mit
|
OECFHTW/oecluster
|
ConfigReader.py
|
1
|
1281
|
#!/usr/bin/env python3
# Author: Dennis Strasser mailto:dennis.f.strasser@gmail.com
import configparser as cp
__version__ = "1.0"
class ConfigReader(object):
    """Reads an INI-style configuration file and exposes its sections.

    Thin wrapper around :class:`configparser.ConfigParser` that returns a
    whole section as a plain ``{option: value}`` dict of strings.
    (The previous docstring described an unrelated contact-book class and
    has been corrected.)
    """

    def __init__(self, config_file="./OECluster.cfg"):
        """Parse *config_file* immediately.

        :param config_file: Path of the config file to be parsed.
            NOTE: a missing or unreadable file is silently ignored by
            ``ConfigParser.read``; its sections then simply don't exist.
        """
        self._config_file = config_file
        self._config_parser = cp.ConfigParser()
        self._config_parser.read(config_file)

    def get_config_section(self, section):
        """Return every option of *section* as ``{option: value}``.

        Option names are lower-cased by ConfigParser; values are the raw
        strings from the file.  An option that cannot be read is reported
        on stdout and mapped to ``None`` instead of aborting the whole
        section (original best-effort behavior preserved).

        :param section: Name of the section to read.
        :raises configparser.NoSectionError: if *section* does not exist.
        """
        values = {}
        for option in self._config_parser.options(section):
            try:
                # Fix: the old code compared the returned string to -1 and
                # printed "skip" -- dead code, since get() returns str.
                values[option] = self._config_parser.get(section, option)
            except cp.Error:
                # Fix: was a bare `except:`; catch only configparser
                # errors so real bugs and KeyboardInterrupt propagate.
                print("exception on %s!" % option)
                values[option] = None
        return values
if __name__ == "__main__":
    # Library module: direct execution only prints a usage note.
    print("This class should not be called directly.")
|
gpl-3.0
|
YourCyborg/Sun-RPI
|
contrib/procpools/ampoule/util.py
|
11
|
1349
|
"""
some utilities
"""
import os
import sys
import __main__
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedAny
# from twisted.python.modules import theSystemPath
def findPackagePath(modulePath):
    """
    Try to find the sys.path entry from a modulePath object, simultaneously
    computing the module name of the targetted file.

    Walks upward from the module file, prepending a package-name component
    for every ancestor directory that contains an ``__init__.*`` marker.

    :param modulePath: a twisted ``FilePath`` pointing at the module file.
    :return: ``(packageRoot, dottedName)`` tuple.
    """
    p = modulePath
    l = [p.basename().split(".")[0]]  # module name without its extension
    while p.parent() != p:
        for extension in ['py', 'pyc', 'pyo', 'pyd', 'dll']:
            sib = p.sibling("__init__."+extension)
            if sib.exists():
                # Still inside a package: record the package dir and ascend.
                p = p.parent()
                l.insert(0, p.basename())
                break
        else:
            # No __init__ marker next to `p`: its parent is the sys.path
            # root and `l` holds the dotted-name components.
            return p.parent(), '.'.join(l)
    # NOTE(review): if the walk reaches the filesystem root this falls
    # through and implicitly returns None; callers that unpack the result
    # would then raise TypeError -- presumably unreachable in practice.
def mainpoint(function):
    """
    Decorator which declares a function to be an object's mainpoint.

    Runs at decoration time: if the decorated function's module is being
    executed as the script entry point (``__main__``), the module's real
    package path is put on sys.path, the function is re-resolved by its
    dotted name and invoked with ``sys.argv``, and the process exits with
    its return value (``None`` is treated as exit code 0).
    """
    if function.__module__ == '__main__':
        # OK time to run a function
        p = FilePath(__main__.__file__)
        p, mn = findPackagePath(p)
        pname = p.path
        if pname not in map(os.path.abspath, sys.path):
            sys.path.insert(0, pname)
            # Maybe remove the module's path?
        # namedAny presumably re-imports the module under its package name
        # so the function runs with a proper __module__ -- confirm against
        # twisted.python.reflect docs.
        exitcode = namedAny(mn+'.'+function.__name__)(sys.argv)
        if exitcode is None:
            exitcode = 0
        sys.exit(exitcode)
    return function
|
bsd-3-clause
|
bigdatauniversity/edx-platform
|
cms/djangoapps/contentstore/tests/test_course_create_rerun.py
|
56
|
6545
|
"""
Test view handler for rerun (and eventually create)
"""
import ddt
from mock import patch
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import UserFactory
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from datetime import datetime
from xmodule.course_module import CourseFields
from util.organizations_helpers import (
add_organization,
get_course_organizations,
)
@ddt.ddt
class TestCourseListing(ModuleStoreTestCase):
    """
    Unit tests for getting the list of courses for a logged in user
    """
    def setUp(self):
        """
        Add a user and a course
        """
        super(TestCourseListing, self).setUp()
        # create and log in a staff user.
        # create and log in a non-staff user
        self.user = UserFactory()
        self.factory = RequestFactory()
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.user.username, password='test')
        self.course_create_rerun_url = reverse('course_handler')
        # Source course that the rerun tests copy from.
        source_course = CourseFactory.create(
            org='origin',
            number='the_beginning',
            run='first',
            display_name='the one and only',
            start=datetime.utcnow()
        )
        self.source_course_key = source_course.id
        for role in [CourseInstructorRole, CourseStaffRole]:
            role(self.source_course_key).add_users(self.user)

    def tearDown(self):
        """
        Reverse the setup
        """
        self.client.logout()
        # NOTE(review): calls the base class directly instead of super();
        # preserved as-is in case the MRO behavior is relied upon.
        ModuleStoreTestCase.tearDown(self)

    def test_rerun(self):
        """
        Just testing the functionality the view handler adds over the tasks tested in test_clone_course
        """
        # NOTE: `unicode` below marks this as Python 2 source.
        response = self.client.ajax_post(self.course_create_rerun_url, {
            'source_course_key': unicode(self.source_course_key),
            'org': self.source_course_key.org, 'course': self.source_course_key.course, 'run': 'copy',
            'display_name': 'not the same old name',
        })
        self.assertEqual(response.status_code, 200)
        data = parse_json(response)
        dest_course_key = CourseKey.from_string(data['destination_course_key'])
        self.assertEqual(dest_course_key.run, 'copy')
        dest_course = self.store.get_course(dest_course_key)
        # Rerun must reset the start date to the field default, not copy it.
        self.assertEqual(dest_course.start, CourseFields.start.default)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_newly_created_course_has_web_certs_enabled(self, store):
        """
        Tests newly created course has web certs enabled by default.
        """
        with modulestore().default_store(store):
            response = self.client.ajax_post(self.course_create_rerun_url, {
                'org': 'orgX',
                'number': 'CS101',
                'display_name': 'Course with web certs enabled',
                'run': '2015_T2'
            })
            self.assertEqual(response.status_code, 200)
            data = parse_json(response)
            new_course_key = CourseKey.from_string(data['course_key'])
            course = self.store.get_course(new_course_key)
            self.assertTrue(course.cert_html_view_enabled)

    @patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': False})
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_course_creation_without_org_app_enabled(self, store):
        """
        Tests course creation workflow should not create course to org
        link if organizations_app is not enabled.
        """
        with modulestore().default_store(store):
            response = self.client.ajax_post(self.course_create_rerun_url, {
                'org': 'orgX',
                'number': 'CS101',
                'display_name': 'Course with web certs enabled',
                'run': '2015_T2'
            })
            self.assertEqual(response.status_code, 200)
            data = parse_json(response)
            new_course_key = CourseKey.from_string(data['course_key'])
            course_orgs = get_course_organizations(new_course_key)
            self.assertEqual(course_orgs, [])

    @patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_course_creation_with_org_not_in_system(self, store):
        """
        Tests course creation workflow when course organization does not exist
        in system.
        """
        with modulestore().default_store(store):
            response = self.client.ajax_post(self.course_create_rerun_url, {
                'org': 'orgX',
                'number': 'CS101',
                'display_name': 'Course with web certs enabled',
                'run': '2015_T2'
            })
            # With the org app enabled and no such org, creation is rejected.
            self.assertEqual(response.status_code, 400)
            data = parse_json(response)
            self.assertIn(u'Organization you selected does not exist in the system', data['error'])

    @patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_course_creation_with_org_in_system(self, store):
        """
        Tests course creation workflow when course organization exist in system.
        """
        add_organization({
            'name': 'Test Organization',
            'short_name': 'orgX',
            'description': 'Testing Organization Description',
        })
        with modulestore().default_store(store):
            response = self.client.ajax_post(self.course_create_rerun_url, {
                'org': 'orgX',
                'number': 'CS101',
                'display_name': 'Course with web certs enabled',
                'run': '2015_T2'
            })
            self.assertEqual(response.status_code, 200)
            data = parse_json(response)
            new_course_key = CourseKey.from_string(data['course_key'])
            course_orgs = get_course_organizations(new_course_key)
            self.assertEqual(len(course_orgs), 1)
            self.assertEqual(course_orgs[0]['short_name'], 'orgX')
|
agpl-3.0
|
collects/VTK
|
IO/Parallel/Testing/Python/TestPImageWriter.py
|
26
|
1106
|
#!/usr/bin/env python
# Image pipeline
# NOTE(review): this looks like an incomplete machine translation of a VTK
# Tcl test -- `vtk`, `VTK_DATA_ROOT`, `catch`, `file` and `test` are never
# imported/defined here, so the writer branch below cannot run as written;
# confirm against the upstream TestPImageWriter harness.
image1 = vtk.vtkTIFFReader()
image1.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
image1.Update()
#
# If the current directory is writable, then test the writers
#
if (catch.catch(globals(),"""channel = open(test.tmp, w)""") == 0):
    channel.close()
    file.delete("-force", test.tmp)
    piw = vtk.vtkPImageWriter()
    piw.SetInputConnection(image1.GetOutputPort())
    piw.SetFileName(piw.raw)
    # MemoryLimit of 1 forces the parallel writer to stream in small pieces.
    piw.SetMemoryLimit(1)
    piw.Write()
    file.delete("-force", piw.raw)
    pass
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(image1.GetOutputPort())
viewer.SetColorWindow(255)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
|
bsd-3-clause
|
vmax-feihu/hue
|
desktop/core/ext-py/requests-2.6.0/requests/compat.py
|
1039
|
1469
|
# -*- coding: utf-8 -*-

"""
pythoncompat

Python 2/3 compatibility shims: detects the interpreter major version and
re-exports a uniform set of names (urllib pieces, cookie handling,
StringIO, OrderedDict, json, and string/number type aliases) so the rest
of the package can import from one place.
"""

from .packages import chardet  # re-exported for consumers of this module

import sys

# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    import simplejson as json
except (ImportError, SyntaxError):
    # simplejson does not support Python 3.2, it throws a SyntaxError
    # because of u'...' Unicode literals.
    import json

# ---------
# Specifics
# ---------

if is_py2:
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict

    # NOTE: these assignments deliberately shadow the builtins so that
    # `str`/`bytes`/`basestring` mean the same thing on both interpreters.
    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict

    # Same uniform aliases on Python 3 (str/bytes are already correct).
    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
|
apache-2.0
|
redhat-openstack/nova
|
nova/compute/task_states.py
|
96
|
3443
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible task states for instances.
Compute instance task states represent what is happening to the instance at the
current moment. These tasks can be generic, such as 'spawning', or specific,
such as 'block_device_mapping'. These task states allow for a better view into
what an instance is doing and should be displayed to users/administrators as
necessary.
"""
# possible task states during create()
SCHEDULING = 'scheduling'
BLOCK_DEVICE_MAPPING = 'block_device_mapping'
NETWORKING = 'networking'
SPAWNING = 'spawning'
# possible task states during snapshot()
IMAGE_SNAPSHOT = 'image_snapshot'
IMAGE_SNAPSHOT_PENDING = 'image_snapshot_pending'
IMAGE_PENDING_UPLOAD = 'image_pending_upload'
IMAGE_UPLOADING = 'image_uploading'
# possible task states during backup()
IMAGE_BACKUP = 'image_backup'
# possible task states during set_admin_password()
UPDATING_PASSWORD = 'updating_password'
# possible task states during resize()
RESIZE_PREP = 'resize_prep'
RESIZE_MIGRATING = 'resize_migrating'
RESIZE_MIGRATED = 'resize_migrated'
RESIZE_FINISH = 'resize_finish'
# possible task states during revert_resize()
RESIZE_REVERTING = 'resize_reverting'
# possible task states during confirm_resize()
RESIZE_CONFIRMING = 'resize_confirming'
# possible task states during reboot()
REBOOTING = 'rebooting'
REBOOT_PENDING = 'reboot_pending'
REBOOT_STARTED = 'reboot_started'
REBOOTING_HARD = 'rebooting_hard'
REBOOT_PENDING_HARD = 'reboot_pending_hard'
REBOOT_STARTED_HARD = 'reboot_started_hard'
# possible task states during pause()
PAUSING = 'pausing'
# possible task states during unpause()
UNPAUSING = 'unpausing'
# possible task states during suspend()
SUSPENDING = 'suspending'
# possible task states during resume()
RESUMING = 'resuming'
# possible task states during power_off()
POWERING_OFF = 'powering-off'
# possible task states during power_on()
POWERING_ON = 'powering-on'
# possible task states during rescue()
RESCUING = 'rescuing'
# possible task states during unrescue()
UNRESCUING = 'unrescuing'
# possible task states during rebuild()
REBUILDING = 'rebuilding'
REBUILD_BLOCK_DEVICE_MAPPING = "rebuild_block_device_mapping"
REBUILD_SPAWNING = 'rebuild_spawning'
# possible task states during live_migrate()
MIGRATING = "migrating"
# possible task states during delete()
DELETING = 'deleting'
# possible task states during soft_delete()
SOFT_DELETING = 'soft-deleting'
# possible task states during restore()
RESTORING = 'restoring'
# possible task states during shelve()
SHELVING = 'shelving'
SHELVING_IMAGE_PENDING_UPLOAD = 'shelving_image_pending_upload'
SHELVING_IMAGE_UPLOADING = 'shelving_image_uploading'
# possible task states during shelve_offload()
SHELVING_OFFLOADING = 'shelving_offloading'
# possible task states during unshelve()
UNSHELVING = 'unshelving'
|
apache-2.0
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/molecule/inchiparsingTest.py
|
2
|
10760
|
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import re
import unittest
from external.wip import work_in_progress
from rmgpy.species import Species
from .adjlist import ConsistencyChecker
from .molecule import Molecule
from .util import retrieveElementCount
from .inchi import compose_aug_inchi, P_LAYER_PREFIX, P_LAYER_SEPARATOR, U_LAYER_PREFIX, U_LAYER_SEPARATOR
from .parser import *
class InChIParsingTest(unittest.TestCase):
    """Round-trip tests for (augmented) InChI parsing: each case parses an
    InChI string (plus optional unpaired-electron u-layer and lone-pair
    p-layer indices) into a Molecule and checks that the recomputed
    augmented InChI matches the input."""

    def compare(self, inchi, u_indices=None, p_indices = None):
        """Build an augmented InChI from *inchi* and the index lists, parse
        it, sanity-check multiplicity and partial charges, and assert the
        species' recomputed augmented InChI (prefix ignored) matches.
        Returns the parsed Molecule."""
        u_layer = U_LAYER_PREFIX + U_LAYER_SEPARATOR.join(map(str, u_indices)) if u_indices else None
        p_layer = P_LAYER_PREFIX + P_LAYER_SEPARATOR.join(map(str, p_indices)) if p_indices else None

        aug_inchi = compose_aug_inchi(inchi, u_layer, p_layer)

        mol = fromAugmentedInChI(Molecule(), aug_inchi)
        ConsistencyChecker.check_multiplicity(mol.getRadicalCount(), mol.multiplicity)
        for at in mol.atoms:
            ConsistencyChecker.check_partial_charge(at)

        spc = Species(molecule=[mol])
        spc.generate_resonance_structures()

        # Strip the "InChI=1" / "InChI=1S" prefix before comparing.
        ignore_prefix = r"(InChI=1+)(S*)/"
        aug_inchi_expected = re.split(ignore_prefix, aug_inchi)[-1]
        aug_inchi_computed = re.split(ignore_prefix, spc.getAugmentedInChI())[-1]
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(aug_inchi_expected, aug_inchi_computed)

        return mol

    def test_Ethane_parsing(self):
        inchi = 'C2H6/c1-2/h1-2H3'
        self.compare(inchi)

    def test_Ethyl_parsing(self):
        inchi = 'C2H5/c1-2/h1H2,2H3'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def test_CH3_parsing(self):
        inchi = 'CH3/h1H3'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def test_H2_parsing(self):
        inchi = 'H2/h1H'
        self.compare(inchi)

    def test_C2H4_biradical_parsing(self):
        inchi = 'C2H4/c1-2/h1-2H2'
        u_indices = [1,2]
        self.compare(inchi, u_indices)

    def test_C2H3_triradical_parsing(self):
        inchi = 'C2H3/c1-2/h1H,2H2'
        u_indices = [1,1,2]
        self.compare(inchi, u_indices)

    def test_C3H6_biradical_parsing(self):
        inchi = 'C3H6/c1-3-2/h1-3H2'
        u_indices = [1,2]
        self.compare(inchi, u_indices)

    def testC2H3O3(self):
        # Adjacency list kept for reference; the test itself only uses the
        # InChI below.
        adjlist = '''
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {5,S}
3 O u1 p2 c0 {2,S}
4 O u0 p2 c0 {5,S} {8,S}
5 O u0 p2 c0 {2,S} {4,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {4,S}
'''
        inchi = 'C2H3O3/c1-2(3)5-4/h4H,1H2'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def testC2H2(self):
        inchi = 'C2H2/c1-2/h1-2H'
        u_indices = [1,2]
        mol = self.compare(inchi, u_indices)

    def testO2(self):
        inchi = 'O2/c1-2'
        u_indices = [1,2]
        self.compare(inchi, u_indices)

    def testTriRadicalZwitterMult4(self):
        inchi = 'C6H11/c1-3-5-6-4-2/h5H,1-4,6H2'
        u_indices = [1,2,5]
        self.compare(inchi, u_indices)

    def testTriRadicalDoubleBondMult4(self):
        inchi = 'C4H7/c1-3-4-2/h3H,1-2,4H2'
        u_indices = [1,2,3]
        self.compare(inchi, u_indices)

    def testTriRadical2DoubleBondMult4(self):
        inchi = 'C6H9/c1-4-6(3)5-2/h1,4-6H,2H2,3H3'
        u_indices = [1, 2, 5]
        self.compare(inchi, u_indices)

    def testQuadriRadicalDoubleBondZwitterMult5(self):
        inchi = 'C8H14/c1-4-6-7-8(3)5-2/h5-6,8H,1-2,4,7H2,3H3'
        u_indices = [1, 2, 5, 6]
        mol = self.compare(inchi, u_indices)

    def testQuadri2DoubleBondMult5(self):
        inchi = 'C8H14/c1-5-7(3)8(4)6-2/h5-8H,1-2H2,3-4H3'
        u_indices = [1, 2, 5, 6]
        self.compare(inchi, u_indices)

    def testC5H6O(self):
        inchi = 'C5H6O/c6-5-3-1-2-4-5/h1-3,5H,4H2'
        u_indices = [2, 6]
        self.compare(inchi, u_indices)

    def testC5H6O_2(self):
        inchi = 'C5H6O/c1-5-3-2-4-6-5/h2-5H,1H2'
        u_indices = [1,3]
        self.compare(inchi, u_indices)

    def testC5H6O_3(self):
        # Same InChI as testC5H6O_2 but with four unpaired electrons.
        inchi = 'C5H6O/c1-5-3-2-4-6-5/h2-5H,1H2'
        u_indices = [1,2,3,4]
        self.compare(inchi, u_indices)

    @work_in_progress
    def testCO(self):
        inchi = 'CO/c1-2'
        p_indices = [1,2]
        mol = self.compare(inchi, [], p_indices)
        assert mol.atoms[1].lonePairs == 1 # Oxygen
        assert mol.atoms[0].charge == -1
        assert mol.atoms[1].charge == +1

    def testTripletMethylene(self):
        inchi = 'CH2/h1H2'
        u_indices = [1,1]
        self.compare(inchi, u_indices)

    def testSingletMethylene(self):
        inchi = 'CH2/h1H2'
        p_indices = [1]
        self.compare(inchi, u_indices=[], p_indices=p_indices)

    def testC4H6O(self):
        inchi = 'C4H6O/c1-2-3-4-5/h2H,3H2,1H3'
        u_indices = [2,4]
        mol = self.compare(inchi, u_indices)
        for at in mol.atoms:
            if at.isOxygen():
                self.assertTrue(at.lonePairs == 2)

    def testC6H6(self):
        inchi = 'C6H6/c1-3-5-6-4-2/h1,6H,2,5H2'
        u_indices = [1, 3]
        mol = self.compare(inchi, u_indices)

    def testC4H6O_2(self):
        inchi = 'C4H6O/c1-2-3-4-5/h2,4H,1,3H2'
        u_indices = [4, 5]
        mol = self.compare(inchi, u_indices)

    def test_CO_triplet(self):
        # Round-trips via adjacency list instead of the compare() helper.
        adjlist = """
        multiplicity 3
        1 C u2 p0 c0 {2,D}
        2 O u0 p2 c0 {1,D}
        """
        spc = Species(molecule=[Molecule().fromAdjacencyList(adjlist)])
        aug_inchi = spc.getAugmentedInChI()

        self.assertEqual(Species(molecule=[Molecule().fromAugmentedInChI(aug_inchi)]).isIsomorphic(spc), True)

    def test_CCCO_triplet(self):
        adjlist = """
        multiplicity 3
        1 C u0 p0 c0 {2,D} {5,S} {6,S}
        2 C u0 p0 c0 {1,D} {3,S} {7,S}
        3 C u1 p0 c0 {2,S} {4,S} {8,S}
        4 O u1 p2 c0 {3,S}
        5 H u0 p0 c0 {1,S}
        6 H u0 p0 c0 {1,S}
        7 H u0 p0 c0 {2,S}
        8 H u0 p0 c0 {3,S}
        """
        mol = Molecule().fromAdjacencyList(adjlist)

        spc = Species(molecule=[mol])
        spc.generate_resonance_structures()
        aug_inchi = spc.getAugmentedInChI()

        self.assertEqual(Species(molecule=[Molecule().fromAugmentedInChI(aug_inchi)]).isIsomorphic(spc), True)

    def testC3H4(self):
        inchi = 'C3H4/c1-3-2/h1,3H,2H2'
        u_indices = [1, 1]
        mol = self.compare(inchi, u_indices)

    def test_C6H8O2(self):
        inchi = 'C6H8O2/c1-3-5(7)6(8)4-2/h3-6H,1-2H2'
        u_indices = [7,8]
        self.compare(inchi, u_indices)

    def test_C3H3O3(self):
        inchi = 'C3H3O3/c1-2-5-3-6-4/h1-3H'
        u_indices = [1,3,4]
        self.compare(inchi, u_indices)

    def test_CH2O2(self):
        inchi = 'CH2O2/c2-1-3/h1H,(H,2,3)'
        u_indices = [1,2]
        self.compare(inchi, u_indices)

    def test_C2H2O3(self):
        inchi = 'C2H2O3/c1-5-2(3)4/h1H2'
        u_indices = [1,3]
        self.compare(inchi, u_indices)

    def test_C3H4O4(self):
        inchi = 'C3H4O4/c4-3(5)1-2-7-6/h1-3,6H'
        u_indices = [4,5]
        self.compare(inchi, u_indices)

    def test_C6H6O4(self):
        inchi = 'InChI=1S/C6H6O4/c1-2-4-9-6(7)3-5-10-8/h2-3H,1,5H2'
        u_indices = [1,3,4,8]
        self.compare(inchi, u_indices)

    def test_C3H2O3(self):
        inchi = 'InChI=1S/C3H2O3/c1-2-3(4)6-5/h1H2'
        u_indices = [2,5]
        self.compare(inchi, u_indices)

    def test_C6H6O6(self):
        inchi = 'C6H6O6/c7-6(2-5-12-9)10-3-1-4-11-8/h1,7H,4-5H2'
        u_indices = [2,3,8,9]
        self.compare(inchi, u_indices)

    def test_C3H2(self):
        inchi = 'C3H2/c1-3-2/h1-2H'
        u_indices = [1,1]
        self.compare(inchi, u_indices)

    def test_C3H4(self):
        # NOTE(review): this method name shadows nothing (testC3H4 above has
        # no underscore) but tests the same formula with an explicit prefix.
        inchi = 'InChI=1S/C3H4/c1-3-2/h1,3H,2H2'
        u_indices = [1,1]
        self.compare(inchi, u_indices)

    def test_C6H8(self):
        inchi = 'InChI=1S/C6H8/c1-3-5-6-4-2/h1,4H,2,5-6H2'
        u_indices = [1,1,3,3]
        self.compare(inchi, u_indices)

    def test_C6H10(self):
        inchi = 'InChI=1S/C6H10/c1-3-5-6-4-2/h3-4H,1-2,5-6H2'
        u_indices = [1,3]
        self.compare(inchi, u_indices)

    def test_ammonia(self):
        inchi = 'InChI=1S/H3N/h1H3'
        self.compare(inchi)

    @work_in_progress
    def test_ammonium(self):
        """
        has same inchi as ammonia but gets a proton layer: /p+1
        """
        inchi = 'InChI=1S/H3N/h1H3/p+1'
        self.compare(inchi)

    def test_H2S(self):
        inchi = 'InChI=1S/H2S/h1H2'
        self.compare(inchi)

    def test_pyridine(self):
        inchi = 'InChI=1S/C5H5N/c1-2-4-6-5-3-1/h1-5H'
        self.compare(inchi)

    def test_pyrimidine(self):
        inchi = 'InChI=1S/C4H4N2/c1-2-5-4-6-3-1/h1-4H'
        self.compare(inchi)

    @work_in_progress
    def test_nitrate(self):
        """
        - Mobile H spread over oxygen 2, 3, 4
        - Negative charge (3 lone pairs) spread out over oxygen 2, 3, 4
        - Nitrogen 1 positively charged
        """
        inchi = 'InChI=1S/HNO3/c2-1(3)4/h(H,2,3,4)'
        p_indices = [-1, 3, 3, 3]#???
        mol = self.compare(inchi, [], p_indices)

    def test_NO(self):
        inchi = 'InChI=1S/NO/c1-2'
        u_indices = [1]
        mol = self.compare(inchi, u_indices)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
mit
|
s20121035/rk3288_android5.1_repo
|
external/chromium_org/chrome/common/extensions/docs/server2/github_file_system.py
|
94
|
7358
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from StringIO import StringIO
import posixpath
from appengine_blobstore import AppEngineBlobstore, BLOBSTORE_GITHUB
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import urlfetch, blobstore
from docs_server_utils import StringIdentity
from file_system import FileSystem, StatInfo
from future import Future
from path_util import IsDirectory
import url_constants
from zipfile import ZipFile, BadZipfile
# Pseudo-path used both as the fetch target and the stat/cache key for the
# repository's zip archive.
ZIP_KEY = 'zipball'
# Optional Github credentials; when left as None they are loaded from the
# password object store in GithubFileSystem.__init__ instead.
USERNAME = None
PASSWORD = None
def _MakeBlobstoreKey(version):
  """Blobstore key under which the zipball for |version| is cached."""
  return '%s.%s' % (ZIP_KEY, version)
def _GetAsyncFetchCallback(fetcher,
                           username,
                           password,
                           blobstore,
                           key_to_set,
                           key_to_delete=None):
  """Kicks off an async fetch of the repo zipball and returns a 0-arg
  resolver.

  The resolver returns a ZipFile of the fetched archive (caching the raw
  blob in |blobstore| under |key_to_set|), or None on download/zip errors.
  If |key_to_delete| is given, the stale cached blob for that version is
  deleted first.
  """
  fetch = fetcher.FetchAsync(ZIP_KEY, username=username, password=password)

  def resolve():
    try:
      result = fetch.Get()
      # Check if Github authentication failed.
      if result.status_code == 401:
        logging.error('Github authentication failed for %s, falling back to '
                      'unauthenticated.' % USERNAME)
        blob = fetcher.Fetch(ZIP_KEY).content
      else:
        blob = result.content
    except urlfetch.DownloadError as e:
      # Fix: previously logged 'Bad github zip file' here, copy-pasted from
      # the BadZipfile handler below; this is a fetch failure, not a bad zip.
      logging.error('Github download error: %s' % e)
      return None

    if key_to_delete is not None:
      # Fix: _MakeBlobstoreKey takes a single argument; BLOBSTORE_GITHUB is
      # the namespace argument of blobstore.Delete (the old code passed it
      # to _MakeBlobstoreKey, which would raise TypeError). This now matches
      # the Delete/Set calls in GithubFileSystem._GetZip.
      blobstore.Delete(_MakeBlobstoreKey(key_to_delete), BLOBSTORE_GITHUB)

    try:
      return_zip = ZipFile(StringIO(blob))
    except BadZipfile as e:
      logging.error('Bad github zip file: %s' % e)
      return None

    blobstore.Set(_MakeBlobstoreKey(key_to_set), blob, BLOBSTORE_GITHUB)
    return return_zip

  return resolve
class GithubFileSystem(FileSystem):
  """FileSystem backed by the zipball of a Github repository.

  The zip is cached in blobstore keyed by repo version; Stat() resolves the
  version to the repo's HEAD commit sha via the Github commits API.
  """

  @staticmethod
  def CreateChromeAppsSamples(object_store_creator):
    # Convenience factory bound to the chrome-app-samples repository.
    return GithubFileSystem(
        '%s/GoogleChrome/chrome-app-samples' % url_constants.GITHUB_REPOS,
        AppEngineBlobstore(),
        object_store_creator)

  def __init__(self, url, blobstore, object_store_creator):
    # If we key the password store on the app version then the whole advantage
    # of having it in the first place is greatly lessened (likewise it should
    # always start populated).
    password_store = object_store_creator.Create(
        GithubFileSystem,
        app_version=None,
        category='password',
        start_empty=False)
    # Credentials come from the module globals when set, otherwise from the
    # persistent password store (and globals are written back to the store).
    if USERNAME is None:
      password_data = password_store.GetMulti(('username', 'password')).Get()
      self._username, self._password = (password_data.get('username'),
                                        password_data.get('password'))
    else:
      password_store.SetMulti({'username': USERNAME, 'password': PASSWORD})
      self._username, self._password = (USERNAME, PASSWORD)

    self._url = url
    self._fetcher = AppEngineUrlFetcher(url)
    self._blobstore = blobstore
    self._stat_object_store = object_store_creator.Create(GithubFileSystem)
    self._version = None
    # Eagerly start fetching/loading the zip for the current version.
    self._GetZip(self.Stat(ZIP_KEY).version)

  def _GetZip(self, version):
    """Populate self._zip_file (a Future of ZipFile-or-None) for |version|,
    from the blobstore cache when possible, else via an async fetch."""
    try:
      blob = self._blobstore.Get(_MakeBlobstoreKey(version), BLOBSTORE_GITHUB)
    except blobstore.BlobNotFoundError:
      self._zip_file = Future(value=None)
      return
    if blob is not None:
      try:
        self._zip_file = Future(value=ZipFile(StringIO(blob)))
      except BadZipfile as e:
        # Cached blob is corrupt: evict it rather than serving it again.
        self._blobstore.Delete(_MakeBlobstoreKey(version), BLOBSTORE_GITHUB)
        logging.error('Bad github zip file: %s' % e)
        self._zip_file = Future(value=None)
    else:
      self._zip_file = Future(
          callback=_GetAsyncFetchCallback(self._fetcher,
                                          self._username,
                                          self._password,
                                          self._blobstore,
                                          version,
                                          key_to_delete=self._version))
    self._version = version

  def _ReadFile(self, path):
    """Return the contents of |path| inside the zip, or '' on any error."""
    try:
      zip_file = self._zip_file.Get()
    except Exception as e:
      logging.error('Github ReadFile error: %s' % e)
      return ''
    if zip_file is None:
      logging.error('Bad github zip file.')
      return ''
    # Zip entries are all under a single top-level directory; prepend it.
    prefix = zip_file.namelist()[0]
    return zip_file.read(prefix + path)

  def _ListDir(self, path):
    """Return the immediate children of directory |path|, or [] on error."""
    try:
      zip_file = self._zip_file.Get()
    except Exception as e:
      logging.error('Github ListDir error: %s' % e)
      return []
    if zip_file is None:
      logging.error('Bad github zip file.')
      return []
    filenames = zip_file.namelist()
    # Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
    filenames = [f[len(filenames[0]):] for f in filenames]
    # Remove the path of the directory we're listing from the filenames.
    filenames = [f[len(path):] for f in filenames
                 if f != path and f.startswith(path)]
    # Remove all files not directly in this directory.
    return [f for f in filenames if f[:-1].count('/') == 0]

  def Read(self, paths, skip_not_found=False):
    """Read files/directories, refreshing the zip if the repo version moved.

    NOTE(review): skip_not_found is accepted for interface compatibility but
    is not honored here -- missing paths surface as '' / [].
    """
    version = self.Stat(ZIP_KEY).version
    if version != self._version:
      self._GetZip(version)
    result = {}
    for path in paths:
      if IsDirectory(path):
        result[path] = self._ListDir(path)
      else:
        result[path] = self._ReadFile(path)
    return Future(value=result)

  def _DefaultStat(self, path):
    """Fallback StatInfo (version 0) used when Github cannot be reached."""
    version = 0
    # TODO(kalman): we should replace all of this by wrapping the
    # GithubFileSystem in a CachingFileSystem. A lot of work has been put into
    # CFS to be robust, and GFS is missing out.
    # For example: the following line is wrong, but it could be moot.
    self._stat_object_store.Set(path, version)
    return StatInfo(version)

  def Stat(self, path):
    """Return StatInfo whose version is the repo HEAD commit sha (cached)."""
    version = self._stat_object_store.Get(path).Get()
    if version is not None:
      return StatInfo(version)
    # NOTE(review): uses the module-level USERNAME/PASSWORD here rather than
    # self._username/self._password as the fetch path does -- presumably
    # intentional, but worth confirming.
    try:
      result = self._fetcher.Fetch('commits/HEAD',
                                   username=USERNAME,
                                   password=PASSWORD)
    except urlfetch.DownloadError as e:
      logging.warning('GithubFileSystem Stat: %s' % e)
      return self._DefaultStat(path)

    # Check if Github authentication failed.
    if result.status_code == 401:
      logging.warning('Github authentication failed for %s, falling back to '
                      'unauthenticated.' % USERNAME)
      try:
        result = self._fetcher.Fetch('commits/HEAD')
      except urlfetch.DownloadError as e:
        logging.warning('GithubFileSystem Stat: %s' % e)
        return self._DefaultStat(path)

    # Parse response JSON - but sometimes github gives us invalid JSON.
    try:
      version = json.loads(result.content)['sha']
      self._stat_object_store.Set(path, version)
      return StatInfo(version)
    except StandardError as e:
      logging.warning(
          ('%s: got invalid or unexpected JSON from github. Response status ' +
           'was %s, content %s') % (e, result.status_code, result.content))
      return self._DefaultStat(path)

  def GetIdentity(self):
    # Identity is class name + hash of the repo URL.
    return '%s@%s' % (self.__class__.__name__, StringIdentity(self._url))
|
gpl-3.0
|
matiasbastos/OpenBazaar
|
db/migrations/migration4.py
|
13
|
1172
|
#!/usr/bin/env python
from sqlite3 import dbapi2
from db.migrations import migrations_util
from node import constants
def upgrade(db_path):
    # Apply migration 4: add the settings.namecoin_id column.
    # NOTE(review): Python 2 source (print statements). The passphrase is
    # interpolated into the PRAGMA string -- acceptable only because
    # DB_PASSPHRASE is a trusted in-process constant, never user input.
    with dbapi2.connect(db_path) as con:
        cur = con.cursor()
        # Use PRAGMA key to encrypt / decrypt database.
        cur.execute("PRAGMA key = '%s';" % constants.DB_PASSPHRASE)
        try:
            cur.execute("ALTER TABLE settings "
                        "ADD COLUMN namecoin_id TEXT")
            print 'Upgraded'
            con.commit()
        except dbapi2.Error as exc:
            # Best-effort: report and continue (e.g. column already exists).
            print 'Exception: %s' % exc
def downgrade(db_path):
    # Revert migration 4: drop the settings.namecoin_id column.
    # NOTE(review): stock SQLite did not support "ALTER TABLE ... DROP
    # COLUMN" before 3.35 -- confirm the deployed SQLite/SQLCipher build
    # supports it; unlike upgrade(), errors here are not caught.
    with dbapi2.connect(db_path) as con:
        cur = con.cursor()
        # Use PRAGMA key to encrypt / decrypt database.
        cur.execute("PRAGMA key = '%s';" % constants.DB_PASSPHRASE)
        cur.execute("ALTER TABLE settings DROP COLUMN namecoin_id")
        print 'Downgraded'
        con.commit()
def main():
    # Parse --path/action arguments and dispatch; anything other than
    # "upgrade" runs the downgrade.
    parser = migrations_util.make_argument_parser(constants.DB_PATH)
    args = parser.parse_args()
    if args.action == "upgrade":
        upgrade(args.path)
    else:
        downgrade(args.path)
if __name__ == "__main__":
    # Script entry point.
    main()
|
mit
|
ApuliaSoftware/odoo
|
addons/document/wizard/__init__.py
|
444
|
1084
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
gnieboer/tensorflow
|
tensorflow/python/kernel_tests/sparse_split_op_test.py
|
138
|
13629
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseSplitOpTest(test.TestCase):
  """Tests for sparse_ops.sparse_split along rows, columns, and inner dims.

  Fixtures encode each nonzero's position in its value (10*row + col for the
  2-D tensors), so the expected index/value lists below can be checked by eye.
  """
  def _SparseTensor_4x6(self):
    """Return a 4x6 int64 SparseTensor; value at (r, c) is 10*r + c."""
    # [0 | |2 | |4 |5 ]
    # [ |11| |13|14| ]
    # [20| | |23| |25]
    # [30| |32|33| |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
    shape = np.array([4, 6]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)
  def _SparseTensor_5x7(self):
    """Return a 5x7 int64 SparseTensor; value at (r, c) is 10*r + c."""
    # [0 | |2 | |4 |5 | ]
    # [ |11| |13|14| |16]
    # [20| | |23| |25| ]
    # [30| |32|33| |35| ]
    # [ |41| | |44| |46]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
         46]).astype(np.int64)
    shape = np.array([5, 7]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)
  def _SparseTensorValue_3x4x2(self):
    """Return a 3x4x2 string SparseTensorValue (letter=cell, digit=inner idx)."""
    # slice(:,:, 0)
    # ['a0'| |'b0'| ]
    # [ |'c0'| |'d0']
    # [ | |'e0'| ]
    # slice(:,:, 1)
    # ['a1'| |'b1'| ]
    # [ |'c1'| |'d1']
    # [ | |'e1'| ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
    val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
    shape = np.array([3, 4, 2]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)
  def _SparseTensor_3x4x2(self):
    """Return the 3x4x2 fixture as a SparseTensor (graph tensor form)."""
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x4x2(
    ))
  def testSplitMatrixRows(self):
    """4x6 split into two even 2x6 halves along axis 0."""
    with self.test_session(use_gpu=False):
      sp_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=2, axis=0)
      self.assertAllEqual(len(sp_tensors), 2)
      self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
                                                         [0, 5], [1, 1], [1, 3],
                                                         [1, 4]])
      self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5, 11, 13, 14])
      self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [2, 6])
      self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 0], [0, 3], [0, 5],
                                                         [1, 0], [1, 2], [1, 3],
                                                         [1, 5]])
      self.assertAllEqual(sp_tensors[1].values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [2, 6])
  def testSplitMatrixUnevenCols(self):
    """5x7 split along axis 1 into 3 and 4 pieces (7 is not divisible)."""
    with self.test_session(use_gpu=False):
      sp_tensors_3 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=3, axis=1)
      self.assertAllEqual(len(sp_tensors_3), 3)
      # Expected widths are [3, 2, 2]: the extra column goes to the first piece.
      self.assertAllEqual(sp_tensors_3[0].indices.eval(),
                          [[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2],
                           [4, 1]])
      self.assertAllEqual(sp_tensors_3[0].values.eval(),
                          [0, 2, 11, 20, 30, 32, 41])
      self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [5, 3])
      self.assertAllEqual(sp_tensors_3[1].indices.eval(),
                          [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_3[1].values.eval(),
                          [4, 13, 14, 23, 33, 44])
      self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_3[2].indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_3[2].values.eval(), [5, 16, 25, 35, 46])
      self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [5, 2])
      # Expected widths for num_split=4 are [2, 2, 2, 1].
      sp_tensors_4 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=4, axis=1)
      self.assertAllEqual(len(sp_tensors_4), 4)
      self.assertAllEqual(sp_tensors_4[0].indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_4[0].values.eval(), [0, 11, 20, 30, 41])
      self.assertAllEqual(sp_tensors_4[0].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[1].indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sp_tensors_4[1].values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sp_tensors_4[1].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[2].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
      self.assertAllEqual(sp_tensors_4[2].values.eval(), [4, 5, 14, 25, 35, 44])
      self.assertAllEqual(sp_tensors_4[2].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[3].indices.eval(), [[1, 0], [4, 0]])
      self.assertAllEqual(sp_tensors_4[3].values.eval(), [16, 46])
      self.assertAllEqual(sp_tensors_4[3].dense_shape.eval(), [5, 1])
  def testSplitMatrixUnevenRows(self):
    """5x7 split along axis 0 into 2 and 3 pieces (5 is not divisible)."""
    with self.test_session(use_gpu=False):
      # Expected heights for num_split=2 are [3, 2].
      sp_tensors_2 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=2, axis=0)
      self.assertAllEqual(sp_tensors_2[0].indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
      self.assertAllEqual(sp_tensors_2[0].values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
      self.assertAllEqual(sp_tensors_2[0].dense_shape.eval(), [3, 7])
      self.assertAllEqual(sp_tensors_2[1].indices.eval(),
                          [[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4],
                           [1, 6]])
      self.assertAllEqual(sp_tensors_2[1].values.eval(),
                          [30, 32, 33, 35, 41, 44, 46])
      self.assertAllEqual(sp_tensors_2[1].dense_shape.eval(), [2, 7])
      self.assertAllEqual(len(sp_tensors_2), 2)
      # Expected heights for num_split=3 are [2, 2, 1].
      sp_tensors_3 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=3, axis=0)
      self.assertAllEqual(len(sp_tensors_3), 3)
      self.assertAllEqual(sp_tensors_3[0].indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensors_3[0].values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16])
      self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [2, 7])
      # NOTE(review): sp_tensors_3[1].indices is never asserted here.
      self.assertAllEqual(sp_tensors_3[1].values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensors_3[2].indices.eval(), [[0, 1], [0, 4],
                                                           [0, 6]])
      self.assertAllEqual(sp_tensors_3[2].values.eval(), [41, 44, 46])
      self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [1, 7])
    # NOTE(review): stray no-op return; harmless, could be removed.
    return
  def testSplitAllRows(self):
    """4x6 split into one piece per row (num_split equals the dimension)."""
    with self.test_session(use_gpu=False):
      sp_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=4, axis=0)
      self.assertAllEqual(len(sp_tensors), 4)
      self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
                                                         [0, 5]])
      self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5])
      self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 1], [0, 3], [0,
                                                                          4]])
      self.assertAllEqual(sp_tensors[1].values.eval(), [11, 13, 14])
      self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[2].indices.eval(), [[0, 0], [0, 3], [0,
                                                                          5]])
      self.assertAllEqual(sp_tensors[2].values.eval(), [20, 23, 25])
      self.assertAllEqual(sp_tensors[2].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[3].indices.eval(), [[0, 0], [0, 2], [0, 3],
                                                         [0, 5]])
      self.assertAllEqual(sp_tensors[3].values.eval(), [30, 32, 33, 35])
      self.assertAllEqual(sp_tensors[3].dense_shape.eval(), [1, 6])
  def testSplitColumns(self):
    """4x6 split into three even 4x2 pieces along axis 1."""
    with self.test_session(use_gpu=False):
      sparse_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=3, axis=1)
      self.assertAllEqual(len(sparse_tensors), 3)
      self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [1, 1],
                                                             [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 11, 20, 30])
      self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensors[1].indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sparse_tensors[1].values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensors[2].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
      self.assertAllEqual(sparse_tensors[2].values.eval(), [4, 5, 14, 25, 35])
      self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 2])
  def testSplitAllColumns(self):
    """4x6 split into one piece per column (num_split equals the dimension)."""
    with self.test_session(use_gpu=False):
      sparse_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=6, axis=1)
      self.assertAllEqual(len(sparse_tensors), 6)
      self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 20, 30])
      self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[1].indices.eval(), [[1, 0]])
      self.assertAllEqual(sparse_tensors[1].values.eval(), [11])
      self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[2].indices.eval(), [[0, 0], [3, 0]])
      self.assertAllEqual(sparse_tensors[2].values.eval(), [2, 32])
      self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[3].indices.eval(), [[1, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[3].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[3].values.eval(), [13, 23, 33])
      self.assertAllEqual(sparse_tensors[4].indices.eval(), [[0, 0], [1, 0]])
      self.assertAllEqual(sparse_tensors[4].values.eval(), [4, 14])
      self.assertAllEqual(sparse_tensors[4].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[5].indices.eval(), [[0, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[5].values.eval(), [5, 25, 35])
      self.assertAllEqual(sparse_tensors[5].dense_shape.eval(), [4, 1])
  def testSliceConcat(self):
    """Round-trip: splitting then concatenating should reproduce the input.

    Runs once with a SparseTensorValue input and once with a SparseTensor.
    """
    for sp_input in (self._SparseTensorValue_3x4x2(),
                     self._SparseTensor_3x4x2()):
      with self.test_session(use_gpu=False):
        sparse_tensors = sparse_ops.sparse_split(
            sp_input=sp_input, num_split=2, axis=1)
        concat_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
        expected_output = self._SparseTensor_3x4x2()
        # NOTE(review): only indices are compared; values/dense_shape are not.
        self.assertAllEqual(concat_tensor.indices.eval(),
                            expected_output.indices.eval())
  def testArgumentErrors(self):
    """sparse_split rejects positional calls and missing required kwargs."""
    with self.assertRaisesRegexp(ValueError, 'Keyword arguments are required'):
      sparse_ops.sparse_split(3, 2, 1)
    with self.assertRaisesRegexp(ValueError, 'sp_input is required'):
      sparse_ops.sparse_split()
    with self.assertRaisesRegexp(ValueError, 'num_split is required'):
      sparse_ops.sparse_split(sp_input=1)
    with self.assertRaisesRegexp(ValueError, 'axis is required'):
      sparse_ops.sparse_split(num_split=2, sp_input=1)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
|
apache-2.0
|
giorgiop/scipy
|
scipy/weave/c_spec.py
|
69
|
17274
|
from __future__ import absolute_import, print_function
import types
from .base_spec import base_converter
from . import base_info
#----------------------------------------------------------------------------
# C++ code template for converting code from python objects to C++ objects
#
# This is silly code. There is absolutely no reason why these simple
# conversion functions should be classes. However, some versions of
# Mandrake Linux ship with broken C++ compilers (or libraries) that do not
# handle exceptions correctly when they are thrown from functions. However,
# exceptions thrown from class methods always work, so we make everything
# a class method to solve this error.
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# speed note
# the convert_to_int macro below takes about 25 ns per conversion on my
# 850 MHz PIII. A slightly more sophisticated macro version can trim this
# to 20 ns, but this savings is dang near useless because the other
# overhead swamps it...
#----------------------------------------------------------------------------
py_to_c_template = \
"""
class %(type_name)s_handler
{
public:
%(return_type)s convert_to_%(type_name)s(PyObject* py_obj, const char* name)
{
// Incref occurs even if conversion fails so that
// the decref in cleanup_code has a matching incref.
%(inc_ref_count)s
if (!py_obj || !%(check_func)s(py_obj))
handle_conversion_error(py_obj,"%(type_name)s", name);
return %(to_c_return)s;
}
%(return_type)s py_to_%(type_name)s(PyObject* py_obj, const char* name)
{
// !! Pretty sure INCREF should only be called on success since
// !! py_to_xxx is used by the user -- not the code generator.
if (!py_obj || !%(check_func)s(py_obj))
handle_bad_type(py_obj,"%(type_name)s", name);
%(inc_ref_count)s
return %(to_c_return)s;
}
};
%(type_name)s_handler x__%(type_name)s_handler = %(type_name)s_handler();
#define convert_to_%(type_name)s(py_obj,name) \\
x__%(type_name)s_handler.convert_to_%(type_name)s(py_obj,name)
#define py_to_%(type_name)s(py_obj,name) \\
x__%(type_name)s_handler.py_to_%(type_name)s(py_obj,name)
"""
#----------------------------------------------------------------------------
# C++ code template for converting code from C++ objects to Python objects
#
#----------------------------------------------------------------------------
simple_c_to_py_template = \
"""
PyObject* %(type_name)s_to_py(PyObject* obj)
{
return (PyObject*) obj;
}
"""
class common_base_converter(base_converter):
    """Shared implementation for most weave type converters.

    Subclasses configure attributes in ``init_info`` (``type_name``,
    ``check_func``, ``c_type``, ``to_c_return``, headers, libraries, ...)
    and this base turns them into generated C++ support/declaration/cleanup
    code via the module-level %-substitution templates.
    """
    def __init__(self):
        self.init_info()
        self._build_information = [self.generate_build_info()]
    def init_info(self):
        """Reset all build and conversion attributes to their defaults."""
        self.matching_types = []
        self.headers = []
        self.include_dirs = []
        self.libraries = []
        self.library_dirs = []
        self.sources = []
        self.support_code = []
        self.module_init_code = []
        self.warnings = []
        self.define_macros = []
        self.extra_compile_args = []
        self.extra_link_args = []
        # 1 => generated code INCREFs on conversion and DECREFs on cleanup.
        self.use_ref_count = 1
        self.name = "no_name"
        self.c_type = 'PyObject*'
        self.return_type = 'PyObject*'
        self.to_c_return = 'py_obj'
    def info_object(self):
        """Factory for the build-info container; subclasses may override."""
        return base_info.custom_info()
    def generate_build_info(self):
        """Collect headers, dirs, libs, sources and support code into one info object."""
        info = self.info_object()
        for header in self.headers:
            info.add_header(header)
        for d in self.include_dirs:
            info.add_include_dir(d)
        for lib in self.libraries:
            info.add_library(lib)
        for d in self.library_dirs:
            info.add_library_dir(d)
        for source in self.sources:
            info.add_source(source)
        for code in self.support_code:
            info.add_support_code(code)
        # Both conversion directions are always emitted as support code.
        info.add_support_code(self.py_to_c_code())
        info.add_support_code(self.c_to_py_code())
        for init_code in self.module_init_code:
            info.add_module_init_code(init_code)
        for macro in self.define_macros:
            info.add_define_macro(macro)
        for warning in self.warnings:
            info.add_warning(warning)
        for arg in self.extra_compile_args:
            info.add_extra_compile_arg(arg)
        for arg in self.extra_link_args:
            info.add_extra_link_arg(arg)
        return info
    def type_match(self,value):
        """Return true when ``value``'s exact type is one this converter handles."""
        return type(value) in self.matching_types
    def get_var_type(self,value):
        return type(value)
    def type_spec(self,name,value):
        """Create a fresh converter instance bound to variable ``name``."""
        # factory
        new_spec = self.__class__()
        new_spec.name = name
        new_spec.var_type = self.get_var_type(value)
        return new_spec
    def template_vars(self,inline=0):
        """Build the substitution dict consumed by the C++ code templates.

        ``py_variable``/``retrieve_py_variable`` are provided by the
        ``base_converter`` base class (defined elsewhere in this module).
        """
        d = {}
        d['type_name'] = self.type_name
        d['check_func'] = self.check_func
        d['c_type'] = self.c_type
        d['return_type'] = self.return_type
        d['to_c_return'] = self.to_c_return
        d['name'] = self.name
        d['py_var'] = self.py_variable()
        d['var_lookup'] = self.retrieve_py_variable(inline)
        code = 'convert_to_%(type_name)s(%(py_var)s,"%(name)s")' % d
        d['var_convert'] = code
        if self.use_ref_count:
            d['inc_ref_count'] = "Py_XINCREF(py_obj);"
        else:
            d['inc_ref_count'] = ""
        return d
    def py_to_c_code(self):
        """Generated C++ for converting the Python object to the C type."""
        return py_to_c_template % self.template_vars()
    def c_to_py_code(self):
        """Generated C++ for converting the C value back to a Python object."""
        return simple_c_to_py_template % self.template_vars()
    def declaration_code(self,templatize=0,inline=0):
        """Emit C++ that looks up the Python variable and converts it."""
        code = '%(py_var)s = %(var_lookup)s;\n' \
               '%(c_type)s %(name)s = %(var_convert)s;\n' % \
               self.template_vars(inline=inline)
        return code
    def cleanup_code(self):
        """Emit the DECREF matching the INCREF done at conversion time (if any)."""
        if self.use_ref_count:
            code = 'Py_XDECREF(%(py_var)s);\n' % self.template_vars()
            # code += 'printf("cleaning up %(py_var)s\\n");\n' % self.template_vars()
        else:
            code = ""
        return code
    def __repr__(self):
        msg = "(file:: name: %s)" % self.name
        return msg
    def __cmp__(self,other):
        """Python 2 ordering hook: compare by name, then by class.

        NOTE(review): relies on the Python 2 builtin ``cmp``; this whole
        module is Python 2 only.
        """
        # only works for equal
        result = -1
        try:
            result = cmp(self.name,other.name) or \
                     cmp(self.__class__, other.__class__)
        except AttributeError:
            pass
        return result
#----------------------------------------------------------------------------
# Module Converter
#----------------------------------------------------------------------------
class module_converter(common_base_converter):
    """Converter for Python module objects (passed through as ``PyObject*``)."""

    def init_info(self):
        """Configure this converter to recognize module objects."""
        common_base_converter.init_info(self)
        # probably should test for callable classes here also.
        self.matching_types = [types.ModuleType]
        self.type_name = 'module'
        self.check_func = 'PyModule_Check'
#----------------------------------------------------------------------------
# String Converter
#----------------------------------------------------------------------------
class string_converter(common_base_converter):
    """Convert Python str objects to/from C++ ``std::string``."""
    def init_info(self):
        common_base_converter.init_info(self)
        self.type_name = 'string'
        self.check_func = 'PyString_Check'
        self.c_type = 'std::string'
        self.return_type = 'std::string'
        self.to_c_return = "std::string(PyString_AsString(py_obj))"
        self.matching_types = [types.StringType]
        self.headers.append('<string>')
    def c_to_py_code(self):
        """Return C++ source converting a ``std::string`` back to a PyObject*."""
        # !! Need to dedent returned code.
        code = """
               PyObject* string_to_py(std::string s)
               {
                   return PyString_FromString(s.c_str());
               }
               """
        return code
#----------------------------------------------------------------------------
# Unicode Converter
#----------------------------------------------------------------------------
class unicode_converter(common_base_converter):
    """Convert Python unicode objects to a raw ``Py_UNICODE*`` pointer."""
    def init_info(self):
        common_base_converter.init_info(self)
        self.type_name = 'unicode'
        self.check_func = 'PyUnicode_Check'
        # This isn't supported by gcc 2.95.3 -- MSVC works fine with it.
        # self.c_type = 'std::wstring'
        # self.to_c_return = "std::wstring(PyUnicode_AS_UNICODE(py_obj))"
        self.c_type = 'Py_UNICODE*'
        self.return_type = self.c_type
        self.to_c_return = "PyUnicode_AS_UNICODE(py_obj)"
        self.matching_types = [types.UnicodeType]
        # self.headers.append('<string>')
    def declaration_code(self,templatize=0,inline=0):
        """Emit the usual declarations plus ``int N<name>`` holding the length."""
        # since wstring doesn't seem to work everywhere, we need to provide
        # the length variable Nxxx for the unicode string xxx.
        code = '%(py_var)s = %(var_lookup)s;\n' \
               '%(c_type)s %(name)s = %(var_convert)s;\n' \
               'int N%(name)s = PyUnicode_GET_SIZE(%(py_var)s);\n' \
               % self.template_vars(inline=inline)
        return code
#----------------------------------------------------------------------------
# File Converter
#----------------------------------------------------------------------------
class file_converter(common_base_converter):
    """Convert Python file objects to/from C ``FILE*``."""
    def init_info(self):
        common_base_converter.init_info(self)
        self.type_name = 'file'
        self.check_func = 'PyFile_Check'
        self.c_type = 'FILE*'
        self.return_type = self.c_type
        self.to_c_return = "PyFile_AsFile(py_obj)"
        self.headers = ['<stdio.h>']
        self.matching_types = [types.FileType]
    def c_to_py_code(self):
        """Return C++ source wrapping a ``FILE*`` back into a Python file object."""
        # !! Need to dedent returned code.
        code = """
               PyObject* file_to_py(FILE* file, const char* name,
                                    const char* mode)
               {
                   return (PyObject*) PyFile_FromFile(file,
                                                      const_cast<char*>(name),
                                                      const_cast<char*>(mode), fclose);
               }
               """
        return code
#----------------------------------------------------------------------------
#
# Scalar Number Conversions
#
#----------------------------------------------------------------------------
# the following typemaps are for 32 bit platforms. A way to do this
# general case? maybe ask numeric types how long they are and base
# the decisions on that.
#----------------------------------------------------------------------------
# Standard Python numeric --> C type maps
#----------------------------------------------------------------------------
# Maps Python scalar types and numpy typecode characters to C/C++ type names
# used in generated code.
num_to_c_types = {}
num_to_c_types[type(1)] = 'long'
num_to_c_types[type(1.)] = 'double'
num_to_c_types[type(1.+1.j)] = 'std::complex<double> '
# !! hmmm. The following is likely unsafe...
num_to_c_types[long] = 'npy_longlong'
#----------------------------------------------------------------------------
# Numeric array Python numeric --> C type maps
#----------------------------------------------------------------------------
num_to_c_types['T'] = 'T' # for templates
num_to_c_types['G'] = 'std::complex<longdouble> '
num_to_c_types['F'] = 'std::complex<float> '
num_to_c_types['D'] = 'std::complex<double> '
num_to_c_types['g'] = 'npy_longdouble'
num_to_c_types['f'] = 'float'
num_to_c_types['d'] = 'double'
num_to_c_types['b'] = 'char'
# 'B' was previously assigned twice ('npy_uchar' then 'npy_ubyte'); the first
# assignment was dead code, so only the effective numpy mapping is kept.
num_to_c_types['B'] = 'npy_ubyte' # numpy
num_to_c_types['h'] = 'short'
num_to_c_types['H'] = 'npy_ushort'
num_to_c_types['i'] = 'int'
num_to_c_types['I'] = 'npy_uint'
num_to_c_types['?'] = 'bool'
num_to_c_types['l'] = 'long'
num_to_c_types['L'] = 'npy_ulong'
num_to_c_types['q'] = 'npy_longlong'
num_to_c_types['Q'] = 'npy_ulonglong'
class scalar_converter(common_base_converter):
    """Shared setup for the numeric scalar converters (int/long/float/complex)."""

    def init_info(self):
        """Install headers and MSVC warning suppressions common to scalars."""
        common_base_converter.init_info(self)
        # Suppress MSVC warnings 4275/4101 triggered by the generated code.
        self.warnings = ['disable: 4275', 'disable: 4101']
        # Scalars are copied by value; no Python ref counting is needed.
        self.use_ref_count = 0
        self.headers = ['<complex>', '<math.h>']
# This has to be int for SCXX to work.
class int_converter(scalar_converter):
    """Convert Python int objects to/from C ``int``."""

    def init_info(self):
        """Describe the Python-int -> C ``int`` conversion."""
        scalar_converter.init_info(self)
        self.type_name = 'int'
        self.matching_types = [types.IntType]
        self.check_func = 'PyInt_Check'
        self.c_type = self.return_type = 'int'
        self.to_c_return = "(int) PyInt_AsLong(py_obj)"
class long_converter(scalar_converter):
    """Convert Python long objects to/from C ``longlong``."""

    def init_info(self):
        """Describe the Python-long -> C ``longlong`` conversion."""
        scalar_converter.init_info(self)
        # !! long to int conversion isn't safe!
        self.type_name = 'long'
        self.matching_types = [types.LongType]
        self.check_func = 'PyLong_Check'
        self.c_type = self.return_type = 'longlong'
        self.to_c_return = "(longlong) PyLong_AsLongLong(py_obj)"
class float_converter(scalar_converter):
    """Convert Python float objects to/from C ``double``."""

    def init_info(self):
        """Describe the Python-float -> C ``double`` conversion."""
        scalar_converter.init_info(self)
        # Not sure this is really that safe...
        self.type_name = 'float'
        self.matching_types = [types.FloatType]
        self.check_func = 'PyFloat_Check'
        self.c_type = self.return_type = 'double'
        self.to_c_return = "PyFloat_AsDouble(py_obj)"
class complex_converter(scalar_converter):
    """Convert Python complex objects to/from ``std::complex<double>``."""

    def init_info(self):
        """Describe the Python-complex -> ``std::complex<double>`` conversion."""
        scalar_converter.init_info(self)
        self.type_name = 'complex'
        self.matching_types = [types.ComplexType]
        self.check_func = 'PyComplex_Check'
        self.c_type = self.return_type = 'std::complex<double>'
        # Built from the real/imag parts of the Python complex object.
        self.to_c_return = ("std::complex<double>(PyComplex_RealAsDouble(py_obj),"
                            "PyComplex_ImagAsDouble(py_obj))")
#----------------------------------------------------------------------------
#
# List, Tuple, and Dict converters.
#
# Based on SCXX by Gordon McMillan
#----------------------------------------------------------------------------
import os
# Locate the bundled SCXX headers/sources relative to this module's file.
local_dir,junk = os.path.split(os.path.abspath(__file__))
scxx_dir = os.path.join(local_dir,'scxx')
class scxx_converter(common_base_converter):
    """Base for converters built on the SCXX C++ wrapper classes."""

    def init_info(self):
        """Register the SCXX headers, include dirs, and support source."""
        common_base_converter.init_info(self)
        # SCXX wrapper headers, plus <iostream> used by its implementation.
        self.headers = ['"scxx/object.h"', '"scxx/list.h"', '"scxx/tuple.h"',
                        '"scxx/dict.h"', '<iostream>']
        self.include_dirs = [local_dir, scxx_dir]
        self.sources = [os.path.join(scxx_dir, 'weave_imp.cpp')]
class list_converter(scxx_converter):
    """Convert Python lists to ``py::list``."""

    def init_info(self):
        """Describe the Python-list -> ``py::list`` conversion."""
        scxx_converter.init_info(self)
        self.type_name = 'list'
        self.matching_types = [types.ListType]
        self.check_func = 'PyList_Check'
        self.c_type = self.return_type = 'py::list'
        self.to_c_return = 'py::list(py_obj)'
        # py::list performs its own INCREF/DECREF.
        self.use_ref_count = 0
class tuple_converter(scxx_converter):
    """Convert Python tuples to ``py::tuple``."""

    def init_info(self):
        """Describe the Python-tuple -> ``py::tuple`` conversion."""
        scxx_converter.init_info(self)
        self.type_name = 'tuple'
        self.matching_types = [types.TupleType]
        self.check_func = 'PyTuple_Check'
        self.c_type = self.return_type = 'py::tuple'
        self.to_c_return = 'py::tuple(py_obj)'
        # py::tuple performs its own INCREF/DECREF.
        self.use_ref_count = 0
class dict_converter(scxx_converter):
    """Convert Python dicts to ``py::dict``."""

    def init_info(self):
        """Describe the Python-dict -> ``py::dict`` conversion."""
        scxx_converter.init_info(self)
        self.type_name = 'dict'
        self.matching_types = [types.DictType]
        self.check_func = 'PyDict_Check'
        self.c_type = self.return_type = 'py::dict'
        self.to_c_return = 'py::dict(py_obj)'
        # py::dict performs its own INCREF/DECREF.
        self.use_ref_count = 0
#----------------------------------------------------------------------------
# Instance Converter
#----------------------------------------------------------------------------
class instance_converter(scxx_converter):
    """Convert old-style class instances to ``py::object``."""

    def init_info(self):
        """Describe the instance -> ``py::object`` conversion."""
        scxx_converter.init_info(self)
        self.type_name = 'instance'
        self.matching_types = [types.InstanceType]
        self.check_func = 'PyInstance_Check'
        self.c_type = self.return_type = 'py::object'
        self.to_c_return = 'py::object(py_obj)'
        # py::object performs its own INCREF/DECREF.
        self.use_ref_count = 0
#----------------------------------------------------------------------------
# Catchall Converter
#
# catch all now handles callable objects
#----------------------------------------------------------------------------
class catchall_converter(scxx_converter):
    """Fallback converter: wraps anything (including callables) as ``py::object``."""

    def init_info(self):
        """Describe the generic PyObject -> ``py::object`` conversion."""
        scxx_converter.init_info(self)
        self.type_name = 'catchall'
        # Empty check: every object passes the generated type test.
        self.check_func = ''
        self.c_type = self.return_type = 'py::object'
        self.to_c_return = 'py::object(py_obj)'
        # py::object performs its own INCREF/DECREF.
        self.use_ref_count = 0

    def type_match(self,value):
        """Match every value; this converter is tried last."""
        return 1
if __name__ == "__main__":
x = list_converter().type_spec("x",1)
print(x.py_to_c_code())
print()
print(x.c_to_py_code())
print()
print(x.declaration_code(inline=1))
print()
print(x.cleanup_code())
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.