| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import hmac
import hashlib
import binascii
import zlib
from hashlib import sha256
import cookielib
#import youtube_dl
#from youtube_dl.utils import *
addon_id = 'script.video.F4mProxy'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
F4Mversion=''
#from Crypto.Cipher import AES
value_unsafe = '%+&;#'
VALUE_SAFE = ''.join(chr(c) for c in range(33, 127)
if chr(c) not in value_unsafe)
def urlencode_param(value):
"""Minimal URL encoding for query parameter"""
return urllib.quote_plus(value, safe=VALUE_SAFE)
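# Illustrative sketch (hypothetical value): spaces become '+', characters in
# value_unsafe are percent-encoded, everything else passes through:
#   urlencode_param('a b&c')  ->  'a+b%26c'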
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res+=char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
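# Box layout sketch: each box starts with a 4-byte big-endian size and a
# 4-byte type tag; a size of 1 means the real size follows as an 8-byte field.
# For example (hypothetical bytes), '\x00\x00\x00\x10abst' plus 8 payload
# bytes parses as (16, 'abst', <8 bytes of payload>).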
def read_asrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
quality_entry_count = self.read_unsigned_char()
quality_modifiers = []
for i in range(quality_entry_count):
quality_modifier = self.read_string()
quality_modifiers.append(quality_modifier)
segment_run_count = self.read_unsigned_int()
segments = []
#print 'segment_run_count',segment_run_count
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
#print 'segments',segments
return {'version': version,
'quality_segment_modifiers': quality_modifiers,
'segment_run': segments,
}
def read_afrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
time_scale = self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
quality_entries = []
for i in range(quality_entry_count):
mod = self.read_string()
quality_entries.append(mod)
fragments_count = self.read_unsigned_int()
#print 'fragments_count',fragments_count
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
#print 'fragments',fragments
return {'version': version,
'time_scale': time_scale,
'fragments': fragments,
'quality_entries': quality_entries,
}
def read_abst(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
bootstrap_info_version = self.read_unsigned_int()
streamType=self.read_unsigned_char()#self.read(1) # Profile,Live,Update,Reserved
islive=False
if (streamType & 0x20) >> 5:
islive=True
print 'LIVE',streamType,islive
time_scale = self.read_unsigned_int()
current_media_time = self.read_unsigned_long_long()
smpteTimeCodeOffset = self.read_unsigned_long_long()
movie_identifier = self.read_string()
server_count = self.read_unsigned_char()
servers = []
for i in range(server_count):
server = self.read_string()
servers.append(server)
quality_count = self.read_unsigned_char()
qualities = []
for i in range(quality_count):
quality = self.read_string()
qualities.append(quality)
drm_data = self.read_string()
metadata = self.read_string()
segments_count = self.read_unsigned_char()
#print 'segments_count11',segments_count
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
#print 'fragments_run_count11',fragments_run_count
fragments = []
for i in range(fragments_run_count):
# This info is only useful for the player, it doesn't give more info
# for the download process
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {'segments': segments,
'movie_identifier': movie_identifier,
'drm_data': drm_data,
'fragments': fragments,
},islive
def read_bootstrap_info(self):
"""
Read the bootstrap information from the stream,
returns a dict with the following keys:
segments: A list of dicts with the following keys
segment_run: A list of (first_segment, fragments_per_segment) tuples
"""
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
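# Usage sketch (hypothetical manifest data): the <bootstrapInfo> element of an
# f4m manifest carries a base64-encoded 'abst' box, parsed like so:
#   boot_info, is_live = read_bootstrap_info(base64.b64decode(bootstrap_b64))
#   print boot_info['segments'][0]['segment_run']   # e.g. [(1, 20)]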
def build_fragments_list(boot_info, startFromFregment=None, live=True):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
#print 'segment_run_table',segment_run_table
# I've only found videos with one segment
#if len(segment_run_table['segment_run'])>1:
# segment_run_table['segment_run']=segment_run_table['segment_run'][-2:] #pick latest
frag_start = boot_info['fragments'][0]['fragments']
#print boot_info['fragments']
# sum(j for i, j in segment_run_table['segment_run'])
first_frag_number=frag_start[0]['first']
last_frag_number=frag_start[-1]['first']
if last_frag_number==0:
last_frag_number=frag_start[-2]['first']
endfragment=0
segment_to_start=None
for current in range (len(segment_run_table['segment_run'])):
seg,fregCount=segment_run_table['segment_run'][current]
#print 'segmcount',seg,fregCount
if (not live):
frag_end=last_frag_number
else:
frag_end=first_frag_number+fregCount-1
if fregCount>10000:
frag_end=last_frag_number
#if frag_end
segment_run_table['segment_run'][current]=(seg,fregCount,first_frag_number,frag_end)
if (not startFromFregment==None) and startFromFregment>=first_frag_number and startFromFregment<=frag_end:
segment_to_start=current
first_frag_number+=fregCount
print 'current status',segment_run_table['segment_run']
#if we have no index then take the last segment
if segment_to_start==None:
segment_to_start=len(segment_run_table['segment_run'])-1
#if len(segment_run_table['segment_run'])>2:
# segment_to_start=len(segment_run_table['segment_run'])-2;
if live:
if len(boot_info['fragments'][0]['fragments'])>1: #go bit back
startFromFregment= boot_info['fragments'][0]['fragments'][-1]['first']
else:
startFromFregment= boot_info['fragments'][0]['fragments'][0]['first'] #start from begining
#if len(boot_info['fragments'][0]['fragments'])>2: #go little bit back
# startFromFregment= boot_info['fragments'][0]['fragments'][-2]['first']
#print 'segment_to_start',segment_to_start
for currentIndex in range (segment_to_start,len(segment_run_table['segment_run'])):
currentSegment=segment_run_table['segment_run'][currentIndex]
#print 'currentSegment',currentSegment
(seg,fregCount,frag_start,frag_end)=currentSegment
#print 'startFromFregment',startFromFregment,
if (not startFromFregment==None) and startFromFregment>=frag_start and startFromFregment<=frag_end:
frag_start=startFromFregment
#print 'frag_start',frag_start,frag_end
for currentFreg in range(frag_start,frag_end+1):
res.append((seg,currentFreg ))
print 'fragmentlist',res,boot_info
return res
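# Output sketch (hypothetical, non-live): with a single segment run (1, 5) and
# a fragment run table spanning fragments 10..14, the list comes out as
# [(1, 10), (1, 11), (1, 12), (1, 13), (1, 14)].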
#totalFrags=sum(j for i, j in segment_run_table['segment_run'])
#lastSegment=segment_run_table['segment_run'][-1]
#lastSegmentStart= lastSegment[0]
#lastSegmentFragCount = lastSegment[1]
#print 'totalFrags',totalFrags
#first_frag_number = frag_start[0]['first']
#startFragOfLastSegment= first_frag_number +totalFrags - lastSegmentFragCount
#for (i, frag_number) in zip(range(1, lastSegmentFragCount+1), itertools.count(startFragOfLastSegment)):
# res.append((lastSegmentStart,frag_number )) #this was i, i am using first segement start
#return res
#segment_run_entry = segment_run_table['segment_run'][0]
#print 'segment_run_entry',segment_run_entry,segment_run_table
#n_frags = segment_run_entry[1]
#startingPoint = segment_run_entry[0]
#fragment_run_entry_table = boot_info['fragments'][0]['fragments']
#frag_entry_index = 0
#first_frag_number = fragment_run_entry_table[0]['first']
#first_frag_number=(startingPoint*n_frags) -(n_frags)+1
#print 'THENUMBERS',startingPoint,n_frags,first_frag_number
#for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
# res.append((startingPoint,frag_number )) #this was i, i am using first segement start
#return res
def join(base,url):
join = urlparse.urljoin(base,url)
url = urlparse.urlparse(join)
path = posixpath.normpath(url[2])
return urlparse.urlunparse(
(url.scheme,url.netloc,path,url.params,url.query,url.fragment)
)
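# e.g. (hypothetical URLs) join('http://example.com/vod/manifest.f4m', 'media/')
# resolves to 'http://example.com/vod/media' with the path normalized.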
def _add_ns(prop):
#print 'F4Mversion',F4Mversion
return '{http://ns.adobe.com/f4m/%s}%s' %(F4Mversion, prop)
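# e.g. with F4Mversion = '1.0', _add_ns('media') returns
# '{http://ns.adobe.com/f4m/1.0}media', the Clark-notation tag name that
# ElementTree expects for namespaced lookups.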
#class ReallyQuietDownloader(youtube_dl.FileDownloader):
# def to_screen(sef, *args, **kargs):
# pass
class F4MDownloader():
"""
A downloader for f4m manifests or AdobeHDS.
"""
outputfile =''
clientHeader=None
cookieJar=cookielib.LWPCookieJar()
def __init__(self):
self.init_done=False
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
print 'url',url
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
#cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
#openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
print 'Error in getUrl'
traceback.print_exc()
return None
def _write_flv_header2(self, stream):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x01')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x09')
def _write_flv_header(self, stream, metadata):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
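# 0x05 = type flags: audio (0x04) and video (0x01) tags present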
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x00')
# FLVTAG
if metadata:
stream.write(b'\x12') # Script data
stream.write(pack('!L',len(metadata))[1:]) # Size of the metadata with 3 bytes
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
# All these magic numbers were extracted from the output file
# produced by AdobeHDS.php (https://github.com/K-S-V/Scripts)
stream.write(b'\x00\x00\x01\x73')
def init(self, out_stream, url, proxy=None,use_proxy_for_chunks=True,g_stopEvent=None, maxbitrate=0, auth=''):
try:
self.init_done=False
self.total_frags=0
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.auth=auth
#self.auth="pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYzMDMxMTV+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWQxODA5MWVkYTQ4NDI3NjFjODhjOWQwY2QxNTk3YTI0MWQwOWYwNWI1N2ZmMDE0ZjcxN2QyMTVjZTJkNmJjMDQ%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3DACF8A1E4467676C9BCE2721CA5EFF840BD6ED1780046954039373A3B0D942ADC&hdntl=exp=1406303115~acl=%2f*~data=hdntl~hmac=4ab96fa533fd7c40204e487bfc7befaf31dd1f49c27eb1f610673fed9ff97a5f&als=0,2,0,0,0,NaN,0,0,0,37,f,52293145.57,52293155.9,t,s,GARWLHLMHNGA,2.11.3,37&hdcore=2.11.3"
if self.auth ==None or self.auth =='None' :
self.auth=''
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.use_proxy_for_chunks=use_proxy_for_chunks
self.out_stream=out_stream
self.g_stopEvent=g_stopEvent
self.maxbitrate=maxbitrate
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
print 'header received now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
#self.downloadInternal( url)
return self.preDownload()
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def preDownload(self):
global F4Mversion
try:
self.seqNumber=0
self.live=False #todo find if its Live or not
man_url = self.url
url=self.url
print 'Downloading f4m manifest'
manifest = self.getUrl(man_url)#.read()
if not manifest:
return False
print len(manifest)
try:
print manifest
except: pass
self.status='manifest done'
#self.report_destination(filename)
#dl = ReallyQuietDownloader(self.ydl, {'continuedl': True, 'quiet': True, 'noprogress':True})
version_find="xmlns=\".*?\/([0-9].*?)\""
F4Mversion = re.findall(version_find, manifest)[0]
#print F4Mversion,_add_ns('media')
auth_patt='<pv-2.0>(.*?)<'
auth_obj =re.findall(auth_patt, manifest)
self.auth20=''
if auth_obj and len(auth_obj)>0:
self.auth20=auth_obj[0] #not doing anything for time being
print 'auth',self.auth,self.auth20
#quick fix for one sample where the manifest XML was malformed.
if '\"bootstrapInfoId' in manifest:
manifest=manifest.replace('\"bootstrapInfoId','\" bootstrapInfoId')
doc = etree.fromstring(manifest)
print doc
# Added the-one 05082014
# START
# Check if manifest defines a baseURL tag
baseURL_tag = doc.find(_add_ns('baseURL'))
if baseURL_tag != None:
man_url = baseURL_tag.text
url = man_url
self.url = url
print 'base url defined as: %s' % man_url
# END
try:
#formats = [(int(f.attrib.get('bitrate', -1)),f) for f in doc.findall(_add_ns('media'))]
formats=[]
for f in doc.findall(_add_ns('media')):
vtype=f.attrib.get('type', '')
if f.attrib.get('type', '')=='video' or vtype=='' :
formats.append([int(f.attrib.get('bitrate', -1)),f])
print 'format works',formats
except:
formats=[(int(0),f) for f in doc.findall(_add_ns('media'))]
#print 'formats',formats
formats = sorted(formats, key=lambda f: f[0])
if self.maxbitrate==0:
rate, media = formats[-1]
elif self.maxbitrate==-1:
rate, media = formats[0]
else: #find bitrate
brselected=None
rate, media=None,None
for r, m in formats:
if r<=self.maxbitrate:
rate, media=r,m
else:
break
if media==None:
rate, media = formats[-1]
dest_stream = self.out_stream
print 'rate selected',rate
self.metadata=None
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'#,media.find(_add_ns('metadata')).text
#self._write_flv_header(dest_stream, metadata)
#dest_stream.flush()
except: pass
# Modified the-one 05082014
# START
# url and href can be used interchangeably
# so if url attribute is not present
# check for href attribute
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
# Added the-one 05082014
# START
# if media url/href points to another f4m file
if '.f4m' in mediaUrl:
sub_f4m_url = join(man_url,mediaUrl)
print 'media points to another f4m file: %s' % sub_f4m_url
print 'Downloading f4m sub manifest'
sub_manifest = self.getUrl(sub_f4m_url)#.read()
if not sub_manifest:
return False
print len(sub_manifest)
try:
print sub_manifest
except: pass
self.status='sub manifest done'
F4Mversion = re.findall(version_find, sub_manifest)[0]
doc = etree.fromstring(sub_manifest)
print doc
media = doc.find(_add_ns('media'))
if media == None:
return False
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'
except: pass
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
try:
bootStrapID = media.attrib['bootstrapInfoId']
except: bootStrapID='xx'
#print 'mediaUrl',mediaUrl
base_url = join(man_url,mediaUrl)#compat_urlparse.urljoin(man_url,media.attrib['url'])
if mediaUrl.endswith('/') and not base_url.endswith('/'):
base_url += '/'
self.base_url=base_url
bsArray=doc.findall(_add_ns('bootstrapInfo'))
print 'bootStrapID',bootStrapID
#bootStrapID='bootstrap_450'
bootstrap=self.getBootStrapWithId(bsArray,bootStrapID)
if bootstrap==None: #if not available then find any!
print 'bootStrapID NOT Found'
bootstrap=doc.findall(_add_ns('bootstrapInfo'))[0]
else:
print 'found bootstrap with id',bootstrap
#print 'bootstrap',bootstrap
bootstrapURL1=''
try:
bootstrapURL1=bootstrap.attrib['url']
except: pass
bootstrapURL=''
bootstrapData=None
queryString=None
if bootstrapURL1=='':
bootstrapData=base64.b64decode(doc.findall(_add_ns('bootstrapInfo'))[0].text)
#
else:
from urlparse import urlparse
queryString = urlparse(url).query
print 'queryString11',queryString
if len(queryString)==0: queryString=None
if queryString==None or '?' in bootstrap.attrib['url']:
bootstrapURL = join(man_url,bootstrap.attrib['url'])# take out querystring for later
queryString = urlparse(bootstrapURL).query
print 'queryString override',queryString
if len(queryString)==0:
queryString=None
if len(self.auth)>0:
bootstrapURL+='?'+self.auth
queryString=self.auth#self._pv_params('',self.auth20)#not in use
else:
print 'queryString!!',queryString
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+queryString
if len(self.auth)>0:
authval=self.auth#self._pv_params('',self.auth20)#not in use
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+authval
queryString=authval
print 'bootstrapURL',bootstrapURL
if queryString==None:
queryString=''
self.bootstrapURL=bootstrapURL
self.queryString=queryString
self.bootstrap, self.boot_info, self.fragments_list,self.total_frags=self.readBootStrapInfo(bootstrapURL,bootstrapData)
self.init_done=True
return True
except:
traceback.print_exc()
return False
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(self.url,dest_stream,segmentToStart,totalSegmentToSend)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,url,dest_stream ,segmentToStart=None,totalSegmentToSend=0):
global F4Mversion
try:
#dest_stream = self.out_stream
queryString=self.queryString
print 'segmentToStart',segmentToStart
if self.live or segmentToStart==0 or segmentToStart==None:
print 'writing metadata'#,len(self.metadata)
self._write_flv_header(dest_stream, self.metadata)
dest_stream.flush()
#elif segmentToStart>0 and not self.live:
# self._write_flv_header2(dest_stream)
# dest_stream.flush()
url=self.url
bootstrap, boot_info, fragments_list,total_frags=(self.bootstrap, self.boot_info, self.fragments_list,self.total_frags)
print boot_info, fragments_list,total_frags
self.status='bootstrap done'
self.status='file created'
self.downloaded_bytes = 0
self.bytes_in_disk = 0
self.frag_counter = 0
start = time.time()
frags_filenames = []
self.seqNumber=0
if segmentToStart and not self.live :
self.seqNumber=segmentToStart
if self.seqNumber>=total_frags:
self.seqNumber=total_frags-1
#for (seg_i, frag_i) in fragments_list:
#for seqNumber in range(0,len(fragments_list)):
self.segmentAvailable=0
frameSent=0
while True:
#if not self.live:
# _write_flv_header2
if self.g_stopEvent and self.g_stopEvent.isSet():
return
seg_i, frag_i=fragments_list[self.seqNumber]
self.seqNumber+=1
frameSent+=1
name = u'Seg%d-Frag%d' % (seg_i, frag_i)
#print 'base_url',base_url,name
url = self.base_url + name
if queryString and '?' not in url:
url+='?'+queryString
#print(url),base_url,name
#frag_filename = u'%s-%s' % (tmpfilename, name)
#success = dl._do_download(frag_filename, {'url': url})
print 'downloading....',url
success=False
urlTry=0
while not success and urlTry<5:
success = self.getUrl(url,True)
if not success: xbmc.sleep(300)
urlTry+=1
print 'downloaded',not success==None,url
if not success:
return False
#with open(frag_filename, 'rb') as down:
if 1==1:
down_data = success#down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
print 'box_type',box_type,len(box_data)
#if box_type == b'afra':
# dest_stream.write(box_data)
# dest_stream.flush()
# break
if box_type == b'mdat':
isDrm=True if ord(box_data[0])&1 else False
#print 'isDrm',isDrm,repr(box_data)
if 1==2 and isDrm:
print 'drm',repr(box_data[1:17])
box_data=box_data[17:]
dest_stream.write(box_data)
dest_stream.flush()
break
# Using the following code may fix some videos, but
# only in mplayer, VLC won't play the sound.
# mdat_reader = FlvReader(box_data)
# media_type = mdat_reader.read_unsigned_char()
# while True:
# if mdat_reader.read_unsigned_char() == media_type:
# if mdat_reader.read_unsigned_char() == 0x00:
# break
# dest_stream.write(pack('!B', media_type))
# dest_stream.write(b'\x00')
# dest_stream.write(mdat_reader.read())
# break
self.status='play'
if self.seqNumber==len(fragments_list) or (totalSegmentToSend>0 and frameSent==totalSegmentToSend):
if not self.live:
break
self.seqNumber=0
#todo if the url not available then get manifest and get the data again
total_frags=None
try:
bootstrap, boot_info, fragments_list,total_frags=self.readBootStrapInfo(self.bootstrapURL,None,updateMode=True,lastSegment=seg_i, lastFragement=frag_i)
except:
traceback.print_exc()
pass
if total_frags==None:
break
del self.downloaded_bytes
del self.frag_counter
except:
traceback.print_exc()
return
def getBootStrapWithId (self,BSarray, id):
try:
for bs in BSarray:
print 'compare val is ',bs.attrib['id'], 'id', id
if bs.attrib['id']==id:
print 'gotcha'
return bs
except: pass
return None
def readBootStrapInfo(self,bootstrapUrl,bootStrapData, updateMode=False, lastFragement=None,lastSegment=None):
try:
retries=0
while retries<=5:
if self.g_stopEvent and self.g_stopEvent.isSet():
return
if not bootStrapData:
bootStrapData =self.getUrl(bootstrapUrl)
if bootStrapData==None:
retries+=1
continue
#print 'bootstrapData',len(bootStrapData)
bootstrap = bootStrapData#base64.b64decode(bootStrapData)#doc.findall(_add_ns('bootstrapInfo'))[0].text)
#print 'boot stream read done'
boot_info,self.live = read_bootstrap_info(bootstrap)
#print 'boot_info read done',boot_info
newFragement=None
if not lastFragement==None:
newFragement=lastFragement+1
fragments_list = build_fragments_list(boot_info,newFragement,self.live)
total_frags = len(fragments_list)
#print 'fragments_list',fragments_list, newFragement
#print lastSegment
if updateMode and (len(fragments_list)==0 or ( newFragement and newFragement>fragments_list[0][1])):
#todo check lastFragement to see if we got valid data
print 'retrying......'
bootStrapData=None
retries+=1
xbmc.sleep(2000)
continue
return bootstrap, boot_info, fragments_list,total_frags
except:
traceback.print_exc()
def _pv_params(self, pvswf, pv):
"""Returns any parameters needed for Akamai HD player verification.
Algorithm originally documented by KSV, source:
http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
"""
pv="ZXhwPTE0MDYyODMxOTF+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTgwNTA0N2E1Yjk5ZmFjMjMzMDY0N2MxMzkyNGM0MDNiYzY1YjZmYzgyYTZhMjYyZDIxNDdkZTExZjI1MzQ5ZDI=;hdntl=exp=1406283191~acl=%2f*~data=hdntl~hmac=b65dc0c5ae60570f105984f0cc5ec6ce3a51422a7a1442e09f55513718ba80bf"
(data, hdntl) = pv.split(";")
SWF_VERIFICATION_KEY = b"Genuine Adobe Flash Player 001"
#SWF_VERIFICATION_KEY=binascii.unhexlify("9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564")
SWF_VERIFICATION_KEY = binascii.unhexlify(b"BD938D5EE6D9F42016F9C56577B6FDCF415FE4B184932B785AB32BCADC9BB592")
swf = self.getUrl('http://www.wat.tv/images/v70/PlayerLite.swf',True)
#AKAMAIHD_PV_KEY = unhexlify(b"BD938D5EE6D9F42016F9C56577B6FDCF415FE4B184932B785AB32BCADC9BB592")
AKAMAIHD_PV_KEY = "9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564"
hash = hashlib.sha256()
hash.update(self.swfdecompress(swf))
hash = base64.b64encode(hash.digest()).decode("ascii")
print 'hash',hash
hash="96e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ="
print 'hash',hash
#data="ZXhwPTE0MDYyMDQ3NjB+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWEzMjBlZDI5YjI1MDkwN2ExODcyMTJlOWJjNGFlNGUzZjA3MTM3ODk1ZDk4NmI2ZDVkMzczNzNhYzNiNDgxOWU="
msg = "exp=9999999999~acl=%2f%2a~data={0}!{1}".format(data, hash)
auth = hmac.new(AKAMAIHD_PV_KEY, msg.encode("ascii"), sha256)
pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())
# The "hdntl" parameter can be accepted as a cookie or passed in the
# query string, but the "pvtoken" parameter can only be in the query
# string
print 'pvtoken',pvtoken
#return "pvtoken={}&{}".format(
#urlencode_param(pvtoken), urlencode_param(hdntl))
params=urllib.urlencode({'pvtoken':pvtoken})+'&'+hdntl+'&hdcore=2.11.3'
#params='pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYwNDMzOTN+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWQxMTk0ZDc4NDExMDYwNjZlNDI5OWU2NTc3ODA0Mzk0ODU5NGZiMDQ5Njk2OGNiYzJiOGU2OTI2MjIzMjczZTA%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3D1BE9DEB8262AB4886A0CB9E8376D04652F015751B88DD3D2201DE463D9E47733&hdntl=exp=1406043393~acl=%2f*~data=hdntl~hmac=28d5e28f47b7b3821fafae0250ba37091f2fc66d1a9d39b76b925c423458c537'+'&hdcore=2.11.3'
#php AdobeHDS.php --manifest "http://nt1livhdsweb-lh.akamaihd.net/z/live_1@90590/manifest.f4m?hdnea=st=1405958620~exp=1405960420~acl=/*~hmac=5ca0d2521a99c897fb9ffaf6ed9c2e40e5d0300cdcdd9dfb7302d9e32a84f98d&hdcore=2.11.3&g=VQYTYCFRUDRA"
#params="pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYwNDUwNDZ+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWYwYWQ5ZGQyNDJlYjdiYjQ2YmZhMzk3MjY3MzE0ZWZiOWVlYTY5MDMzYWE2ODM5ZDM1ZWVjMWM1ZDUzZTk3ZjA%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3D9FCCB6BC90C17E8057EE52CD53DDF0C6D07B20638D68B8FFCE98ED74153AA960&hdntl=exp=1406045046~acl=%2f*~data=hdntl~hmac=11e323633ad708a11e57a91e8c685011292f42936f5f7f3b1cb0fb8d2266586a&als=0,2,0,0,0,NaN,0,0,0,52,f,52035079.57,52035089.9,t,s,VQYTYCFRUDRA,2.11.3,52&hdcore=2.11.3"
#--useragent "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0"
#+'&als=0,2,0,0,0,NaN,0,0,0,47,f,52018363.57,52018373.9,t,s,HPFXDUMCMNPG,2.11.3,47&hdcore=2.11.3'
params=params.replace('%2B','+')
params=params.replace('%2F','/')
#params='pvtoken=' +pvtoken+'&'+hdntl
#params = [("pvtoken", pvtoken)]
#params.extend(parse_qsl(hdntl, keep_blank_values=True))
#params='pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYwMzc2Njl+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWZjYzY5OTVkYjE5ODIxYTJlNDM4YTdhMWNmZjMyN2RhNTViOWNhMWM4NjZhZjYxM2ZkNDI4MTMwNjU4MjFjMjM%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3DFA3BCC1CF6466CAFFCC6EF5CB2855ED065F36687CBFCD11570B7D702F71F10A6&hdntl=exp=1406037669~acl=%2f*~data=hdntl~hmac=4ab5ad38849b952ae93721af7451936b4c5906258d575eda11e52a05f78c7d75&als=0,2,0,0,0,NaN,0,0,0,96,f,52027699.57,52027709.89,t,s,RUIDLGQGDHVH,2.11.3,90&hdcore=2.11.3'
#print '_pv_params params',params
print params
print "pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYyODMxOTF+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTgwNTA0N2E1Yjk5ZmFjMjMzMDY0N2MxMzkyNGM0MDNiYzY1YjZmYzgyYTZhMjYyZDIxNDdkZTExZjI1MzQ5ZDI%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3D47A2B2AA9570ECFB37966C884174D608D86A7DE2466DE7EB48A6F118A155BD80&hdntl=exp=1406283191~acl=%2f*~data=hdntl~hmac=b65dc0c5ae60570f105984f0cc5ec6ce3a51422a7a1442e09f55513718ba80bf"
return "pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYzMDMxMTV+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWQxODA5MWVkYTQ4NDI3NjFjODhjOWQwY2QxNTk3YTI0MWQwOWYwNWI1N2ZmMDE0ZjcxN2QyMTVjZTJkNmJjMDQ%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3DACF8A1E4467676C9BCE2721CA5EFF840BD6ED1780046954039373A3B0D942ADC&hdntl=exp=1406303115~acl=%2f*~data=hdntl~hmac=4ab96fa533fd7c40204e487bfc7befaf31dd1f49c27eb1f610673fed9ff97a5f&als=0,2,0,0,0,NaN,0,0,0,37,f,52293145.57,52293155.9,t,s,GARWLHLMHNGA,2.11.3,37&hdcore=2.11.3"
return params
def swfdecompress(self,data):
# 'CWS' marks a zlib-compressed SWF; rebuild it as an uncompressed 'FWS'
# file by keeping the 8-byte header (with 'C' swapped to 'F') and
# inflating the body.
if data[:3] == b"CWS":
data = b"F" + data[1:8] + zlib.decompress(data[8:])
return data
| sshnaidm/ru | script.video.F4mProxy/lib/f4mDownloader.py | Python | gpl-2.0 | 38,716 |
# /*
# * File: shuffle.cpp
# * -----------------
# * Implementation of the functions in shuffle.h.
# * See shuffle.h for documentation of each function.
# *
# * @author Marty Stepp
# * @version 2014/10/08
# * - removed 'using namespace' statement
# * @since 2014/02/01
# */
#include "shuffle.h"
import random
def shuffle_string(s):
    """Randomly rearranges the characters of the given string and returns the rearranged version."""
    # Fisher-Yates shuffle, a direct port of the C++ reference kept below.
    chars = list(s)
    for i in range(len(chars)):
        j = random.randint(i, len(chars) - 1)
        chars[i], chars[j] = chars[j], chars[i]
    return ''.join(chars)
# for (int i = 0, length = s.length(); i < length; i++) {
# int j = randomInteger(i, length - 1);
# if (i != j) {
# std::string::value_type temp = s[i];
# s[i] = s[j];
# s[j] = temp;
# }
# }
# return s;
# }
| SarahPythonista/acmpy | spgl/datastructures/shuffle.py | Python | mit | 721 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
## src/lastfm.py
##
## Copyright (C) 2007 Olivier Mehani <shtrom AT ssji.net>
## Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2007-2008 Jean-Marie Traissard <jim AT lapin.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
__version__ = '$Revision: 64 $'
from urllib import urlopen
from xml.dom import minidom
from time import time
class LastFM:
# Where to fetch the played song information
LASTFM_FORMAT_URL = \
'http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.xml'
# Delay in seconds after which the last song entry is considered too old to
# be displayed.
MAX_DELAY = 600
ARTIST = 0
NAME = 1
ALBUM = 2
TIME = 3
def __init__(self, username, proxies=None):
"""
Create a new LastFM object.
username, the Last.fm username
proxies, the list of proxies to use to connect to the Last.fm data, as
expected by urllib.urlopen()
"""
self.setUsername(username)
self._proxies = proxies
self.scrobbling = False
self.updateData()
def __str__(self):
return 'Last.fm song tracker for user %s.%s' % (self._username,
self.formatSongTitle(
' Last song was \"%(n)s\" by \"%(a)s\" in album \"%(b)s\".'))
def getUsername(self):
return self._username
def setUsername(self, username):
self._username = username
self.lastSongs = []
def updateData(self):
"""
Fetch the last recent tracks list and update the object accordingly.
Return True if the last played time has changed, False otherwise.
"""
try:
xmldocument = urlopen(self.LASTFM_FORMAT_URL % self._username,
self._proxies)
xmltree = minidom.parse(xmldocument)
except Exception:
print 'Error parsing XML from Last.fm...'
return False
if xmltree.childNodes.length != 1:
raise Exception('XML document not formed as expected')
recenttracks = xmltree.childNodes[0]
tracklist = recenttracks.getElementsByTagName('track')
# do not update if nothing more has been scrobbled since last time
if len(tracklist) > 0 and \
int(tracklist[0].getElementsByTagName('date')[0].
getAttribute('uts')) != self.getLastScrobbledTime():
self.lastSongs = []
for track in tracklist:
artistNode = track.getElementsByTagName('artist')[0]
if artistNode.firstChild:
artist = artistNode.firstChild.data
else:
artist = None
nameNode = track.getElementsByTagName('name')[0]
if nameNode.firstChild:
name = nameNode.firstChild.data
else:
name = None
albumNode = track.getElementsByTagName('album')[0]
if albumNode.firstChild:
album = albumNode.firstChild.data
else:
album = None
timeNode = track.getElementsByTagName('date')[0]
self.lastSongs.append((artist, name, album,
int(timeNode.getAttribute('uts'))))
self.scrobbling = True
return True
# if nothing has been scrobbled for too long, an update to the
# "currently" playing song should be made
if self.scrobbling and not self.lastSongIsRecent():
self.scrobbling = False
return True
return False
def getLastSong(self):
"""
Return the last played song as a tuple of (ARTIST, SONG, ALBUM, TIME).
"""
if len(self.lastSongs) < 1:
return None
return self.lastSongs[0]
def getLastScrobbledTime(self):
"""
Return the Unix time the last song was played.
"""
if len(self.lastSongs) < 1:
return 0
return self.lastSongs[0][self.TIME]
def timeSinceLastScrobbled(self, lst=None):
"""
Return the time in seconds since the last song has been scrobbled.
lst, the Unix time at which a song has been scrobbled, defaults to that
of the last song
"""
if lst is None:
lst = self.getLastScrobbledTime()
return int(time()) - lst
def lastSongIsRecent(self, delay=None):
"""
Return a boolean stating whether the last song has been played less than
the specified delay ago.
delay, the delay to use, defaults to self.MAX_DELAY
"""
if delay is None:
delay = self.MAX_DELAY
return self.timeSinceLastScrobbled() < delay
def getLastRecentSong(self, delay=None):
"""
Return the last *recently* played song.
"Recently" means that the song has been played less than delay
earlier.
delay, the delay to use, see lastSongIsRecent for the semantics
"""
self.updateData()
if self.lastSongIsRecent(delay):
return self.getLastSong()
return None
def formatSongTitle(self, formatString='%(a)s - %(n)s', songTuple=None):
"""
Format a song tuple according to a format string. This makes use of the
basic Python string formatting operations.
formatString, the string according to which the song should be formatted:
"%(a)s" is replaced by the artist;
"%(n)s" is replaced by the name of the song;
"%(b)s" is replaced by the album;
defaults to "%(a)s - %(n)s".
songTuple, the tuple representing the song, defaults to the last song
"""
str = ''
if songTuple is None:
songTuple = self.getLastRecentSong()
if songTuple is not None:
dict = {
'a': songTuple[0],
'n': songTuple[1],
'b': songTuple[2]
}
str = formatString % dict
return str
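# e.g. (hypothetical tuple) formatSongTitle('%(a)s - %(n)s',
# ('Artist', 'Song', 'Album', 0)) returns 'Artist - Song'.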
# Fallback if the script is called directly
if __name__ == '__main__':
from sys import argv
from time import sleep
if len(argv) != 2:
raise Exception('Incorrect number of arguments. Only the Last.fm username is required.')
lfm = LastFM(argv[1])
print lfm
while True:
if lfm.updateData():
print lfm.formatSongTitle()
sleep(60)
# vim: se ts=3:
| sgala/gajim | src/lastfm.py | Python | gpl-3.0 | 6,006 |
"""
Django settings for files project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import socket
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', '1') == '1'
# SECURITY WARNING: keep the secret key used in production secret!
if DEBUG:
SECRET_KEY = 'n4k+i6&sp9%)5thl!#5pfd#htmp+8**=ken+*w*kj4sa_^0mnn'
else:
with open(os.path.join(BASE_DIR, '..', 'key')) as key_file:
SECRET_KEY = key_file.read()
TEMPLATE_DEBUG = DEBUG
if DEBUG:
ALLOWED_HOSTS = []
else:
ALLOWED_HOSTS = ['files.%s' % socket.gethostname()]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'files.urls'
WSGI_APPLICATION = 'files.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
if DEBUG:
db = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
else:
db = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '',
'NAME': 'files',
'USER': 'files',
}
DATABASES = {
'default': db
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
if not DEBUG:
STATIC_ROOT = os.environ['STATIC_ROOT']
# Media files
MEDIA_URL = '/f/'
if DEBUG:
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
else:
MEDIA_ROOT = os.environ['MEDIA_ROOT']
| swarmer/files | files/settings.py | Python | mit | 2,671 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from twisted.internet import defer
from buildbot.changes import base
import buildbot.status.web.change_hook as change_hook
from buildbot.test.fake.web import FakeRequest
from buildbot.changes.manager import ChangeManager
class TestPollingChangeHook(unittest.TestCase):
class Subclass(base.PollingChangeSource):
pollInterval = None
called = False
def poll(self):
self.called = True
def setUpRequest(self, args, options=True):
self.changeHook = change_hook.ChangeHookResource(dialects={'poller' : options})
self.request = FakeRequest(args=args)
self.request.uri = "/change_hook/poller"
self.request.method = "GET"
master = self.request.site.buildbot_service.master
master.change_svc = ChangeManager(master)
self.changesrc = self.Subclass("example", None)
self.changesrc.setServiceParent(master.change_svc)
self.disabledChangesrc = self.Subclass("disabled", None)
self.disabledChangesrc.setServiceParent(master.change_svc)
anotherchangesrc = base.ChangeSource()
anotherchangesrc.setName("notapoller")
anotherchangesrc.setServiceParent(master.change_svc)
return self.request.test_render(self.changeHook)
@defer.inlineCallbacks
def test_no_args(self):
yield self.setUpRequest({})
self.assertEqual(self.request.written, "no changes found")
self.assertEqual(self.changesrc.called, True)
self.assertEqual(self.disabledChangesrc.called, True)
@defer.inlineCallbacks
def test_no_poller(self):
yield self.setUpRequest({"poller": ["nosuchpoller"]})
expected = "Could not find pollers: nosuchpoller"
self.assertEqual(self.request.written, expected)
self.request.setResponseCode.assert_called_with(400, expected)
self.assertEqual(self.changesrc.called, False)
self.assertEqual(self.disabledChangesrc.called, False)
@defer.inlineCallbacks
def test_invalid_poller(self):
yield self.setUpRequest({"poller": ["notapoller"]})
expected = "Could not find pollers: notapoller"
self.assertEqual(self.request.written, expected)
self.request.setResponseCode.assert_called_with(400, expected)
self.assertEqual(self.changesrc.called, False)
self.assertEqual(self.disabledChangesrc.called, False)
@defer.inlineCallbacks
def test_trigger_poll(self):
yield self.setUpRequest({"poller": ["example"]})
self.assertEqual(self.request.written, "no changes found")
self.assertEqual(self.changesrc.called, True)
self.assertEqual(self.disabledChangesrc.called, False)
@defer.inlineCallbacks
def test_allowlist_deny(self):
yield self.setUpRequest({"poller": ["disabled"]}, options={"allowed": ["example"]})
expected = "Could not find pollers: disabled"
self.assertEqual(self.request.written, expected)
self.request.setResponseCode.assert_called_with(400, expected)
self.assertEqual(self.changesrc.called, False)
self.assertEqual(self.disabledChangesrc.called, False)
@defer.inlineCallbacks
def test_allowlist_allow(self):
yield self.setUpRequest({"poller": ["example"]}, options={"allowed": ["example"]})
self.assertEqual(self.request.written, "no changes found")
self.assertEqual(self.changesrc.called, True)
self.assertEqual(self.disabledChangesrc.called, False)
@defer.inlineCallbacks
def test_allowlist_all(self):
yield self.setUpRequest({}, options={"allowed": ["example"]})
self.assertEqual(self.request.written, "no changes found")
self.assertEqual(self.changesrc.called, True)
self.assertEqual(self.disabledChangesrc.called, False)
| denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_status_web_change_hooks_poller.py | Python | mit | 4,543 |
import unittest
from malcolm.core import Controller, Process
from malcolm.modules.builtin.parts import ChoicePart
class TestChoicePart(unittest.TestCase):
def setUp(self):
self.o = ChoicePart(
name="cp", description="desc", choices=["a", "b"], value="a", writeable=True
)
self.c = Controller("mri")
self.c.add_part(self.o)
self.c.setup(Process("proc"))
def test_init(self):
assert self.o.name == "cp"
assert self.o.attr.value == "a"
assert self.o.attr.meta.description == "desc"
assert self.o.attr.meta.choices == ["a", "b"]
assert self.o.attr.meta.tags == ["widget:combo", "config:1"]
assert self.c.field_registry.fields[self.o] == [
("cp", self.o.attr, self.o.attr.set_value, False)
]
def test_setter(self):
assert self.o.attr.value == "a"
self.o.attr.set_value("b")
assert self.o.attr.value == "b"
with self.assertRaises(ValueError):
self.o.attr.set_value("c")
| dls-controls/pymalcolm | tests/test_modules/test_builtin/test_choicepart.py | Python | apache-2.0 | 1,045 |
#! python3
# renameDates.py - Renames filenames with American MM-DD-YYYY date format
# to European DD-MM-YYYY.
import shutil, os, re
# Create a regex that matches files with the American date format.
datePattern = re.compile(r"""^(.*?) # all text before the date
((0|1)?\d)- # one or two digits for the month
((0|1|2|3)?\d)- # one or two digits for the day
((19|20)\d\d) # four digits for the year
(.*?)$ # all text after the date
""",re.VERBOSE)
# Loop over the files in the working directory.
for amerFilename in os.listdir('.'):
mo = datePattern.search(amerFilename)
# Skip files without a date.
if mo is None:
continue
# Get different parts of the filename.
beforePart = mo.group(1)
monthPart = mo.group(2)
dayPart = mo.group(4)
yearPart = mo.group(6)
afterPart = mo.group(8)
# Form the European-style filename.
euroFilename = beforePart + dayPart + '-' + monthPart + '-' + yearPart + afterPart
# Get the full, absolute file paths.
absWorkingDir = os.path.abspath('.')
amerFilename = os.path.join(absWorkingDir, amerFilename)
euroFilename = os.path.join(absWorkingDir,euroFilename)
# Rename the files.
print('Renaming "%s" to "%s"...' % (amerFilename, euroFilename))
shutil.move(amerFilename,euroFilename)
| yehanghanGE/DirtyPython | renameDates.py | Python | gpl-3.0 | 1,447 |
import sys
from numpy import *
import numpy.matlib as matlib
def eprint(*v):
sys.stderr.write(" ".join(map(str, v)) + "\n")
class Image:
def __init__(self, w, h):
self.h, self.w = h, w
self.pixels = zeros((h, w, 3), uint8)
self.S = translate(w/2, h/2, 1) * scale(w/2, -h/2, 1)
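# S maps normalized device coordinates in [-1, 1] to pixel coordinates,
# flipping y so that +y points up on screen.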
def totga(self):
h, w = self.h, self.w
header = array([0, 0, 2, 0, 0, 0, 0, 0, 0, 0, h % 256, h / 256,
w % 256, w / 256, h % 256, h / 256, 24, 32], uint8)
return header.tostring() + self.pixels.tostring()
def vertex(self, pos, col=255):
o = self.S * pos
x, y, _, _ = map(int, o)
if not ((0 <= y < self.h) and (0 <= x < self.w)):
return
self.pixels[y][x] = col
def scale(x, y, z):
return matrix([[x,0,0,0],
[0,y,0,0],
[0,0,z,0],
[0,0,0,1]])
def translate(x, y, z):
T = matlib.identity(4)
T[3] = x, y, z, 1
return T.T
def project(v):
V = matrix(v)
V[0][0] /= V[2][0]
V[1][0] /= V[2][0]
return V
def vec(*c):
return matrix(c).T
def mapmul(A, Bs):
return [A * B for B in Bs]
def draw_points(M, ps, cs=None):
vs = map(project, mapmul(M, ps))
for i in xrange(len(vs)):
v = vs[i]
c = cs[i] if cs else 255
fb.vertex(v, c)
fb = Image(800,800)
if 0:
cube = array([
vec(-1,-1,-1, 1),
vec(-1,-1, 1, 1),
vec(-1, 1,-1, 1),
vec(-1, 1, 1, 1),
vec( 1,-1,-1, 1),
vec( 1,-1, 1, 1),
vec( 1, 1,-1, 1),
vec( 1, 1, 1, 1),
])
draw_points(translate(2, -2, 4) * scale(.25, .25, .25), cube)
else:
sphere = []
colors = []
I = 32
J = 4 * I
for i in xrange(I):
s = pi * i / I
y = cos(s)
r = sin(s)
for j in xrange(J):
t = 2 * pi * (j-J/2.) / (J/2.)
x = r * cos(t)
z = r * sin(t)
v = vec(x,y,z,1)
sphere.append(v)
c = 127 * (float(v.T * vec(0,0,-.8,1)))
colors.append(c)
draw_points(translate(0, 0, 2), sphere, colors)
sys.stdout.write(fb.totga())
| mastensg/52 | 04/4.py | Python | isc | 2,220 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Payload to use when notifying the system about a container change.
"""
from atom.api import (Atom, Value, Str, List)
class ContainerChange(Atom):
"""Payload to use when notifying the system about a container change.
"""
#: Reference to object from which this event originate.
obj = Value()
#: Name of the modified container.
name = Str()
#: List of added entries. Should not be manipulated directly by user code.
#: Use the add_operation method to add operations.
added = List()
#: List of moved entries with their old and new positions. Should not be
#: manipulated directly by user code. Use the add_operation method to add
#: operations.
moved = List()
#: List of removed entries. Should not be manipulated directly by user
#: code. Use the add_operation method to add operations.
removed = List()
#: List of ContainerChange representing an ordered sequence of change.
collapsed = List()
#: Private member used to store the last kind of added operation.
_last_added = Value()
def add_operation(self, typ, op_desc):
"""Add an operation.
If operations of different types are added, they are represented by
separate ContainerChange objects appended to the collapsed list. Using
this method ensures that only one kind of list is non-empty. Consumers
should always check the collapsed list first.
Parameters
----------
typ : {'added', 'moved', 'removed'}
The type of operation to add to the change set.
op_desc : tuple
Tuple describing the operation it should be of the form:
- 'added' : (index, obj)
- 'moved' : (old_index, new_index, obj)
- 'removed' : (index, obj)
"""
# If we are already working with a collapsed change simply check the
# last one to see if we can append to its changes or create a new
# entry.
if self.collapsed:
if typ != self.collapsed[-1]._last_added:
self.collapsed.append(ContainerChange(obj=self.obj,
name=self.name))
self.collapsed[-1].add_operation(typ, op_desc)
return
if self._last_added and typ != self._last_added:
# Clone ourself and clean all lists
clone = ContainerChange(obj=self.obj, name=self.name,
added=self.added,
moved=self.moved, removed=self.removed,
_last_added=self._last_added)
del self.added, self.moved, self.removed
self.collapsed.append(clone)
# We are now in a collapsed state so add_operation will do its
# job
self.add_operation(typ, op_desc)
return
if typ not in ('moved', 'added', 'removed'):
msg = "typ argument must be in 'moved', 'added', 'removed' not {}"
raise ValueError(msg.format(typ))
if typ == 'moved':
if not len(op_desc) == 3:
raise ValueError('Moved operation should be described by :'
'(old, new, obj) not {}'.format(op_desc))
elif typ in ('added', 'removed'):
if not len(op_desc) == 2:
t = typ.capitalize()
raise ValueError(t + ' operation should be described by :'
'(index, obj) not {}'.format(op_desc))
# Otherwise simply append the operation.
getattr(self, typ).append(op_desc)
self._last_added = typ
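# Usage sketch (hypothetical objects): mixing operation kinds collapses the
# change set into ordered sub-changes, so consumers replay them in order:
#   change = ContainerChange(obj=parent, name='children')
#   change.add_operation('added', (0, child_a))
#   change.add_operation('removed', (1, child_b))
#   assert len(change.collapsed) == 2 and not change.added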
| Ecpy/ecpy | exopy/utils/container_change.py | Python | bsd-3-clause | 4,033 |
from flask_wtf import FlaskForm
from kqueen_ui.api import get_service_client
from slugify import slugify
from wtforms import PasswordField, StringField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email, EqualTo, Length
class UserRegistrationForm(FlaskForm):
organization_name = StringField('Organization Name', validators=[DataRequired()])
email = EmailField('Email', validators=[Email()])
password_1 = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=6, message='Password must be at least 6 characters long.')
]
)
password_2 = PasswordField(
'Repeat Password',
validators=[
DataRequired(),
EqualTo('password_1', message='Passwords do not match.')
]
)
def validate(self):
if not FlaskForm.validate(self):
return False
# Cannot allow this Organization name, because it would cause issues on backend
if self.organization_name.data == 'global':
self.organization_name.errors.append('Cannot allow this Organization name for secret reasons, shush.')
return False
# TODO: remove these uniqueness checks after introduction of unique constraint
# in ETCD storage class on backend
client = get_service_client()
# Check if organization exists on backend
response = client.organization.list()
if response.status > 200:
self.organization_name.errors.append('Can not contact backend at this time.')
return False
organizations = response.data
organization_names = [org['name'] for org in organizations]
organization_namespaces = [o['namespace'] for o in organizations]
if self.organization_name.data in organization_names or slugify(self.organization_name.data) in organization_namespaces:
self.organization_name.errors.append('Organization {} already exists.'.format(self.organization_name.data))
return False
# Check if e-mail and username exists on backend
response = client.user.list()
if response.status > 200:
self.email.errors.append('Can not contact backend at this time.')
return False
users = response.data
user_emails = [u['email'] for u in users if 'email' in u]
if self.email.data in user_emails:
self.email.errors.append('This e-mail is already registered.')
return False
user_usernames = [u['username'] for u in users]
if self.email.data in user_usernames:
self.email.errors.append('This username is already registered.')
return False
return True
| atengler/kqueen-ui | kqueen_ui/blueprints/registration/forms.py | Python | mit | 2,776 |
# -*- coding: utf-8 -*-
"""The enemy Sprite. With shooting capability.
"""
import random
import pygame
from . import bullet
from . import const
from . import misc
class Enemy(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self._images = misc.load_sliced_sprites(100, 70, "enemy_ng.png")
self.rect = self._images[0].get_rect()
self.rect.centerx = const.DISPLAY_WIDTH / 2
self.orig_image = self._images[0]
self._frame = 0
self.image = self._images[self._frame]
self.shooting = False
self.bullet = bullet.Bullet(self.rect.center, const.BULLET_DIR_DOWN)
self.bullets = pygame.sprite.RenderPlain()
def update(self):
if random.random() < 0.3:
if random.random() < 0.5:
self.rect.move_ip (3, 0)
else:
self.rect.move_ip (-3, 0)
if random.random() > 0.1:
self.shooting = True
self.shoot()
if self.shooting:
self._frame += 1
if self._frame >= len(self._images): self._frame = 0
self.image = self._images[self._frame]
def reset_bullet(self):
self.bullet.rect.center = self.rect.center
def shoot(self):
if not self.bullet.alive():
self.reset_bullet()
self.bullets.add(self.bullet)
| jnumm/fuzzy-octo-wookie | octowookielib/enemy.py | Python | gpl-3.0 | 1,387 |
# scanner.py - tokenize Wisent's input
#
# Copyright (C) 2008, 2009, 2012 Jochen Voss <voss@seehuhn.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def isascii(s):
return all(ord(c)<128 for c in s)
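# e.g. isascii(u'rule') -> True, isascii(u'r\xfcle') -> False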
def conv(s):
if isinstance(s, unicode) and isascii(s):
return str(s)
else:
return s
def tokens(source):
"""Generator to read input and break it into tokens.
'Source' must iterate over the lines of input (it could for
example be a file-like object). The generator then yields
4-tuples consisting of a type string, a value, the line number
(starting with 1) and the column number (starting with 1): if the
type string is one of "token" or "string", the value is the
corresponding input character sequence. Otherwise both the type
string and the value are the same, single input character.
If the input ends in an unterminated string or comment, a
SyntaxError exception is raised.
"""
s = None
state = None
line = 1
for l in source:
l = l.expandtabs()
if not l.endswith("\n"):
l = l + '\n'
for col, c in enumerate(l):
if state == "skip":
state = None
elif state == "word":
if c.isalnum() or c in "-_":
s += c
else:
yield ("token", conv(s), line0, col0)
state = None
elif state == "string":
if c == '\\':
state = "quote"
elif c == sep:
yield ("string", conv(s), line0, col0)
state = "skip"
else:
s += c
elif state == "quote":
s += c
state = "string"
elif state == "comment" and c == '\n':
state = "skip"
if state is None:
line0 = line
col0 = col+1
if c == "'":
state = "string"
sep = "'"
s = ""
elif c == '"':
state = "string"
sep = '"'
s = ""
elif c.isalnum() or c == "_":
state = "word"
s = c
elif c == "#":
state = "comment"
elif c.isspace():
state = "skip"
else:
yield (conv(c), conv(c), line0, col0)
state = "skip"
line += 1
if state == "word":
yield ("token", conv(s), line0, col0)
elif state not in [ None, "skip", "comment" ]:
if l[-1] == '\n':
l = l[:-1]
msg = "unterminated string"
raise SyntaxError(msg, (source.name, line0, col0, l[-20:]))
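# Usage sketch (hypothetical input): tokenizing one grammar line
#   list(tokens(['rule: a "b" ;\n']))
# yields ('token', 'rule', 1, 1), (':', ':', 1, 5), ('token', 'a', 1, 7),
# ('string', 'b', 1, 9) and (';', ';', 1, 13).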
| seehuhn/wisent | scanner.py | Python | gpl-2.0 | 3,522 |
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo.config import cfg
from oslo.utils import timeutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
cfg.ListOpt('baremetal_scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ExactRamFilter',
'ExactDiskFilter',
'ExactCoreFilter',
],
help='Which filter class names to use for filtering '
'baremetal hosts when not specified in the request.'),
cfg.BoolOpt('scheduler_use_baremetal_filters',
default=False,
help='Flag to decide whether to use '
'baremetal_scheduler_default_filters or not.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
super(IronicNodeState, self).update_from_compute_node(compute)
self.total_usable_disk_gb = compute['local_gb']
self.hypervisor_type = compute.get('hypervisor_type')
self.hypervisor_version = compute.get('hypervisor_version')
self.hypervisor_hostname = compute.get('hypervisor_hostname')
self.cpu_info = compute.get('cpu_info')
if compute.get('supported_instances'):
self.supported_instances = jsonutils.loads(
compute.get('supported_instances'))
self.updated = compute['updated_at']
def consume_from_instance(self, instance):
"""Consume nodes entire resources regardless of instance request."""
super(IronicNodeState, self).consume_from_instance(instance)
self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
"""Ironic HostManager class."""
def __init__(self):
super(IronicHostManager, self).__init__()
if CONF.scheduler_use_baremetal_filters:
baremetal_default = CONF.baremetal_scheduler_default_filters
CONF.scheduler_default_filters = baremetal_default
def host_state_cls(self, host, node, **kwargs):
"""Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return IronicNodeState(host, node, **kwargs)
else:
return host_manager.HostState(host, node, **kwargs)
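# Illustrative dispatch (added for clarity; not runnable without a nova
# deployment): a compute record reporting 'baremetal cpu' yields the
# Ironic-specific state class, anything else falls back to HostState:
#   IronicHostManager().host_state_cls(
#       'host1', 'node1', compute={'cpu_info': 'baremetal cpu'})
#   # -> IronicNodeState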
|
vmthunder/nova
|
nova/scheduler/ironic_host_manager.py
|
Python
|
apache-2.0
| 3,918
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0003_auto_20151106_1320'),
]
operations = [
migrations.CreateModel(
name='User_Profiles',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(max_length=20)),
('email', models.CharField(max_length=50)),
('default_shipping_address', models.CharField(max_length=100)),
('phone_number', models.CharField(max_length=20)),
('tw_id', models.CharField(max_length=10)),
('real_name', models.CharField(max_length=10)),
],
),
migrations.RemoveField(
model_name='users',
name='default_shipping_address',
),
migrations.RemoveField(
model_name='users',
name='email',
),
migrations.RemoveField(
model_name='users',
name='phone_number',
),
migrations.RemoveField(
model_name='users',
name='real_name',
),
migrations.RemoveField(
model_name='users',
name='tw_id',
),
migrations.RemoveField(
model_name='users',
name='username',
),
migrations.AddField(
model_name='user_profiles',
name='user',
field=models.ForeignKey(to='login.Users'),
),
]
|
sonicyang/chiphub
|
login/migrations/0004_auto_20151108_1139.py
|
Python
|
mit
| 1,668
|
import base64
import json
import os
import pickle
import serial
import time
from flask import render_template, jsonify, url_for
from state import ACState
from tasks.ir import send_ir_command
from webapp import app, redis
BUTTON_DIR = os.path.join(os.path.dirname(__file__), 'button_json')
buttons = dict()
for button_file in os.listdir(BUTTON_DIR):
button_file = os.path.abspath(
os.path.join(BUTTON_DIR, button_file))
with open(button_file, 'r') as f:
button_data = json.load(f)
buttons[button_data['id']] = button_data
def get_state():
state = None
if redis.exists(app.config['REDIS_SETTINGS_KEY']):
state = pickle.loads(redis.get(
app.config['REDIS_SETTINGS_KEY']))
else:
state = ACState()
return state
def save_state(state):
redis.set(app.config['REDIS_SETTINGS_KEY'],
pickle.dumps(state))
@app.route("/")
def index():
global buttons
state = get_state()
config = {
'buttons': buttons.values(),
'state': state.export(),
'apiUrl': url_for('.do_button', button='PLACEHOLDER'),
}
return render_template('index.jade', config=config)
@app.route("/do-button/<button>/")
def do_button(button):
global buttons
state = get_state()
should_send = state.apply_button(button)
save_state(state)
button_data = buttons[button]
buf = bytearray(base64.b64decode(button_data['irdata']))
if should_send:
send_ir_command.delay(buf)
return jsonify(state.export())
@app.route("/state/")
def state():
state = get_state()
return jsonify(state.export())
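# The state round-trips through redis as a pickle blob (illustrative sketch,
# using the ACState API assumed above; 'power' is a hypothetical button id):
#   state = get_state()
#   state.apply_button('power')
#   save_state(state)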
|
wyattanderson/pywebir
|
webapp/views.py
|
Python
|
mit
| 1,652
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Test the pylit.py literal python module
# =======================================
#
# :Date: $Date$
# :Version: SVN-Revision $Revision$
# :URL: $URL: svn+ssh://svn.berlios.de/svnroot/repos/pylit/trunk/test/pylit_test.py $
# :Copyright: 2006 Guenter Milde.
# Released under the terms of the GNU General Public License
# (v. 2 or later)
#
# .. contents::
from pprint import pprint
# import operator
from pylit import *
from pylit_elisp import *
import nose
import sys  # used by the __main__ block below
# Test source samples
# ===================
#
code = {}
filtered_code = {}
text = {}
code["simple"] = [";; documentation::\n",
"\n",
"code_block\n"]
filtered_code["simple"] = code["simple"]
text["simple"] = ["documentation::\n",
"\n",
" code_block\n"]
code["section header"] = [";; \n", ";;;Commentary:\n"]
filtered_code["section header"] = [";; \n", ";; .. |elisp> ;;;Commentary:\n"]
text["section header"] = ["\n", ".. |elisp> ;;;Commentary:\n"]
# This example fails, as the rst-comment in the first text line is recognized
# as a leading code_block (header).
# code["section header"] = [";;;Commentary:\n"]
# filtered_code["section header"] = [";; .. |elisp> ;;;Commentary:\n"]
# text["section header"] = [".. |elisp> ;;;Commentary:\n"]
code["section"] = [";; \n",
";;;Commentary:\n",
";; This is\n",
";; a test."]
filtered_code["section"] = [";; \n",
";; .. |elisp> ;;;Commentary:\n",
";; This is\n",
";; a test."]
text["section"] = ["\n",
".. |elisp> ;;;Commentary:\n",
"This is\n",
"a test."]
def test_elisp_code_preprocessor():
"""test the code preprocessing filter"""
for key in code.keys():
data = code[key]
soll = filtered_code[key]
output = [line for line in elisp_code_preprocessor(data)]
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
def test_elisp_code_postprocessor():
"""test the code preprocessing filter"""
for key in code.keys():
data = filtered_code[key]
soll = code[key]
output = [line for line in elisp_code_postprocessor(data)]
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
def test_elisp_settings():
assert defaults.languages[".el"] == "elisp"
assert defaults.comment_strings["elisp"] == ';; '
assert defaults.preprocessors["elisp2text"] == elisp_code_preprocessor
assert defaults.postprocessors["text2elisp"] == elisp_code_postprocessor
def test_elisp2text():
for key in code.keys():
data = code[key]
soll = text[key]
converter = Code2Text(data, language="elisp")
output = converter()
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
class test_Code2Text(object):
def test_setup(self):
converter = Code2Text(text['simple'], language="elisp")
assert converter.preprocessor == elisp_code_preprocessor
class test_Text2Code(object):
def test_setup(self):
converter = Text2Code(text['simple'], language="elisp")
assert converter.postprocessor == elisp_code_postprocessor
def test_call_without_filter(self):
for key in code.keys():
data = text[key]
soll = filtered_code[key]
converter = Text2Code(data, comment_string=";; ")
output = converter()
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
def test_convert(self):
for key in code.keys():
data = text[key]
soll = filtered_code[key]
converter = Text2Code(data, language="elisp")
output = [line for line in converter.convert(data)]
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
def test_call_with_filter(self):
for key in code.keys():
data = text[key]
soll = code[key]
converter = Text2Code(data, language="elisp")
output = converter()
print "ist %r (%s)"%(output, key)
print "soll %r (%s)"%(soll, key)
assert output == soll
if __name__ == "__main__":
nose.runmodule() # requires nose 0.9.1
sys.exit()
|
tkarna/cofs
|
pylit/contribs/pylit_elisp_test.py
|
Python
|
mit
| 4,665
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field days on 'Group'
db.create_table('rainman_group_days', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('group', models.ForeignKey(orm['rainman.group'], null=False)),
('dayrange', models.ForeignKey(orm['rainman.dayrange'], null=False))
))
db.create_unique('rainman_group_days', ['group_id', 'dayrange_id'])
# Adding M2M table for field xdays on 'Group'
db.create_table('rainman_group_xdays', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('group', models.ForeignKey(orm['rainman.group'], null=False)),
('dayrange', models.ForeignKey(orm['rainman.dayrange'], null=False))
))
db.create_unique('rainman_group_xdays', ['group_id', 'dayrange_id'])
def backwards(self, orm):
# Removing M2M table for field days on 'Group'
db.delete_table('rainman_group_days')
# Removing M2M table for field xdays on 'Group'
db.delete_table('rainman_group_xdays')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rainman.controller': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Controller'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_on': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'controllers'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.day': {
'Meta': {'object_name': 'Day'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'rainman.dayrange': {
'Meta': {'object_name': 'DayRange'},
'days': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ranges'", 'symmetrical': 'False', 'to': "orm['rainman.Day']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'rainman.daytime': {
'Meta': {'unique_together': "(('day', 'descr'),)", 'object_name': 'DayTime'},
'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'times'", 'to': "orm['rainman.Day']"}),
'descr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rainman.environmenteffect': {
'Meta': {'object_name': 'EnvironmentEffect'},
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environment_effects'", 'to': "orm['rainman.ParamGroup']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.feed': {
'Meta': {'object_name': 'Feed'},
'db_max_flow_wait': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'max_flow_wait'"}),
'flow': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.group': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Group'},
'adj_rain': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_sun': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_temp': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_wind': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'days': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_y'", 'blank': 'True', 'to': "orm['rainman.DayRange']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['rainman.Site']"}),
'valves': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['rainman.Valve']"}),
'xdays': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_n'", 'blank': 'True', 'to': "orm['rainman.DayRange']"})
},
'rainman.groupadjust': {
'Meta': {'unique_together': "(('group', 'start'),)", 'object_name': 'GroupAdjust'},
'factor': ('django.db.models.fields.FloatField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adjusters'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.groupoverride': {
'Meta': {'unique_together': "(('group', 'name'), ('group', 'start'))", 'object_name': 'GroupOverride'},
'allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.history': {
'Meta': {'unique_together': "(('site', 'time'),)", 'object_name': 'History'},
'feed': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rain': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history'", 'to': "orm['rainman.Site']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.level': {
'Meta': {'unique_together': "(('valve', 'time'),)", 'object_name': 'Level'},
'flow': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'levels'", 'to': "orm['rainman.Valve']"})
},
'rainman.log': {
'Meta': {'object_name': 'Log'},
'controller': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Controller']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['rainman.Site']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 22, 0, 0)', 'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Valve']"})
},
'rainman.paramgroup': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'ParamGroup'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rain': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'param_groups'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.rainmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'RainMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rain_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.schedule': {
'Meta': {'unique_together': "(('valve', 'start'),)", 'object_name': 'Schedule'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['rainman.Valve']"})
},
'rainman.site': {
'Meta': {'object_name': 'Site'},
'db_rain_delay': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'rain_delay'"}),
'db_rate': ('django.db.models.fields.FloatField', [], {'default': '2', 'db_column': "'rate'"}),
'host': ('django.db.models.fields.CharField', [], {'default': "'localhost'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50005'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
'rainman.sunmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'SunMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sun_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.tempmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'TempMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'temp_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.userforgroup': {
'Meta': {'object_name': 'UserForGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'rainman.valve': {
'Meta': {'unique_together': "(('controller', 'name'),)", 'object_name': 'Valve'},
'area': ('django.db.models.fields.FloatField', [], {}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'controller': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Controller']"}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Feed']"}),
'flow': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_level': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.ParamGroup']"}),
'priority': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runoff': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'shade': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'start_level': ('django.db.models.fields.FloatField', [], {'default': '8'}),
'stop_level': ('django.db.models.fields.FloatField', [], {'default': '3'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'verbose': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'rainman.valveoverride': {
'Meta': {'unique_together': "(('valve', 'name'), ('valve', 'start'))", 'object_name': 'ValveOverride'},
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Valve']"})
},
'rainman.windmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'WindMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wind_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
}
}
complete_apps = ['rainman']
|
smurfix/HomEvenT
|
irrigation/rainman/migrations/0034_auto.py
|
Python
|
gpl-3.0
| 21,466
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Martín Gaitán
# Copyright (c) 2012-2013, Alexander Jung-Loddenkemper
# This file is part of Waliki (http://waliki.nqnwebs.com/)
# License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE)
#===============================================================================
# DOCS
#===============================================================================
"""All supported markups
"""
#===============================================================================
# IMPORTS
#===============================================================================
import re
import docutils.core
import docutils.io
import markdown
import textwrap
from rst2html5 import HTML5Writer
import wiki
#===============================================================================
# MARKUP BASE
#===============================================================================
class Markup(object):
""" Base markup class."""
NAME = 'Text'
META_LINE = '%s: %s\n'
EXTENSION = '.txt'
HOWTO = """ """
def __init__(self, raw_content):
self.raw_content = raw_content
@classmethod
def render_meta(cls, key, value):
return cls.META_LINE % (key, value)
def process(self):
"""
        return (html, body, meta), where html is the rendered output,
        body is the editable content (text), and meta is
        a dictionary with at least ['title', 'tags'] keys
"""
raise NotImplementedError("override in a subclass")
@classmethod
def howto(cls):
return cls(textwrap.dedent(cls.HOWTO)).process()[0]
#===============================================================================
# MARKDOWN
#===============================================================================
class Markdown(Markup):
NAME = 'markdown'
META_LINE = '%s: %s\n'
EXTENSION = '.md'
HOWTO = """
This editor is [markdown][] featured.
* I am
* a
* list
Turns into:
* I am
* a
* list
`**bold** and *italics*` turn into **bold** and *italics*. Very easy!
Create links with `[Wiki](http://github.com/alexex/wiki)`.
    They turn into [Wiki](http://github.com/alexex/wiki).
Headers are as follows:
# Level 1
## Level 2
### Level 3
[markdown]: http://daringfireball.net/projects/markdown/
"""
def process(self):
# Processes Markdown text to HTML, returns original markdown text,
# and adds meta
md = markdown.Markdown(['codehilite', 'fenced_code', 'meta'])
html = md.convert(self.raw_content)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = md.Meta
return html, body, meta
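# A minimal usage sketch (not part of the original module), assuming the
# markdown package imported above: meta lines come first, separated from the
# body by a blank line.
def _markdown_demo():
    html, body, meta = Markdown("title: Home\n\n# Hello\n\nSome *text*.").process()
    # meta maps lowercased keys to value lists, e.g. {'title': ['Home']}
    return html, body, meta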
#===============================================================================
# RESTRUCTURED TEXT
#===============================================================================
class RestructuredText(Markup):
NAME = 'restructuredtext'
META_LINE = '.. %s: %s\n'
IMAGE_LINE = '.. image:: %(url)s'
LINK_LINE = '`%(filename)s <%(url)s>`_'
EXTENSION = '.rst'
HOWTO = """
This editor is `reStructuredText`_ featured::
* I am
* a
* list
Turns into:
* I am
* a
* list
``**bold** and *italics*`` turn into **bold** and *italics*. Very easy!
Create links with ```Wiki <http://github.com/alexex/wiki>`_``.
They turn into `Wiki <https://github.com/alexex/wiki>`_.
Headers are just any underline (and, optionally, overline).
For example::
Level 1
*******
Level 2
-------
Level 3
+++++++
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
"""
def process(self):
settings = {'initial_header_level': 2,
'record_dependencies': True,
'stylesheet_path': None,
'link_stylesheet': True,
'syntax_highlight': 'short',
}
html = self._rst2html(self.raw_content,
settings_overrides=settings)
        # Convert unknown links to internal wiki links.
# Examples:
# Something_ will link to '/something'
# `something great`_ to '/something_great'
# `another thing <thing>`_ '/thing'
refs = re.findall(r'Unknown target name: "(.*)"', html)
if refs:
content = self.raw_content + self.get_autolinks(refs)
html = self._rst2html(content, settings_overrides=settings)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = self._parse_meta(meta_lines.split('\n'))
return html, body, meta
def get_autolinks(self, refs):
autolinks = '\n'.join(['.. _%s: /%s' % (ref, wiki.urlify(ref, False))
for ref in refs])
return '\n\n' + autolinks
def _rst2html(self, source, source_path=None,
source_class=docutils.io.StringInput,
destination_path=None, reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext', writer=None,
writer_name=None, settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
if not writer:
writer = HTML5Writer()
# Taken from Nikola
# http://bit.ly/14CmQyh
output, pub = docutils.core.publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=docutils.io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts['body']
def _parse_meta(self, lines):
""" Parse Meta-Data. Taken from Python-Markdown"""
META_RE = re.compile(r'^\.\.\s(?P<key>.*?): (?P<value>.*)')
meta = {}
key = None
for line in lines:
if line.strip() == '':
continue
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
return meta
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
print(__doc__)
|
mgaitan/waliki_flask
|
waliki/markup.py
|
Python
|
bsd-3-clause
| 7,181
|
# encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import pybreaker
import requests as requests
import jmespath
from jormungandr import cache, app, utils, new_relic
from jormungandr.parking_space_availability import AbstractParkingPlacesProvider
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
from jormungandr.ptref import FeedPublisher
DEFAULT_STAR_FEED_PUBLISHER = None
class StarProvider(AbstractParkingPlacesProvider):
def __init__(self, url, operators, dataset, timeout=1, feed_publisher=DEFAULT_STAR_FEED_PUBLISHER, **kwargs):
self.ws_service_template = url + '/?dataset={}&refine.idparc={}'
self.operators = [o.lower() for o in operators]
self.timeout = timeout
self.dataset = dataset
fail_max = kwargs.get('circuit_breaker_max_fail', app.config['CIRCUIT_BREAKER_MAX_STAR_FAIL'])
reset_timeout = kwargs.get('circuit_breaker_reset_timeout', app.config['CIRCUIT_BREAKER_STAR_TIMEOUT_S'])
self.breaker = pybreaker.CircuitBreaker(fail_max=fail_max, reset_timeout=reset_timeout)
self._feed_publisher = FeedPublisher(**feed_publisher) if feed_publisher else None
self.log = logging.LoggerAdapter(logging.getLogger(__name__), extra={'dataset': self.dataset})
def support_poi(self, poi):
properties = poi.get('properties', {})
return properties.get('operator', '').lower() in self.operators
def get_informations(self, poi):
ref = poi.get('properties', {}).get('ref')
if not ref:
return
data = self._call_webservice(ref)
if not data:
return
available = jmespath.search('records[0].fields.nombreplacesdisponibles', data)
occupied = jmespath.search('records[0].fields.nombreplacesoccupees', data)
# Person with reduced mobility
available_PRM = jmespath.search('records[0].fields.nombreplacesdisponiblespmr', data)
occupied_PRM = jmespath.search('records[0].fields.nombreplacesoccupeespmr', data)
return ParkingPlaces(available, occupied, available_PRM, occupied_PRM)
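    # Illustrative payload shape, inferred from the jmespath queries above
    # (an assumption, not taken from official STAR documentation):
    #   {"records": [{"fields": {"nombreplacesdisponibles": 12,
    #                            "nombreplacesoccupees": 30,
    #                            "nombreplacesdisponiblespmr": 2,
    #                            "nombreplacesoccupeespmr": 1}}]}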
@cache.memoize(app.config['CACHE_CONFIGURATION'].get('TIMEOUT_STAR', 30))
def _call_webservice(self, parking_id):
try:
data = self.breaker.call(requests.get, self.ws_service_template.format(self.dataset, parking_id),
timeout=self.timeout)
# record in newrelic
self.record_call("OK")
return data.json()
except pybreaker.CircuitBreakerError as e:
msg = 'STAR service dead (error: {})'.format(e)
self.log.error(msg)
# record in newrelic
utils.record_external_failure(msg, 'parking', 'STAR')
except requests.Timeout as t:
msg = 'STAR service timeout (error: {})'.format(t)
self.log.error(msg)
# record in newrelic
utils.record_external_failure(msg, 'parking', 'STAR')
        except Exception:
msg = 'STAR service error'
self.log.exception(msg)
# record in newrelic
utils.record_external_failure(msg, 'parking', 'STAR')
return None
def status(self):
return {'operators': self.operators}
def feed_publisher(self):
return self._feed_publisher
def record_call(self, status, **kwargs):
"""
        status is one of: "OK", "failure"
"""
params = {'parking_service': 'STAR', 'dataset': self.dataset, 'status': status}
params.update(kwargs)
new_relic.record_custom_event('parking_service', params)
|
antoine-de/navitia
|
source/jormungandr/jormungandr/parking_space_availability/car/star.py
|
Python
|
agpl-3.0
| 4,893
|
"""
Usage:
trtl [-p PIPELINE] [-w WORKSPACE_DIR] [-r RESULT_DIR] [-d IMAGE] [-s STAGE] [-t TIMEOUT]
[-v VOLUME]... [--stop STOP] [--one] [--max-recurse MAX_RECURSE] [--verbose]
[--home-dir=HOME_DIR] [--volume-prefix=VOL_PREFIX]
Options:
-p PIPELINE Path to a turtle pipeline script.
-w WORKSPACE_DIR The directory to mount as /workspace inside the container. Typically the root
of your source tree. If not specified CWD will be used.
-r RESULT_DIR Target directory base for output from this stage. If not specified
<WORKSPACE_DIR> is used.
-d IMAGE Docker image URL. If not specified, the image from <PIPELINE> is used.
Please note: ports exposed by an image are automatically detected and exposed.
-s STAGE Optionally specify the stage to run on the input. If not specified it will
be derived from <PIPELINE>.
-t TIMEOUT The timeout in seconds, only supported on python3.
-v VOLUME Extra volume mounts for the container. Same format as -v for docker.
--one Run just one stage, then stop.
--stop STOP Run until the next stage is <STOP>, then stop.
--verbose Verbose
--max-recurse MAX_RECURSE The maximum stages to allow trtl to recurse through [default: 100]
--volume-prefix VOL_PREFIX Use this to prefix relative volume mounts, not CWD. Can also be set
using env var TRTL_CWD.
    --home-dir HOME_DIR Sets the path to use as "home" in paths [default: ~]
"""
from __future__ import absolute_import, print_function
import os
import json
import six
def main(opts):
""" Acts on the options derived from the usage described in __doc__.
"""
def abspath(path, prefix):
if not path.startswith("/"):
path = os.path.join(prefix, path)
return os.path.abspath(path)
def say(*args):
if opts['--verbose']:
print(*args)
from . import load_pipeline, stage, StageFailed, MaxRecursion, em
if opts['-r'] is not None:
opts['-r'] = os.path.abspath(opts['-r'])
settings = {k: v for k, v in six.iteritems(opts) if not k.startswith('--')}
opts['--max-recurse'] = int(opts['--max-recurse'])
if not opts['--volume-prefix']:
opts['--volume-prefix'] = os.environ.get('TRTL_CWD', os.getcwd())
say("OPTS", opts)
if settings['-p']:
for k, v in six.iteritems(load_pipeline(settings['-p']).stage(settings)):
if k == '-v':
settings['-v'].extend(v)
elif settings.get(k, None) is None:
settings[k] = v
if not settings['-p'] and not settings['-w']:
raise Exception("At least one of INPUT_DIR and PIPELINE must be set.")
if not settings['-s']:
raise Exception("STAGE must be set.")
if not settings['-d']:
raise Exception("IMAGE must be set.")
if not settings['-w']:
settings['-w'] = os.getcwd()
if not settings['-r']:
settings['-r'] = settings['-w']
settings['-w'] = abspath(os.path.expanduser(settings['-w']), opts['--volume-prefix'])
settings['-r'] = abspath(os.path.expanduser(settings['-r']), opts['--volume-prefix'])
say("SETTINGS", settings)
del settings['-p']
for _ in range(opts['--max-recurse']):
try:
res_path = settings['-r'] + '/result.json'
if os.path.exists(res_path):
os.unlink(res_path)
say(_, "SETTINGS", settings)
stage(settings)
print(em('+1'), "Stage successful:", settings['-s'])
if not os.path.exists(res_path):
say("MISSING", res_path)
break
with open(res_path) as fin:
tmp = json.load(fin)
if tmp.get('-s', None) is None or tmp['-s'] == settings['-s']:
break
if '-r' in tmp:
tmp['-r'] = abspath(tmp['-r'], opts['--volume-prefix'])
settings.update(tmp)
if '-p' in settings:
settings.update(load_pipeline(tmp['-p']).stage(settings))
del settings['-p']
print(em('fast_forward'), "Next stage is", settings['-s'])
if opts['--one'] or opts['--stop'] == settings['-s']:
print(em("no_entry"), "Not proceeding to next stage because --stop")
break
except StageFailed as e:
print(em('x', 'boom'), "Failed", e)
return False
else:
print(em('x', 'boom'), "Maximum recursion reached:", opts['--max-recurse'])
raise MaxRecursion(settings)
print(em('ok', 'tada'), "Final stage completed")
return True
def cli():
""" Wrapper for docopt parsing without dirtying the global namespace.
"""
import docopt
opts = docopt.docopt(__doc__)
main(opts)
if __name__ == '__main__':
cli()
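# Example invocation (illustrative; all paths below are hypothetical):
#   python -m turtles.trtl -p pipelines/build.py -w ~/src/app -r /tmp/out --stop deploy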
|
philipbergen/turtles
|
turtles/trtl.py
|
Python
|
mit
| 5,002
|
# Actual match API
from enum import Enum
import json
from datetime import date
class Minute:
def __init__(self, minuteObject):
self._normal = minuteObject['normal']
self._added = minuteObject['added']
def getNormal(self):
return self._normal
def getAdded(self):
return self._added
def toString(self):
return str(self._normal) + "'+" + str(self._added) if self._added > 0\
else str(self._normal) + "'"
class GoalType(Enum):
REGULAR = 1
PENALTY = 2
OWN_GOAL = 3
class Goal:
def __init__(self, goalObject):
self._scorer = goalObject['scorer']
self._goalType = {
"regular": GoalType.REGULAR,
"penalty": GoalType.PENALTY,
"own goal": GoalType.OWN_GOAL
}[goalObject['goalType']]
self._minute = Minute(goalObject['minute'])
def getScorer(self):
return self._scorer
def getType(self):
return self._goalType
def getMinute(self):
return self._minute
def toString(self):
goalTypeString = {
GoalType.REGULAR: "Regular",
GoalType.PENALTY: "Penalty",
GoalType.OWN_GOAL: "Own goal"
}[self._goalType]
return self._minute.toString() + " " + self._scorer + " (" + goalTypeString + ")"
class CardColor(Enum):
YELLOW = 1
RED = 2
class Card:
def __init__(self, cardObject):
self._color = {
"yellow": CardColor.YELLOW,
'red': CardColor.RED,
}[cardObject['color']]
self._minute = Minute(cardObject['minute'])
def getColor(self):
return self._color
def getMinute(self):
return self._minute
    def toString(self):
        return self._minute.toString() + ", " + self._color.name.lower()
class Player:
def __init__(self, playerObject):
self._name = playerObject['name']
self._number = int(playerObject['number'])
self._cards = list()
for cardObject in playerObject['cards']:
self._cards.append(Card(cardObject))
def getName(self):
return self._name
def getShirtNumber(self):
return self._number
def getCards(self):
return self._cards
def toString(self):
playerDescription = str(self._number) + ". " + self._name
nbCards = len(self._cards)
if nbCards == 1:
card = self._cards[0]
            if card.getColor() == CardColor.RED:
playerDescription += ", sent off (" + card.getMinute().toString() + ")"
else:
playerDescription += ", booked (" + card.getMinute().toString() + ")"
elif nbCards == 2:
firstCard = self._cards[0]
secondCard = self._cards[1]
playerDescription += ", booked (" + firstCard.getMinute().toString() +\
") then sent off (" + secondCard.getMinute().toString() + ")";
elif nbCards == 3:
firstCard = self._cards[0]
secondCard = self._cards[1]
playerDescription += ", booked (" + firstCard.getMinute().toString() +\
"), booked again and sent off (" + secondCard.getMinute().toString() + ")";
return playerDescription
class Substitution:
def __init__(self, replacementObject):
self._replacedName = replacementObject['name']
self._minute = Minute(replacementObject['minute'])
def getSubstitutedName(self):
return self._replacedName
def getMinute(self):
return self._minute
class Substitute(Player):
def __init__(self, substituteObject):
super().__init__(substituteObject)
self._substitution = Substitution(substituteObject['replacement'])\
if substituteObject['replacement'] is not None else None
def getSubstitution(self):
return self._substitution
def toString(self):
playerDescription = super(Substitute, self).toString()
if self._substitution is not None:
playerDescription += ". Replaced " + self._substitution._replacedName +\
" (" + self._substitution.getMinute().toString() + ")"
return playerDescription
class Side:
def __init__(self, sideObject):
self._name = sideObject['name']
self._fullTimeGoals = int(sideObject['fulltimegoals'])
self._shotsOnTarget = int(sideObject['shotsontarget']) if sideObject['shotsontarget'] is not None else None
self._shotsWide = int(sideObject['shotswide']) if sideObject['shotswide'] is not None else None
self._goalList = list()
for goalObject in sideObject['goals']:
self._goalList.append(Goal(goalObject))
self._lineup = list()
for playerObject in sideObject['lineup']:
self._lineup.append(Player(playerObject))
self._substitutes = list()
for substituteObject in sideObject['substitutes']:
self._substitutes.append(Substitute(substituteObject))
def getName(self):
return self._name
def getFullTimeGoals(self):
return self._fullTimeGoals
def getShotsOnTarget(self):
return self._shotsOnTarget
def getShotsWide(self):
return self._shotsWide
def getGoalList(self):
return self._goalList
def getLineup(self):
return self._lineup
def getBench(self):
return self._substitutes
def toTeamString(self):
teamString = "Lineup\n"
for player in self.getLineup():
teamString += player.toString() + "\n"
teamString += "Bench\n"
for player in self.getBench():
teamString += player.toString() + "\n"
return teamString
def toGoalsString(self):
goalString = ""
if len(self._goalList) > 0:
for goal in self._goalList:
goalString += goal.toString() + "\n"
else:
goalString += "No goals\n"
return goalString
class BaseMatch:
def __init__(self, eventDate):
self._eventDate = eventDate
def getDate(self):
return self._eventDate
def getDateString(self):
return "{:%d %b %Y}".format(self._eventDate)
class Match(BaseMatch):
def __init__(self, matchPath):
with open(matchPath, 'r', encoding='utf-8') as matchFile:
matchObject = json.load(matchFile)
matchDate = matchObject['date']
super().__init__(date(matchDate['year'], matchDate['month'], matchDate['day']))
self._sides = dict()
self._sides['home'] = Side(matchObject['home'])
self._sides['away'] = Side(matchObject['away'])
def getHomeSide(self) -> Side:
return self._sides['home']
def getAwaySide(self) -> Side:
return self._sides['away']
def toShortString(self):
return self.getDateString() + ": " +\
self.getHomeSide().getName() + " " + str(self.getHomeSide().getFullTimeGoals()) +\
" - " + str(self.getAwaySide().getFullTimeGoals()) + " " + self.getAwaySide().getName()
def toString(self):
homeShots = self.getHomeSide().getShotsWide() + self.getHomeSide().getShotsOnTarget()
awayShots = self.getAwaySide().getShotsWide() + self.getAwaySide().getShotsOnTarget()
return self.toShortString() + "\n" + \
"Shots : " + str(homeShots) + " - " + str(awayShots) + "\n" + \
"Shots on target : " + str(self.getHomeSide().getShotsOnTarget()) + " - " +\
str(self.getAwaySide().getShotsOnTarget()) + "\n" + "\n" \
"Home goals\n" + self.getHomeSide().toGoalsString() + "\n" \
"Away goals\n" + self.getAwaySide().toGoalsString() + "\n" \
"Home team\n" + self.getHomeSide().toTeamString() + "\n" \
"Away team\n" + self.getAwaySide().toTeamString()
|
pwalch/football-stats
|
code/api/match_definitions.py
|
Python
|
gpl-3.0
| 7,899
|
from wsgiref.handlers import format_date_time
from datetime import datetime
from time import mktime
RESPONSE_CODES = {
"100" : "Continue",
"101" : "Switching Protocols",
"200" : "OK",
"201" : "Created",
"202" : "Accepted",
"203" : "Non-Authoritative Information",
"204" : "No Content",
"205" : "Reset Content",
"206" : "Partial Content",
"300" : "Multiple Choices",
"301" : "Moved Permanently",
"302" : "Found",
"303" : "See Other",
"304" : "Not Modified",
"305" : "Use Proxy",
"307" : "Temporary Redirect",
"400" : "Bad Request",
"401" : "Unauthorized",
"402" : "Payment Required",
"403" : "Forbidden",
"404" : "Not Found",
"405" : "Method Not Allowed",
"406" : "Not Acceptable",
"407" : "Proxy Authentication Required",
"408" : "Request Time-out",
"409" : "Conflict",
"410" : "Gone",
"411" : "Length Required",
"412" : "Precondition Failed",
"413" : "Request Entity Too Large",
"414" : "Request-URI Too Large",
"415" : "Unsupported Media Type",
"416" : "Requested range not satisfiable",
"417" : "Expectation Failed",
"500" : "Internal Server Error",
"501" : "Not Implemented",
"502" : "Bad Gateway",
"503" : "Service Unavailable",
"504" : "Gateway Time-out",
"505" : "HTTP Version not supported",
}
RESPONSE_HEADERS = ["Accept-Ranges", "Age", "ETag", "Location",
"Proxy-Authenticate", "Retry-After", "Server",
"Vary", "WWW-Authenticate"]
class Response(object):
def __init__(self, code, header, body):
self.code, self.header, self.body = code, header, body
def as_raw(self):
header_raw = ""
if self.header:
if not self.header.get_field('Date'):
self.header.add_field('Date', format_date_time(mktime(datetime.now().timetuple())))
header_raw = self.header.as_raw()
body_raw = "\r\n%s" % self.body
return "%s%s%s" % (self.status_line, header_raw, body_raw)
@property
def status_line(self):
response_message = RESPONSE_CODES[self.code]
return "HTTP/1.1 %s %s\r\n" % (self.code, response_message)
|
suhridsatyal/piccolo
|
piccolo/http/response.py
|
Python
|
bsd-3-clause
| 2,214
|
# IMPORTANT, call this module from /sandbox.py and run() it. This file cannot
# be called directly.
# @see http://stackoverflow.com/questions/4348452/
from lib.geo import util
from lib.geo.segment import Segment
from lib.geo.point import Point
from lib.geo.waypoint import Waypoint
from lib.geo.route import Route
from formation_flight.aircraft import Aircraft
from formation_flight import simulator
from formation_flight import config
from lib import debug
fuel_burn_per_nm = .88
formation_discount = .13
v_opt = 8.333
def fuel_diff(aircraft, departure_hub, arrival_hub, required_etah,
verbose = True):
origin = aircraft.route.waypoints[0]
destination = aircraft.route.waypoints[-1]
position = aircraft.get_position()
direct_length = aircraft.route.get_length()
origin_to_here = origin.distance_to(position)
here_to_hub_length = position.distance_to(departure_hub)
formation_length = departure_hub.distance_to(arrival_hub)
arrival_length = arrival_hub.distance_to(destination)
# Temporarily insert the hubs into the aircraft's route
old_waypoints = aircraft.route.waypoints
aircraft.route.waypoints = [aircraft.route.waypoints[0],
position,
departure_hub,
arrival_hub,
aircraft.route.waypoints[-1]]
planned_etah = aircraft.get_waypoint_eta()
t = simulator.get_time()
v_factor = (planned_etah - t) / (required_etah - t)
v_old = aircraft._speed
v_new = v_factor * v_old
v_penalty = speed_penalty(v_new)
direct_costs = direct_length * fuel_burn_per_nm
formation_costs = fuel_burn_per_nm * origin_to_here +\
v_penalty * fuel_burn_per_nm * here_to_hub_length +\
fuel_burn_per_nm * (1 - formation_discount) *\
formation_length +\
fuel_burn_per_nm * arrival_length
# Change route back to what it was
aircraft.route.waypoints = old_waypoints
if verbose:
headers = []
        headers.append(('My header', 'uhuh'))
messages = []
messages.append(('Flight', aircraft))
messages.append(('Departure hub', departure_hub))
messages.append(('Arrival hub', arrival_hub))
messages.append(('Time to hub (planned)', '%d time units' % (planned_etah - t)))
messages.append(('Time to hub (required)', '%d time units' % (required_etah - t)))
messages.append(('Current speed', '%.0f kts' % (v_old*60)))
messages.append(('Required speed', '%.0f kts' % (v_new*60)))
messages.append(('Sync fuel', '%.2f gallons' %
(v_penalty * fuel_burn_per_nm * here_to_hub_length)))
messages.append(('Fuel (solo flight)', '%.2f gallons' % direct_costs))
messages.append(('Fuel (formation flight)', '%.2f gallons' % formation_costs))
debug.print_table(messages = messages, headers = headers)
return direct_costs - formation_costs
def speed_penalty(v):
return 1 + abs(v - v_opt) / v_opt
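def _speed_penalty_examples():
    # Illustrative checks (added, not in the original file): the penalty is
    # 1.0 at the optimal speed and grows linearly with the relative deviation.
    assert abs(speed_penalty(v_opt) - 1.0) < 1e-9
    assert abs(speed_penalty(1.5 * v_opt) - 1.5) < 1e-9
    assert abs(speed_penalty(0.5 * v_opt) - 1.5) < 1e-9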
def run():
planes = [
Aircraft(route = Route([Waypoint('AMS'), Waypoint('JFK')]))
]
    simulator.execute([10], planes)
departure_hub = Waypoint('LHR')
arrival_hub = Waypoint('BOS')
p = fuel_diff(planes[0], departure_hub, arrival_hub, 20, verbose = True)
debug.print_table([('Net benefit', '%d gallons' % p)])
|
mauzeh/formation-flight
|
sandbox/penalties.py
|
Python
|
mit
| 3,527
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import flavors
from tempest.api_schema.compute import flavors_extra_specs
list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
# NOTE: In the v3 API, 'swap' comes back as '0', not as an empty string '""'
# (in the v2 API it comes back as an empty string), so 'swap' is left as an
# integer type only.
# Defining extra attributes for V3 flavor schema
list_flavors_details['response_body']['properties']['flavors']['items'][
'properties'].update({'disabled': {'type': 'boolean'},
'ephemeral': {'type': 'integer'},
'flavor-access:is_public': {'type': 'boolean'},
'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
# So they are not 'required'.
list_flavors_details['response_body']['properties']['flavors']['items'][
'required'].extend(['disabled', 'ephemeral'])
set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
set_flavor_extra_specs['status_code'] = [201]
unset_flavor_extra_specs = {
'status_code': [204]
}
get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
# NOTE: In the v3 API, 'swap' comes back as '0', not as an empty string '""'
# (in the v2 API it comes back as an empty string), so 'swap' is left as an
# integer type only.
# Defining extra attributes for V3 flavor schema
get_flavor_details['response_body']['properties']['flavor'][
'properties'].update({'disabled': {'type': 'boolean'},
'ephemeral': {'type': 'integer'},
'flavor-access:is_public': {'type': 'boolean'},
'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
# So they are not 'required'.
get_flavor_details['response_body']['properties']['flavor'][
'required'].extend(['disabled', 'ephemeral'])
create_flavor_details = copy.deepcopy(get_flavor_details)
# Overriding the status code for create flavor V3 API.
create_flavor_details['status_code'] = [201]
delete_flavor = {
'status_code': [204]
}
|
vedujoshi/os_tempest
|
tempest/api_schema/compute/v3/flavors.py
|
Python
|
apache-2.0
| 2,750
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers for TF 2.0.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import build_info
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# The following string constants are used by the Defun approach for the
# unified backend of LSTM and GRU.
_FUNCTION_API_NAME_ATTRIBUTE = 'api_implements'
_FUNCTION_DEVICE_ATTRIBUTE = 'api_preferred_device'
_CPU_DEVICE_NAME = 'CPU'
_GPU_DEVICE_NAME = 'GPU'
# The following number constants are used to represent the runtime of the defun
# backend function. Since the CPU and GPU implementations are mathematically
# the same, we need some signal for the function to indicate which one was
# executed. This is for testing purposes, to verify the correctness of
# swapping the backend function.
_RUNTIME_UNKNOWN = 0
_RUNTIME_CPU = 1
_RUNTIME_GPU = 2
_CUDNN_AVAILABLE_MSG = 'Layer %s will use cuDNN kernel when run on GPU.'
_CUDNN_NOT_AVAILABLE_MSG = ('Layer %s will not use cuDNN kernel since it '
                            'doesn\'t meet the cuDNN kernel criteria. It will '
                            'use a generic GPU kernel as a fallback when '
                            'running on GPU.')
def _use_new_code():
return True
# TODO(b/169707691): The wrapper can be removed if TFLite doesn't need to rely
# on supportive attributes from LSTM/GRU.
class _DefunWrapper(object):
"""A wrapper with no deep copy of the Defun in LSTM/GRU layer."""
def __init__(self, time_major, go_backwards, layer_name):
self.time_major = time_major
self.go_backwards = go_backwards
self.layer_name = layer_name
if self.layer_name not in ['lstm', 'gru']:
raise ValueError('Defun wrapper only applies to LSTM and GRU layer, '
'but given {}'.format(self.layer_name))
    # The first two attributes are added to support the TFLite use case.
supportive_attributes = {
'time_major': self.time_major,
'go_backwards': self.go_backwards,
_FUNCTION_API_NAME_ATTRIBUTE: self.layer_name + '_' + str(uuid.uuid4())
}
if self.layer_name == 'lstm':
layer_func = lstm_with_backend_selection
else:
layer_func = gru_with_backend_selection
self.defun_layer = function.defun_with_attributes(
layer_func,
attributes=supportive_attributes,
autograph=False)
def __deepcopy__(self, memo):
new_wrapper = type(self)(
self.time_major, self.go_backwards, self.layer_name)
memo[id(self)] = new_wrapper
return new_wrapper
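# Editor's addition: a hedged sketch (the helper name is hypothetical, not
# part of the original file) illustrating why `__deepcopy__` above rebuilds
# the wrapper: concrete defuns cannot be deep-copied, so copying a layer that
# holds one would otherwise fail.
def _example_copy_defun_wrapper():
  import copy
  wrapper = _DefunWrapper(
      time_major=False, go_backwards=False, layer_name='gru')
  clone = copy.deepcopy(wrapper)  # rebuilds the defun, same configuration
  return clone.layer_name == wrapper.layer_name  # True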
@keras_export('keras.layers.GRUCell', v1=[])
class GRUCell(recurrent.GRUCell):
"""Cell class for the GRU layer.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
  This class processes one step within the whole time sequence input, whereas
  `tf.keras.layers.GRU` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.GRUCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
    activation: Activation function to use. Default: hyperbolic tangent
      (`tanh`). If you pass `None`, no activation is applied
      (i.e. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use for the recurrent step.
      Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
      applied (i.e. "linear" activation: `a(x) = x`).
    use_bias: Boolean (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: A 2D tensor with shape of `[batch, units]`, which is the state from
      the previous time step. For timestep 0, the initial state provided by
      the user will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
reset_after=True,
**kwargs):
super(GRUCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=kwargs.pop('implementation', 2),
reset_after=reset_after,
**kwargs)
@keras_export('keras.layers.GRU', v1=[])
class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU):
"""Gated Recurrent Unit - Cho et al. 2014.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
  the arguments to the layer meet the requirements of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. `reset_after` is `True`
  7. Inputs, if masking is used, are strictly right-padded.
8. Eager execution is enabled in the outermost context.
There are two variants of the GRU implementation. The default one is based on
[v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden
state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. To use this variant, set `reset_after=True` and
  `recurrent_activation='sigmoid'`.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> gru = tf.keras.layers.GRU(4)
>>> output = gru(inputs)
>>> print(output.shape)
(32, 4)
>>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
>>> whole_sequence_output, final_state = gru(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[samples, timesteps]` indicating whether
a given timestep should be masked (optional, defaults to `None`).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional, defaults to `None`).
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, defaults to `None` which causes creation
of zero-filled initial state tensors).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
reset_after=True,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self._return_runtime = kwargs.pop('return_runtime', False)
super(GRU, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=kwargs.pop('implementation', 2),
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
time_major=time_major,
reset_after=reset_after,
**kwargs)
    # The GPU kernel uses the following settings by default; they are not
    # configurable.
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, nn.tanh) and
self.recurrent_activation in (activations.sigmoid, nn.sigmoid) and
recurrent_dropout == 0 and not unroll and use_bias and
reset_after and ops.executing_eagerly_outside_functions())
if config.list_logical_devices('GPU'):
      # Only show the message when a GPU is available; the user will not care
      # about cuDNN if there is no GPU.
if self._could_use_gpu_kernel:
logging.debug(_CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warn(_CUDNN_NOT_AVAILABLE_MSG % self.name)
if _use_new_code():
self._defun_wrapper = _DefunWrapper(time_major, go_backwards, 'gru')
def build(self, input_shape):
super(GRU, self).build(input_shape)
if not all(isinstance(v, resource_variable_ops.ResourceVariable)
for v in self.weights):
# Non-resource variables, such as DistributedVariables and
# AutoCastVariables, do not work properly with the implementation
# selector, which is used when cuDNN is used. However, by chance, such
# variables happen to work in LSTM, so this check is only needed for GRU.
# TODO(b/136512020): Make non-resource variables work with the
# implementation selector.
self._could_use_gpu_kernel = False
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = K.convert_inputs_if_ragged(inputs)
is_ragged_input = (row_lengths is not None)
self._validate_args_if_ragged(is_ragged_input, mask)
    # GRU does not support constants. Ignore them during processing.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
    # TODO(b/156447398) Investigate why the cuDNN kernel fails with
# ragged inputs.
if is_ragged_input or not self._could_use_gpu_kernel:
kwargs = {'training': training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(cell_inputs, cell_states):
return self.cell(cell_inputs, cell_states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
      # This is a dummy tensor for testing purposes.
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
last_output, outputs, runtime, states = self._defun_gru_call(
inputs, initial_state, training, mask, row_lengths)
if self.stateful:
updates = [state_ops.assign(self.states[0], states[0])]
self.add_update(updates)
if self.return_sequences:
output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self._return_runtime:
return output, runtime
else:
return output
def _defun_gru_call(self, inputs, initial_state, training, mask,
sequence_lengths):
# Use the new defun approach for backend implementation swap.
    # Note that different implementations need to have the same function
    # signature, e.g. the tensor parameters need to have the same shapes and
    # dtypes.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
if _use_new_code():
gru_kwargs = {
'inputs': inputs,
'init_h': _read_variable_value(initial_state[0]),
'kernel': _read_variable_value(self.cell.kernel),
'recurrent_kernel': _read_variable_value(self.cell.recurrent_kernel),
'bias': _read_variable_value(self.cell.bias),
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': self.zero_output_for_mask
}
(last_output, outputs, new_h,
runtime) = self._defun_wrapper.defun_layer(**gru_kwargs)
else:
gpu_gru_kwargs = {
'inputs': inputs,
'init_h': _read_variable_value(initial_state[0]),
'kernel': _read_variable_value(self.cell.kernel),
'recurrent_kernel': _read_variable_value(self.cell.recurrent_kernel),
'bias': _read_variable_value(self.cell.bias),
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards,
'sequence_lengths': sequence_lengths
}
normal_gru_kwargs = gpu_gru_kwargs.copy()
normal_gru_kwargs.update({
'zero_output_for_mask': self.zero_output_for_mask,
})
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME or
(device_type is None and config.list_logical_devices('GPU'))) and
(mask is None or is_cudnn_supported_inputs(mask, self.time_major)))
        # Under eager context, check the device placement and prefer the
        # GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, runtime = gpu_gru(**gpu_gru_kwargs)
else:
last_output, outputs, new_h, runtime = standard_gru(
**normal_gru_kwargs)
else:
last_output, outputs, new_h, runtime = gru_with_backend_selection(
**normal_gru_kwargs)
states = [new_h]
return last_output, outputs, runtime, states
def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""GRU with standard kernel implementation.
  This implementation can be run on all types of hardware.
  This implementation lifts out all the layer weights and makes them function
  parameters. It has the same number of tensor input params as the CuDNN
  counterpart. The RNN step logic has been simplified, e.g. dropout and mask
  handling are removed since the CuDNN implementation does not support them.
Arguments:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. The bias contains the
combined input_bias and recurrent_bias.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has same shape as init_h.
    runtime: constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
input_bias, recurrent_bias = array_ops.unstack(bias)
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(cell_inputs, kernel)
matrix_x = K.bias_add(matrix_x, input_bias)
x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1)
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, recurrent_kernel)
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner, 3,
axis=1)
z = nn.sigmoid(x_z + recurrent_z)
r = nn.sigmoid(x_r + recurrent_r)
hh = nn.tanh(x_h + r * recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=sequence_lengths
if sequence_lengths is not None else timesteps,
zero_output_for_mask=zero_output_for_mask)
return last_output, outputs, new_states[0], _runtime(_RUNTIME_CPU)
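# Editor's addition: a hedged, self-contained usage sketch (the helper name
# `_example_standard_gru` is hypothetical). It drives `standard_gru` directly
# with eager tensors; shapes follow the docstring above, i.e. kernel is
# [feature, 3 * units], recurrent_kernel is [units, 3 * units], and bias is
# [2, 3 * units] (input bias stacked on recurrent bias).
def _example_standard_gru():
  batch, time_steps, feature, units = 2, 5, 3, 4
  inputs = array_ops.ones([batch, time_steps, feature])
  init_h = array_ops.zeros([batch, units])
  kernel = array_ops.ones([feature, 3 * units])
  recurrent_kernel = array_ops.ones([units, 3 * units])
  bias = array_ops.zeros([2, 3 * units])
  last_output, outputs, state, _ = standard_gru(
      inputs, init_h, kernel, recurrent_kernel, bias, mask=None,
      time_major=False, go_backwards=False, sequence_lengths=None,
      zero_output_for_mask=False)
  # last_output: [2, 4]; outputs: [2, 5, 4]; state has the shape of init_h.
  return last_output, outputs, state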
def gpu_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths):
"""GRU with CuDNN implementation which is only available for GPU."""
if not time_major and mask is None:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
  # For init_h, cuDNN expects one more dim of num_layers before or after the
  # batch dim, for time major or batch major inputs respectively.
init_h = array_ops.expand_dims(init_h, axis=seq_axis)
weights = array_ops.split(kernel, 3, axis=1)
weights += array_ops.split(recurrent_kernel, 3, axis=1)
  # Note that the bias was initialized as shape (2, 3 * units); flatten it
  # into (6 * units).
bias = array_ops.split(K.flatten(bias), 6)
if build_info.build_info['is_cuda_build']:
    # Note that the gate order for CuDNN is different from the canonical
    # format; the canonical format is [z, r, h], whereas CuDNN is [r, z, h].
    # The swap needs to be done for kernel, recurrent_kernel, input_bias and
    # recurrent_bias.
    # z is update gate weights.
    # r is reset gate weights.
    # h is candidate ("new") gate weights.
weights[0], weights[1] = weights[1], weights[0]
weights[3], weights[4] = weights[4], weights[3]
bias[0], bias[1] = bias[1], bias[0]
bias[3], bias[4] = bias[4], bias[3]
params = _canonical_to_params(
weights=weights,
biases=bias,
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_lengths = calculate_sequence_by_mask(mask, time_major)
if sequence_lengths is not None:
if go_backwards:
# Three reversals are required. E.g.,
      # normal input = [1, 2, 3, 0, 0]  # where the 0s need to be masked
# reversed_input_to_cudnn = [3, 2, 1, 0, 0]
# output_from_cudnn = [6, 5, 4, 0, 0]
# expected_output = [0, 0, 6, 5 ,4]
inputs = array_ops.reverse_sequence_v2(
inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs,
input_h=init_h,
input_c=0,
params=params,
is_training=True,
rnn_mode='gru',
sequence_lengths=sequence_lengths,
time_major=time_major)
if go_backwards:
outputs = array_ops.reverse_sequence_v2(
outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs = array_ops.reverse(outputs, axis=[seq_axis])
else:
if go_backwards:
      # Reverse axis 0 since the input is already converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=0, params=params, is_training=True,
rnn_mode='gru')
last_output = outputs[-1]
if not time_major and mask is None:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = array_ops.squeeze(h, axis=seq_axis)
  # In the case of variable-length input, the cudnn kernel will fill zeros
  # for the output, whereas the default keras behavior is to carry over the
  # previous output for t-1, so that in the return_sequence=False case, the
  # user can quickly get the final effective output instead of just 0s at the
  # last timestep. In order to mimic the default keras behavior, we copy the
  # final h state as the last_output, since it is numerically the same as the
  # output.
if mask is not None:
last_output = h
return last_output, outputs, h, _runtime(_RUNTIME_GPU)
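# Editor's addition: a hedged illustration (hypothetical helper, pure Python)
# of the canonical [z, r, h] -> CuDNN [r, z, h] gate reordering performed in
# gpu_gru above, shown on labeled placeholders instead of weight tensors.
def _example_cudnn_gate_order():
  weights = ['Wz', 'Wr', 'Wh', 'Rz', 'Rr', 'Rh']  # canonical order
  weights[0], weights[1] = weights[1], weights[0]  # swap input z/r
  weights[3], weights[4] = weights[4], weights[3]  # swap recurrent z/r
  return weights  # ['Wr', 'Wz', 'Wh', 'Rr', 'Rz', 'Rh'], i.e. CuDNN order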
def gru_with_backend_selection(inputs, init_h, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the GRU with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the
  most generic kernel that can run on all device conditions, and a second one
  with the CuDNN-specific kernel, which can only run on GPU.
  The first function will be called with the normal GRU params, while the
  second function is not called, but only registered in the graph. Grappler
  will then do the proper graph rewrite and swap in the optimized TF function
  based on the device placement.
Args:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
    bias: Weights for cell kernel bias and recurrent bias. The bias contains
      the combined input_bias and recurrent_bias.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_gru.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_gru_with_fallback(inputs, init_h, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""Use CuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_gru_fn():
return gpu_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def standard_gru_fn():
return standard_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return control_flow_ops.cond(
is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_gru_fn,
false_fn=standard_gru_fn)
if _use_new_code():
    # Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h,
runtime) = control_flow_ops.execute_fn_for_device(
{
_CPU_DEVICE_NAME: lambda: standard_gru(**params),
_GPU_DEVICE_NAME: lambda: gpu_gru_with_fallback(**params)
}, lambda: standard_gru(**params))
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
    # sees multiple GRU layers added into the same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'gru_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_gru = _generate_defun_backend(api_name, _CPU_DEVICE_NAME,
standard_gru,
supportive_attribute)
defun_gpu_gru = _generate_defun_backend(api_name, _GPU_DEVICE_NAME,
gpu_gru_with_fallback,
supportive_attribute)
# Call the normal GRU impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, runtime = defun_standard_gru(**params)
function.register(defun_gpu_gru, **params)
return last_output, outputs, new_h, runtime
@keras_export('keras.layers.LSTMCell', v1=[])
class LSTMCell(recurrent.LSTMCell):
"""Cell class for the LSTM layer.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
  This class processes one step within the whole time sequence input, whereas
  `tf.keras.layers.LSTM` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.LSTMCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
    activation: Activation function to use. Default: hyperbolic tangent
      (`tanh`). If you pass `None`, no activation is applied (i.e. "linear"
      activation: `a(x) = x`).
    recurrent_activation: Activation function to use for the recurrent step.
      Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
      applied (i.e. "linear" activation: `a(x) = x`).
    use_bias: Boolean (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
    states: List of 2 tensors that correspond to the cell's units. Both of
      them have shape `[batch, units]`; the first tensor is the memory state
      from the previous time step, the second tensor is the carry state from
      the previous time step. For timestep 0, the initial state provided by
      the user will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(LSTMCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=kwargs.pop('implementation', 2),
**kwargs)
@keras_export('keras.layers.LSTM', v1=[])
class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM):
"""Long Short-Term Memory layer - Hochreiter 1997.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
  the arguments to the layer meet the requirements of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
  6. Inputs, if masking is used, are strictly right-padded.
7. Eager execution is enabled in the outermost context.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> lstm = tf.keras.layers.LSTM(4)
>>> output = lstm(inputs)
>>> print(output.shape)
(32, 4)
>>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
      is applied (i.e. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
      Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
      applied (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
    return_sequences: Boolean. Whether to return the last output in the output
      sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default `False`). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
unroll: Boolean (default `False`). If True, the network will be unrolled,
else a symbolic loop will be used. Unrolling can speed-up a RNN, although
it tends to be more memory-intensive. Unrolling is only suitable for short
sequences.
Call arguments:
inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked (optional, defaults to `None`).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional, defaults to `None`).
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, defaults to `None` which causes creation
of zero-filled initial state tensors).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
unroll=False,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False)
super(LSTM, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=kwargs.pop('implementation', 2),
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
time_major=time_major,
unroll=unroll,
**kwargs)
self.state_spec = [
InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
]
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, nn.tanh) and
self.recurrent_activation in (activations.sigmoid, nn.sigmoid) and
recurrent_dropout == 0 and not unroll and use_bias and
ops.executing_eagerly_outside_functions())
if config.list_logical_devices('GPU'):
      # Only show the message when a GPU is available; the user will not care
      # about cuDNN if there is no GPU.
if self._could_use_gpu_kernel:
logging.debug(_CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warn(_CUDNN_NOT_AVAILABLE_MSG % self.name)
if _use_new_code():
self._defun_wrapper = _DefunWrapper(time_major, go_backwards, 'lstm')
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = K.convert_inputs_if_ragged(inputs)
is_ragged_input = (row_lengths is not None)
self._validate_args_if_ragged(is_ragged_input, mask)
    # LSTM does not support constants. Ignore them during processing.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
    # TODO(b/156447398) Investigate why the cuDNN kernel fails with
# ragged inputs.
if is_ragged_input or not self._could_use_gpu_kernel:
# Fall back to use the normal LSTM.
kwargs = {'training': training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(inputs, states):
return self.cell(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
# Use the new defun approach for backend implementation swap.
      # Note that different implementations need to have the same function
      # signature, e.g. the tensor parameters need to have the same shapes and
      # dtypes. Since CuDNN has an extra set of biases, those biases will be
      # passed to both the normal and CuDNN implementations.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
if _use_new_code():
        lstm_kwargs = {
            'inputs': inputs,
            'init_h': _read_variable_value(initial_state[0]),
            'init_c': _read_variable_value(initial_state[1]),
            'kernel': _read_variable_value(self.cell.kernel),
            'recurrent_kernel': _read_variable_value(
                self.cell.recurrent_kernel),
            'bias': _read_variable_value(self.cell.bias),
            'mask': mask,
            'time_major': self.time_major,
            'go_backwards': self.go_backwards,
            'sequence_lengths': row_lengths,
            'zero_output_for_mask': self.zero_output_for_mask,
        }
(last_output, outputs, new_h, new_c,
runtime) = self._defun_wrapper.defun_layer(**lstm_kwargs)
else:
        gpu_lstm_kwargs = {
            'inputs': inputs,
            'init_h': _read_variable_value(initial_state[0]),
            'init_c': _read_variable_value(initial_state[1]),
            'kernel': _read_variable_value(self.cell.kernel),
            'recurrent_kernel': _read_variable_value(
                self.cell.recurrent_kernel),
            'bias': _read_variable_value(self.cell.bias),
            'mask': mask,
            'time_major': self.time_major,
            'go_backwards': self.go_backwards,
            'sequence_lengths': row_lengths
        }
normal_lstm_kwargs = gpu_lstm_kwargs.copy()
normal_lstm_kwargs.update({
'zero_output_for_mask': self.zero_output_for_mask,
})
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME or
(device_type is None and config.list_logical_devices('GPU'))) and
(mask is None or
is_cudnn_supported_inputs(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, new_c, runtime = gpu_lstm(
**gpu_lstm_kwargs)
else:
last_output, outputs, new_h, new_c, runtime = standard_lstm(
**normal_lstm_kwargs)
else:
(last_output, outputs, new_h, new_c,
runtime) = lstm_with_backend_selection(**normal_lstm_kwargs)
states = [new_h, new_c]
if self.stateful:
updates = [
state_ops.assign(self_state, state)
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_sequences:
output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self.return_runtime:
return output, runtime
else:
return output
def _canonical_to_params(weights, biases, shape, transpose_weights=False):
"""Utility function convert variable to CuDNN compatible parameter.
Note that Keras weights for kernels are different from the CuDNN format. Eg.:
```
Keras CuDNN
[[0, 1, 2], <---> [[0, 2, 4],
[3, 4, 5]] [1, 3, 5]]
```
If the input weights need to be in a unified format, then set
`transpose_weights=True` to convert the weights.
Args:
weights: list of weights for the individual kernels and recurrent kernels.
    biases: list of biases for the individual gates.
    shape: the shape for the converted variables that will be fed to CuDNN.
    transpose_weights: boolean, whether to transpose the weights.
  Returns:
    The converted weights that can be fed to CuDNN ops as params.
"""
def convert(w):
return array_ops.transpose(w) if transpose_weights else w
weights = [array_ops.reshape(convert(x), shape) for x in weights]
biases = [array_ops.reshape(x, shape) for x in biases]
return array_ops.concat(weights + biases, axis=0)
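# Editor's addition: a hedged sketch (hypothetical helper) showing what
# _canonical_to_params does to a single [2, 3] weight: it is transposed,
# flattened, and concatenated with the flattened biases.
def _example_canonical_to_params():
  w = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
  b = array_ops.zeros([3])
  # Transpose of w is [[0, 3], [1, 4], [2, 5]]; flattened: [0, 3, 1, 4, 2, 5].
  # With the zero biases appended: [0, 3, 1, 4, 2, 5, 0, 0, 0].
  return _canonical_to_params(
      weights=[w], biases=[b], shape=constant_op.constant([-1]),
      transpose_weights=True)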
def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""LSTM with standard kernel implementation.
  This implementation can be run on all types of hardware.
  This implementation lifts out all the layer weights and makes them function
  parameters. It has the same number of tensor input params as the CuDNN
  counterpart. The RNN step logic has been simplified, e.g. dropout and mask
  handling are removed since the CuDNN implementation does not support them.
  Note that the first half of the bias tensor should be ignored by this impl.
  The CuDNN impl needs an extra set of input gate bias. In order to make both
  functions take the same shape of parameters, that extra set of bias is also
  fed here.
Args:
inputs: input tensor of LSTM layer.
init_h: initial state tensor for the cell output.
init_c: initial state tensor for the cell hidden state.
kernel: weights for cell kernel.
recurrent_kernel: weights for cell recurrent kernel.
bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has same shape as init_h.
state_1: the cell hidden state, which has same shape as init_c.
    runtime: constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = K.dot(cell_inputs, kernel)
z += K.dot(h_tm1, recurrent_kernel)
z = K.bias_add(z, bias)
z0, z1, z2, z3 = array_ops.split(z, 4, axis=1)
i = nn.sigmoid(z0)
f = nn.sigmoid(z1)
c = f * c_tm1 + i * nn.tanh(z2)
o = nn.sigmoid(z3)
h = o * nn.tanh(c)
return h, [h, c]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h, init_c],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=(sequence_lengths
if sequence_lengths is not None else timesteps),
zero_output_for_mask=zero_output_for_mask)
return (last_output, outputs, new_states[0], new_states[1],
_runtime(_RUNTIME_CPU))
def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards, sequence_lengths):
"""LSTM with either CuDNN or ROCm implementation which is only available for GPU.
Note that currently only right padded data is supported, or the result will be
polluted by the unmasked data which should be filtered.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: Boolean, whether the inputs are in the format of [time, batch,
feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
Returns:
last_output: Output tensor for the last timestep, which has shape
[batch, units].
outputs: Output tensor for all timesteps, which has shape
[batch, time, units].
state_0: The cell output, which has same shape as init_h.
state_1: The cell hidden state, which has same shape as init_c.
    runtime: Constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
if not time_major and mask is None:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
  # For init_h and init_c, cuDNN expects one more dim of num_layers before or
  # after the batch dim, for time major or batch major inputs respectively.
init_h = array_ops.expand_dims(init_h, axis=seq_axis)
init_c = array_ops.expand_dims(init_c, axis=seq_axis)
weights = array_ops.split(kernel, 4, axis=1)
weights += array_ops.split(recurrent_kernel, 4, axis=1)
  # CuDNN has an extra set of biases for inputs; we disable them (setting them
  # to 0) so that mathematically it is the same as the canonical LSTM
  # implementation.
full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)
if build_info.build_info['is_rocm_build']:
    # ROCm MIOpen's weight sequence for LSTM is different from both the
    # canonical and the Cudnn format.
    # MIOpen: [i, f, o, c]; Cudnn/Canonical: [i, f, c, o]
# i is input gate weights.
# f is forget gate weights.
# o is output gate weights.
# c is cell gate weights.
weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
# full_bias is a tensor of shape (8*n,)
full_bias = array_ops.split(full_bias, 8, axis=0)
full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
params = _canonical_to_params(
weights=weights,
biases=array_ops.split(full_bias, 8),
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_lengths = calculate_sequence_by_mask(mask, time_major)
if sequence_lengths is not None:
if go_backwards:
# Three reversals are required. E.g.,
      # normal input = [1, 2, 3, 0, 0]  # where the 0s need to be masked
# reversed_input_to_cudnn = [3, 2, 1, 0, 0]
# output_from_cudnn = [6, 5, 4, 0, 0]
# expected_output = [0, 0, 6, 5 ,4]
inputs = array_ops.reverse_sequence_v2(
inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs,
input_h=init_h,
input_c=init_c,
params=params,
is_training=True,
rnn_mode='lstm',
sequence_lengths=sequence_lengths,
time_major=time_major)
if go_backwards:
outputs = array_ops.reverse_sequence_v2(
outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs = array_ops.reverse(outputs, axis=[seq_axis])
else:
# # Fill the array with shape [batch] with value of max timesteps.
# sequence_length = array_ops.fill([array_ops.shape(inputs)[1]],
# array_ops.shape(inputs)[0])
if go_backwards:
      # Reverse axis 0 since the input is already converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=init_c, params=params, is_training=True,
rnn_mode='lstm')
last_output = outputs[-1]
if not time_major and mask is None:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = array_ops.squeeze(h, axis=seq_axis)
c = array_ops.squeeze(c, axis=seq_axis)
  # In the case of variable-length input, the cudnn kernel will fill zeros
  # for the output, whereas the default keras behavior is to carry over the
  # previous output for t-1, so that in the return_sequence=False case, the
  # user can quickly get the final effective output instead of just 0s at the
  # last timestep. In order to mimic the default keras behavior, we copy the
  # final h state as the last_output, since it is numerically the same as the
  # output.
if mask is not None:
last_output = h
return last_output, outputs, h, c, _runtime(_RUNTIME_GPU)
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the
  most generic kernel that can run on all device conditions, and a second one
  with the CuDNN-specific kernel, which can only run on GPU.
  The first function will be called with normal_lstm_params, while the second
  function is not called, but only registered in the graph. Grappler will then
  do the proper graph rewrite and swap in the optimized TF function based on
  the device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use CuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_lstm_fn():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
    def standard_lstm_fn():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return control_flow_ops.cond(
is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_lstm_fn,
        false_fn=standard_lstm_fn)
if _use_new_code():
    # Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h, new_c,
runtime) = control_flow_ops.execute_fn_for_device(
{
_CPU_DEVICE_NAME: lambda: standard_lstm(**params),
_GPU_DEVICE_NAME: lambda: gpu_lstm_with_fallback(**params)
}, lambda: standard_lstm(**params))
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
    # sees multiple LSTM layers added into the same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = _generate_defun_backend(api_name, _CPU_DEVICE_NAME,
standard_lstm,
supportive_attribute)
defun_gpu_lstm = _generate_defun_backend(api_name, _GPU_DEVICE_NAME,
gpu_lstm_with_fallback,
supportive_attribute)
# Call the normal LSTM impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(**params)
function.register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
def is_sequence_right_padded(mask):
"""Check the mask tensor and see if it right padded.
For CuDNN kernel, it uses the sequence length param to skip the tailing
timestep. If the data is left padded, or not a strict right padding (has
masked value in the middle of the sequence), then CuDNN kernel won't be work
properly in those cases.
Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]].
Mixture of mask/unmasked data: [[True, False, True, False, False]].
Note that for the mixed data example above, the actually data RNN should see
are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not
pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep]
Returns:
boolean scalar tensor, whether the mask is strictly right padded.
"""
max_seq_length = array_ops.shape(mask)[1]
count_of_true = math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=1)
right_padded_mask = array_ops.sequence_mask(
count_of_true, maxlen=max_seq_length)
return math_ops.reduce_all(math_ops.equal(mask, right_padded_mask))
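# --- Illustrative reference (not part of the original module) ---
# A minimal NumPy sketch of the same right-padding check, assuming a 2-D
# boolean mask of shape [batch, timestep]. It is defined but never called,
# so it has no effect at import time.
def _is_right_padded_reference(mask):
  import numpy as np  # assumed available; for illustration only
  mask = np.asarray(mask, dtype=bool)
  counts = mask.sum(axis=1)                # number of True entries per row
  cols = np.arange(mask.shape[1])          # [0, 1, ..., T-1]
  ideal = cols[None, :] < counts[:, None]  # the perfectly right-padded mask
  return bool((mask == ideal).all())
# e.g. _is_right_padded_reference([[True, True, False]]) -> True
#      _is_right_padded_reference([[True, False, True]]) -> False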
def has_fully_masked_sequence(mask):
  # See https://github.com/tensorflow/tensorflow/issues/33148 for more details.
  # The cudnn kernel will error out if the input sequence contains any fully
  # masked data. We work around this issue by rerouting the computation to the
  # standard kernel, until the issue on the cudnn side has been fixed.
  # A fully masked sequence contains all Falses. To make it easy to check, we
  # invert the booleans and check whether any sequence is all True.
return math_ops.reduce_any(
math_ops.reduce_all(
math_ops.logical_not(mask),
axis=1))
def is_cudnn_supported_inputs(mask, time_major):
if time_major:
mask = array_ops.transpose(mask)
return math_ops.logical_and(
is_sequence_right_padded(mask),
math_ops.logical_not(has_fully_masked_sequence(mask)))
def calculate_sequence_by_mask(mask, time_major):
"""Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
Consider the following example:
a = [[True, True, False, False],
[True, True, True, False]]
It is a (2, 4) tensor, and the corresponding sequence length result should be
  1D tensor with value [2, 3]. Note that the masking tensor must be right
  padded, which can be checked by, e.g., `is_sequence_right_padded()`.
Args:
mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
time_major=True.
time_major: Boolean, which indicates whether the mask is time major or batch
major.
Returns:
sequence_length: 1D int32 tensor.
"""
timestep_index = 0 if time_major else 1
return math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32),
axis=timestep_index)
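# --- Illustrative reference (not part of the original module) ---
# A NumPy mirror of the reduction above: for a right-padded boolean mask the
# per-sequence length is simply the count of True entries along the timestep
# axis. Defined but never called.
def _sequence_length_reference(mask, time_major=False):
  import numpy as np  # assumed available; for illustration only
  mask = np.asarray(mask, dtype=bool)
  return mask.sum(axis=0 if time_major else 1).astype('int32')
# e.g. _sequence_length_reference([[True, True, False, False],
#                                  [True, True, True, False]]) -> [2, 3]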
def _generate_defun_backend(unique_api_name, preferred_device, func,
supportive_attributes):
function_attributes = {
_FUNCTION_API_NAME_ATTRIBUTE: unique_api_name,
_FUNCTION_DEVICE_ATTRIBUTE: preferred_device,
}
function_attributes.update(supportive_attributes)
return function.defun_with_attributes(func=func,
attributes=function_attributes,
autograph=False)
def _get_context_device_type():
"""Parse the current context and return the device type, eg CPU/GPU."""
current_device = context.context().device_name
if current_device is None:
return None
return device.DeviceSpec.from_string(current_device).device_type
def _runtime(runtime_name):
with ops.device('/cpu:0'):
return constant_op.constant(
runtime_name, dtype=dtypes.float32, name='runtime')
def _read_variable_value(v):
"""Read the value of a variable if it is variable."""
if isinstance(v, variables.Variable):
return v.read_value()
return v
|
karllessard/tensorflow
|
tensorflow/python/keras/layers/recurrent_v2.py
|
Python
|
apache-2.0
| 73,776
|
from django import template
from django.utils.html import format_html
register = template.Library()
@register.simple_tag
def fa_icon(name, title=None):
    # format_html escapes the interpolated values and marks the result safe,
    # so the icon renders as HTML rather than being autoescaped to text.
    if title:
        return format_html('<i class="fa fa-{}" aria-hidden="true" title="{}"></i>',
                           name, title)
    return format_html('<i class="fa fa-{}" aria-hidden="true"></i>', name)
|
unitycoders/uc-django-site
|
bs_themetools/templatetags/themetools.py
|
Python
|
gpl-3.0
| 239
|
#!/usr/bin/env bash
_=''''
exec $(dirname $0)/../../wrap python $0 "${@}"
' '''
#!/usr/bin/env python
import zipfile
import os
from distutils.dir_util import remove_tree
from glob import glob
import argparse
lib_infos = (('Cesium-1.23.zip', None, 'cesium'), #https://cesiumjs.org/downloads.html
('jQuery-File-Upload-9.12.5.zip',
'jQuery-File-Upload-9.12.5', 'fileUpload'), #https://github.com/blueimp/jQuery-File-Upload/releases
('jquery-ui-1.11.4.zip', 'jquery-ui-1.11.4', 'jquery-ui'), #https://jqueryui.com/download/all/
('jquery-ui-themes-1.11.4.zip', 'jquery-ui-themes-1.11.4', 'jquery-ui-themes'), #https://jqueryui.com/download/all/
('potree-1.3-patch1.zip', 'potree', 'potree'), #./just build_potree
('v3.17.1-dist.zip', 'v3.17.1-dist', 'OpenLayers3')) #http://openlayers.org/download/
def unzip(filename, base_dir):
print 'Unzipping', filename, base_dir
with zipfile.ZipFile(filename, 'r') as z:
z.extractall(base_dir)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--remove', default=False, action='store_true',
help='Remove deployed library directories rather than unzip them')
return parser.parse_args()
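# Usage sketch (illustrative): with VIP_DJANGO_STATIC_COMMON pointing at the
# directory that holds the zip files listed above,
#   ./deploy.py            unzips/refreshes every library into place
#   ./deploy.py --remove   only removes the deployed library directories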
def main(base_dir):
opts = parse_args()
#for old_dir in glob(os.path.join(base_dir, '*/')):
# if os.path.exists(old_dir):
# remove_tree(old_dir)
# print "Removing", old_dir
for (zip_filename, unzip_dir, dest_dir) in lib_infos:
zip_filename = os.path.join(base_dir, zip_filename)
assert(dest_dir.strip(' /\\.') != '')
dest_dir = os.path.join(base_dir, dest_dir)
#Remove the old
if os.path.exists(dest_dir):
print "Removing", dest_dir
remove_tree(dest_dir)
if not opts.remove:
if unzip_dir is not None:
unzip(zip_filename, base_dir)
unzip_dir = os.path.join(base_dir, unzip_dir)
print "Moving %s to %s" % (unzip_dir, dest_dir)
os.rename(unzip_dir, dest_dir)
else:
os.makedirs(dest_dir)
unzip(zip_filename, dest_dir)
if __name__=='__main__':
main(os.environ['VIP_DJANGO_STATIC_COMMON'])
|
ngageoint/voxel-globe
|
voxel_globe/static_common/deploy.py
|
Python
|
mit
| 2,158
|
#!/usr/bin/env python3
import subprocess
import os
import time
import signal
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('AppIndicator3', '0.1')
from gi.repository import Gtk, AppIndicator3, GObject
from threading import Thread
key = ["org.gnome.settings-daemon.plugins.power",
"lid-close-ac-action", "lid-close-battery-action"]
currpath = os.path.dirname(os.path.realpath(__file__))
def runs():
    # Returns True when the lid-close action is set to 'suspend'
return subprocess.check_output([
"gsettings", "get", key[0], key[1]
]).decode("utf-8").strip() == "'suspend'"
class Indicator():
def __init__(self):
self.app = 'show_proc'
iconpath = currpath+"/nocolor.png"
self.indicator = AppIndicator3.Indicator.new(
self.app, iconpath,
AppIndicator3.IndicatorCategory.OTHER)
self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
self.indicator.set_menu(self.create_menu())
self.update = Thread(target=self.check_runs)
        # daemonize the thread so the indicator can be stopped
self.update.setDaemon(True)
self.update.start()
def check_runs(self):
        # thread body: poll the gsettings key and update the icon on change
runs1 = None
while True:
time.sleep(1)
runs2 = runs()
# if there is a change in state, update the icon
if runs1 != runs2:
if runs2:
# set the icon to show
GObject.idle_add(
self.indicator.set_icon,
currpath+"/nocolor.png",
priority=GObject.PRIORITY_DEFAULT
)
else:
# set the icon to hide
GObject.idle_add(
self.indicator.set_icon,
currpath+"/green.png",
priority=GObject.PRIORITY_DEFAULT
)
runs1 = runs2
def create_menu(self):
menu = Gtk.Menu()
# quit
item_quit = Gtk.MenuItem('Quit')
item_quit.connect('activate', self.stop)
menu.append(item_quit)
menu.show_all()
return menu
def stop(self, source):
Gtk.main_quit()
Indicator()
GObject.threads_init()
signal.signal(signal.SIGINT, signal.SIG_DFL)
Gtk.main()
|
orschiro/dslli
|
show_state.py
|
Python
|
gpl-3.0
| 2,393
|
from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.assets.asset import Asset
class Sukuk(Asset):
def __init__(self, asset_manager_id, asset_id, maturity_date, asset_issuer_id=None,
asset_status='Active', roll_price=True, issue_date=None, display_name='', description='',
country_id=None, venue_id=None, currency=None,
comments=None, links=None, references=None, *args, **kwargs):
if not hasattr(self, 'asset_class'): # A more specific child class may have already set this
self.asset_class = 'Sukuk'
self.maturity_date = maturity_date
super(Sukuk, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id, fungible=True,
asset_issuer_id=asset_issuer_id, asset_status=asset_status,
roll_price=roll_price, display_name=display_name, currency=currency,
description=description, country_id=country_id, venue_id=venue_id,
comments=comments, links=links, references=references,
issue_date=issue_date,
*args, **kwargs)
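# --- Usage sketch (illustrative only; every value below is made up) ---
# A minimal instantiation; maturity_date is passed as a datetime.date here
# on the assumption that the SDK accepts one. Defined but never called.
def _example_sukuk():
    import datetime
    return Sukuk(asset_manager_id=1, asset_id='SUKUK-001',
                 maturity_date=datetime.date(2030, 1, 1),
                 currency='USD', display_name='Example Sukuk')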
|
amaas-fintech/amaas-core-sdk-python
|
amaascore/assets/sukuk.py
|
Python
|
apache-2.0
| 1,282
|
import pcapture2 as p
import dbutil as db
import cal_header as ch
import stateframe as stf
import numpy as np
def DCM_master_attn_cal(update=False):
''' New version of this command, which uses the power values in
the 10gbe packet headers instead of the very slow measurement
of the ADC levels themselves. This version only takes about 8 s!
If update is True, it writes the results to the SQL database.
Returns the DCM_master_table in the form of lines of text
strings, with labels (handy for viewing).
'''
pwr = np.zeros((50,8,4),'int')
# Capture on eth2 interface
command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
p.sendcmd(command)
# Capture on eth3 interface
command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
p.sendcmd(command)
headers = p.list_header('/home/user/Python/dcm2.pcap')
for line in headers:
try:
j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
pwr[j,id] = (p1, p2, p3, p4)
except:
# This is to skip the non-data header lines in the list
pass
headers = p.list_header('/home/user/Python/dcm3.pcap')
for line in headers:
try:
j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
pwr[j,id] = (p1, p2, p3, p4)
except:
# This is to skip the non-data header lines in the list
pass
# Reshape to (slot, nant, npol)
pwr.shape = (50,16,2)
# Read current frequency sequence from database
cursor = db.get_cursor()
query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
fseq, msg = db.do_query(cursor, query)
if msg == 'Success':
fseqlist = fseq['FSeqList'][::-1] # Reverse the order
bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
cursor.close()
# Read current DCM_master_table from database
xml, buf = ch.read_cal(2)
orig_table = stf.extract(buf,xml['Attenuation'])
# Order pwr values according to bandlist, taking median of any repeated values
new_pwr = np.zeros((34,16,2))
for i in range(34):
idx, = np.where(bandlist-1 == i)
if len(idx) > 0:
new_pwr[i] = np.median(pwr[idx],0)
new_pwr.shape = (34,32)
# Now determine the change in attenuation needed to achieve a target
# value of 1600. Eliminate last two entries, corresponding to Ant16
attn = np.log10(new_pwr[:,:-2]/1600.)*10.
new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
DCMlines = []
DCMlines.append('# Ant1 Ant2 Ant3 Ant4 Ant5 Ant6 Ant7 Ant8 Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
DCMlines.append('# X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y')
DCMlines.append('# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
for band in range(1,35):
DCMlines.append('{:2} : {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
if update:
msg = ch.dcm_master_table2sql(DCMlines)
if msg:
print 'Success'
else:
print 'Error writing table to SQL database!'
return DCMlines
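# --- Illustrative sketch (not part of the original module) ---
# The update above is plain dB arithmetic: a measured power P relative to the
# 1600-count target corresponds to 10*log10(P/1600) dB, which is added to the
# current attenuation and truncated to the 2-dB steps the DCMs support.
# Defined but never called; relies on the module-level numpy import.
def _attn_step_sketch(measured_power, current_attn, target=1600.):
    delta_db = 10. * np.log10(measured_power / target)
    return int(np.clip(current_attn + delta_db, 0, 30) / 2) * 2
# e.g. _attn_step_sketch(3200., 10) -> 12 (~3 dB hot, add ~3 dB, 2-dB steps)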
if __name__ == "__main__":
import sys
print len(sys.argv)
if len(sys.argv) == 2:
if sys.argv[1] == 'update':
lines = DCM_master_attn_cal(True)
for line in lines:
print line
else:
lines = DCM_master_attn_cal()
for line in lines:
print line
|
dgary50/eovsa
|
dcm_master_attn_cal.py
|
Python
|
gpl-2.0
| 3,790
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# all regexes are case insensitive
normal_regexes = [
('standard_repeat',
# Show.Name.S01E02.S01E03.Source.Quality.Etc-Group
# Show Name - S01E02 - S01E03 - S01E04 - Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator
e(?P<extra_ep_num>\d+))+ # E03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('fov_repeat',
# Show.Name.1x02.1x03.Source.Quality.Etc-Group
# Show Name - 1x02 - 1x03 - 1x04 - Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
([. _-]+(?P=season_num)x # 1x
(?P<extra_ep_num>\d+))+ # 03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('standard',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
\(?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)\)? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
([. _,-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?)?$ # Group
'''),
('newpct',
# American Horror Story - Temporada 4 HDTV x264[Cap.408_409]SPANISH AUDIO -NEWPCT
# American Horror Story - Temporada 4 [HDTV][Cap.408][Espanol Castellano]
# American Horror Story - Temporada 4 HDTV x264[Cap.408]SPANISH AUDIO –NEWPCT)
r'''
(?P<series_name>.+?).-.+\d{1,2}[ ,.] # Show name: American Horror Story
(?P<extra_info>.+)\[Cap\. # Quality: HDTV x264, [HDTV], HDTV x264
(?P<season_num>\d{1,2}) # Season Number: 4
(?P<ep_num>\d{2}) # Episode Number: 08
((_\d{1,2}(?P<extra_ep_num>\d{2}))|.*\]) # Episode number2: 09
'''),
('fov',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
r'''
^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('scene_date_format',
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('scene_sports_format',
# Show.Name.100.Event.2010.11.23.Source.Quality.Etc-Group
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r'''
^(?P<series_name>.*?(UEFA|MLB|ESPN|WWE|MMA|UFC|TNA|EPL|NASCAR|NBA|NFL|NHL|NRL|PGA|SUPER LEAGUE|FORMULA|FIFA|NETBALL|MOTOGP).*?)[. _-]+
((?P<series_num>\d{1,3})[. _-]+)?
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))[. _-]+
((?P<extra_info>.+?)((?<![. _-])
(?<!WEB)-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$
'''),
('stupid_with_denotative',
# aaf-sns03e09
# flhd-supernaturals07e02-1080p
r'''
(?P<release_group>.+?)(?<!WEB)-(?P<series_name>\w*)(?<!\d)[\. ]? # aaf-sn
(?!264) # don't count x264
s(?P<season_num>\d{1,2}) # s03
e(?P<ep_num>\d{2})(?:(rp|-(1080p|720p)))?$ # e09
'''),
('stupid',
# tpz-abc102
r'''
(?P<release_group>.+?)(?<!WEB)-(?P<series_name>\w*)(?<!\d)[\. ]? # tpz-abc
(?!264) # don't count x264
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2})$ # 02
'''),
('verbose',
# Show Name Season 1 Episode 2 Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show Name and separator
(season|series)[. _-]+ # season and separator
(?P<season_num>\d+)[. _-]+ # 1
episode[. _-]+ # episode and separator
(?P<ep_num>\d+)[. _-]+ # 02 and separator
(?P<extra_info>.+)$ # Source_Quality_Etc-
'''),
('season_only',
# Show.Name.S01.Source.Quality.Etc-Group
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season_multi_ep',
# Show.Name.E02-03
# Show.Name.E02.2010
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|(?<!e)[ivx]+)) # first ep num
((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner
(?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|(?<!e)[ivx]+))[. _-]) # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season_general',
# Show.Name.E23.Test
# Show.Name.Part.3.Source.Quality.Etc-Group
# Show.Name.Part.1.and.Part.2.Blah-Group
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|((?<!e)[ivx]+(?=[. _-])))) # first ep num
([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner
((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part
(?P<extra_ep_num>(?!(1080|720|480)[pi])
(\d+|((?<!e)[ivx]+(?=[. _-]))))[. _-])* # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('bare',
# Show.Name.102.Source.Quality.Etc-Group
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d{1,2}) # 1
(e?) # Optional episode separator
(?P<ep_num>\d{2}) # 02 and separator
([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc-
(-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season',
# Show Name - 01 - Ep Name
# 01 - Ep Name
# 01 - Ep Name
r'''
^((?P<series_name>.+?)(?:[. _-]{2,}|[. _]))? # Show_Name and separator
(?P<ep_num>\d{1,3}) # 02
(?:-(?P<extra_ep_num>\d{1,3}))* # -03-04-05 etc
(\s*(?:of)?\s*\d{1,3})? # of joiner (with or without spaces) and series total ep
[. _-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
]
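# --- Usage sketch (illustrative; not part of the original module) ---
# The patterns above are verbose-mode regexes and, per the note at the top of
# this file, are applied case-insensitively. A minimal standalone check of
# the 'standard' pattern; the group values in the comments are what this
# sketch is expected to yield, not authoritative parser output.
def _example_standard_match():
    import re
    pattern = dict(normal_regexes)['standard']
    m = re.match(pattern, 'Show.Name.S01E02.HDTV.x264-GROUP',
                 re.VERBOSE | re.IGNORECASE)
    # m.group('series_name') -> 'Show.Name'
    # m.group('season_num') -> '01', m.group('ep_num') -> '02'
    # m.group('release_group') -> 'GROUP'
    return m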
anime_regexes = [
('anime_horriblesubs',
# [HorribleSubs] Maria the Virgin Witch - 01 [720p].mkv
r'''
^(?:\[(?P<release_group>HorribleSubs)\][\s\.])
(?:(?P<series_name>.+?)[\s\.]-[\s\.])
(?P<ep_ab_num>((?!(1080|720|480)[pi]))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?
(?:v(?P<version>[0-9]))?
(?:[\w\.\s]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
.*?
'''),
('anime_ultimate',
r'''
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]+?
(?:v(?P<version>[0-9]))?
(?:[\w\.]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
(?:[ ._]?\[(?P<crc>\w+)\])?
.*?
'''),
('anime_french_fansub',
# [Kaerizaki-Fansub]_One_Piece_727_[VOSTFR][HD_1280x720].mp4
# [Titania-Fansub]_Fairy_Tail_269_[VOSTFR]_[720p]_[1921E00C].mp4
# [ISLAND]One_Piece_726_[VOSTFR]_[V1]_[8bit]_[720p]_[2F7B3FA2].mp4
# Naruto Shippuden 445 VOSTFR par Fansub-Resistance (1280*720) - version MQ
# Dragon Ball Super 015 VOSTFR par Fansub-Resistance (1280x720) - HQ version
# [Mystic.Z-Team].Dragon.Ball.Super.-.épisode.36.VOSTFR.720p
# [Z-Team][DBSuper.pw] Dragon Ball Super - 028 (VOSTFR)(720p AAC)(MP4)
# [SnF] Shokugeki no Souma - 24 VOSTFR [720p][41761A60].mkv
# [Y-F] Ao no Kanata no Four Rhythm - 03 Vostfr HD 8bits
# Phantasy Star Online 2 - The Animation 04 vostfr FHD
# Detective Conan 804 vostfr HD
# Active Raid 04 vostfr [1080p]
# Sekko Boys 04 vostfr [720p]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (Optional)
     ((\[|\().+?(\]|\))[ ._-]*)?                                           # Extra info (Optional)
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
     ((épisode|episode|Episode)[ ._-]+)?                                   # "épisode" word used by some fansubs (Optional)
(?P<ep_ab_num>\d{1,3})[ ._-]+ # Episode number and separator
(((\[|\())?(VOSTFR|vostfr|Vostfr|VostFR|vostFR)((\]|\)))?([ ._-])*)+ # Subtitle Language and separator
     (par Fansub-Resistance)?                                              # phrase used by the Fansub-Resistance group (Optional)
(\[((v|V)(?P<version>[0-9]))\]([ ._-])*)? # Version and separator (Optional)
((\[(8|10)(Bits|bits|Bit|bit)\])?([ ._-])*)? # Colour resolution and separator (Optional)
((\[|\()((FHD|HD|SD)*([ ._-])*((?P<extra_info>\d{3,4}[xp*]?\d{0,4}[\.\w\s-]*)))(\]|\)))? # Source_Quality_Etc-
([ ._-]*\[(?P<crc>\w{8})\])? # CRC (Optional)
.* # Separator and EOL
'''),
('anime_standard',
# [Group Name] Show Name.13-14
# [Group Name] Show Name - 13-14
# Show Name 13-14
# [Group Name] Show Name.13
# [Group Name] Show Name - 13
# Show Name 13
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_round',
# [Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]
# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\((?P<extra_info>(CX[ ._-]?)?\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_slash',
# [SGKK] Bleach 312v1 [720p/MKV]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}p) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_codec',
# [Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]
# [Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]
# [Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._]* # Show_Name and separator
([ ._-]+-[ ._-]+[A-Z]+[ ._-]+)?[ ._-]+ # funny stuff, this is sooo nuts ! this will kick me in the butt one day
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
([ ._-](\[\w{1,2}\])?\[[a-z][.]?\w{2,4}\])? #codec
[ ._-]*\[(?P<extra_info>(\d{3,4}[xp]?\d{0,4})?[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])?
.*? # Separator and EOL
'''),
('anime_codec_crc',
r'''
^(?:\[(?P<release_group>.*?)\][ ._-]*)?
(?:(?P<series_name>.*?)[ ._-]*)?
(?:(?P<ep_ab_num>(((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))[ ._-]*).+?
(?:\[(?P<codec>.*?)\][ ._-]*)
(?:\[(?P<crc>\w{8})\])?
.*?
'''),
('anime_SxxExx',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(\()?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)(\))? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('anime_and_normal',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
r'''
     ^(?P<series_name>.+?)[ ._-]+                    # start of string, series name and non-optional separator
     [sS](?P<season_num>\d+)[. _-]*                  # S01 and optional separator
     [eE](?P<ep_num>\d+)                             # episode E02
     (([. _-]*e|-)                                   # linking e/- char
     (?P<extra_ep_num>\d+))*                         # additional E03/etc
     ([ ._-]{2,}|[ ._]+)                             # if "-" is the separator, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
     ((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?        # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''),
('anime_and_normal_x',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
r'''
     ^(?P<series_name>.+?)[ ._-]+                    # start of string, series name and non-optional separator
     (?P<season_num>\d+)[. _-]*                      # season 16 and optional separator
     [xX](?P<ep_num>\d+)                             # episode x03
     (([. _-]*e|-)                                   # linking e/- char
     (?P<extra_ep_num>\d+))*                         # additional E03/etc
     ([ ._-]{2,}|[ ._]+)                             # if "-" is the separator, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
     ((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?        # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''),
('anime_and_normal_reverse',
# Bleach - 313-314 - s16e03-04
r'''
     ^(?P<series_name>.+?)[ ._-]+                    # start of string, series name and non-optional separator
     (?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})           # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
     (v(?P<version>[0-9]))?                          # the version, e.g. "v2"
     ([ ._-]{2,}|[ ._]+)                             # if "-" is the separator, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
     [sS](?P<season_num>\d+)[. _-]*                  # S16 and optional separator
     [eE](?P<ep_num>\d+)                             # episode E03
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''),
('anime_and_normal_front',
# 165.Naruto Shippuuden.s08e014
r'''
^(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # start of string and absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))?[ ._-]+ # the version e.g. "v2"
(?P<series_name>.+?)[ ._-]+
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+)
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''),
('anime_ep_name',
r'''
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]*?
(?:v(?P<version>[0-9])[ ._-]+?)?
(?:.+?[ ._-]+?)?
\[(?P<extra_info>\w+)\][ ._-]?
(?:\[(?P<crc>\w{8})\])?
.*?
'''),
('anime_WarB3asT',
# 003. Show Name - Ep Name.ext
# 003-004. Show Name - Ep Name.ext
r'''
^(?P<ep_ab_num>\d{3,4})(-(?P<extra_ab_ep_num>\d{3,4}))?\.\s+(?P<series_name>.+?)\s-\s.*
'''),
('anime_bare',
# One Piece - 102
# [ACX]_Wolf's_Spirit_001.mkv
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)?
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # v2
.*? # Separator and EOL
''')
]
|
Jusedawg/SickRage
|
sickbeard/name_parser/regexes.py
|
Python
|
gpl-3.0
| 23,791
|
# Copyright (C) 2010 Wil Mahan <wmahan+fatics@gmail.com>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
""" Chess960, known as fr or fischerandom on original FICS.
http://www.dwheeler.com/essays/Fischer_Random_Chess.html
"""
import re
import copy
import random
from array import array
from db import db
from game_constants import *
from variant.base_variant import BaseVariant, IllegalMoveError
"""
0x88 board representation; pieces are represented as ASCII,
the same as FEN. A blank square is '-'.
"""
class BadFenError(Exception):
def __init__(self, reason=None):
self.reason = reason
piece_moves = {
'n': [-0x21, -0x1f, -0xe, -0x12, 0x12, 0xe, 0x1f, 0x21],
'b': [-0x11, -0xf, 0xf, 0x11],
'r': [-0x10, -1, 1, 0x10],
'q': [-0x11, -0xf, 0xf, 0x11, -0x10, -1, 1, 0x10],
'k': [-0x11, -0xf, 0xf, 0x11, -0x10, -1, 1, 0x10]
}
direction_table = array('i', [0 for i in range(0, 0x100)])
def dir(fr, to):
    """Returns the direction a queen needs to go to get from FR to TO,
    or 0 if it's not possible."""
    return direction_table[to - fr + 0x7f]
sliding_pieces = frozenset(['b', 'r', 'q', 'B', 'R', 'Q'])
piece_material = {
'-': 0,
'p': 1,
'n': 3,
'b': 3,
'r': 5,
'q': 9,
'k': 0
}
def to_castle_flags(w_oo, w_ooo, b_oo, b_ooo):
return (w_oo << 3) + (w_ooo << 2) + (b_oo << 1) + b_ooo
def str_to_sq(s):
return 'abcdefgh'.index(s[0]) + 0x10 * '12345678'.index(s[1])
def sq_to_str(sq):
return 'abcdefgh'[file(sq)] + '12345678'[rank(sq)]
def piece_is_white(pc):
assert(len(pc) == 1)
assert(pc in 'pnbrqkPNBRQK')
return pc.isupper()
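# --- Illustrative note (not part of the original module) ---
# rank(), file(), valid_sq() and square constants like A1/H1/D1/F1 come from
# game_constants via the star import above. On a 0x88 board the low nibble of
# a square is its file and the high nibble its rank, so they are presumably
# equivalent to:
#   rank(sq) -> sq >> 4
#   file(sq) -> sq & 7
#   valid_sq(sq) -> not (sq & 0x88)
# e.g. str_to_sq('e4') == 0x34, rank(0x34) == 3, file(0x34) == 4, and
# sq_to_str(0x34) == 'e4'. Likewise to_castle_flags(True, True, True, True)
# packs all four castling rights into 0xf.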
class Zobrist(object):
"""Zobrist keys for low-overhead repetition detection"""
_piece_index = {
'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5,
'P': 6, 'N': 7, 'B': 8, 'R': 9, 'Q': 10, 'K': 11
}
# Note: using 64-bit hashes, the expected number of positions
# before a collision is 2^32. Given that a collision has to
# occur within one game to be meaningful, and games are no
    # longer than 5949 moves, the chance of any effect should be
    # negligible.
def __init__(self):
random.seed(2010)
self.side_hash = random.getrandbits(64)
self._piece = self._rand_list(0x10 * 0x80)
self._ep = self._rand_list(8)
self._castle = self._rand_list(0x10)
random.seed()
def piece_hash(self, sq, pc):
assert((0xf << 7) & sq == 0)
assert(valid_sq(sq))
#print 'hashing %s at %s' % (pc, sq_to_str(sq))
return self._piece[(self._piece_index[pc] << 7) | sq]
def ep_hash(self, ep):
return self._ep[file(ep)]
def castle_hash(self, flags):
assert(flags & ~0xf == 0)
return self._castle[flags]
def _rand_list(self, len):
return [random.getrandbits(64) for i in xrange(0, len)]
zobrist = Zobrist()
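# --- Illustrative sketch (not part of the original module) ---
# XOR is its own inverse, which is what makes Zobrist hashing cheap to keep
# up to date incrementally: a quiet pawn move just XORs the piece hash out of
# the old square and in on the new one. Defined but never called here.
def _zobrist_move_sketch():
    fr, to = str_to_sq('e2'), str_to_sq('e4')
    h = zobrist.piece_hash(fr, 'P')    # hash contribution of the pawn on e2
    h ^= zobrist.piece_hash(fr, 'P')   # XOR the pawn out of e2 ...
    h ^= zobrist.piece_hash(to, 'P')   # ... and in on e4
    assert(h == zobrist.piece_hash(to, 'P'))
    return h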
class Move(object):
def __init__(self, pos, fr, to, prom=None, is_oo=False,
is_ooo=False, is_ep=False, new_ep=None):
self.pos = pos
self.fr = fr
self.to = to
self.pc = self.pos.board[self.fr]
self.prom = prom
self.is_oo = is_oo
self.is_ooo = is_ooo
self.capture = '-' if is_oo or is_ooo else pos.board[to]
self.is_capture = self.capture != '-'
self.is_ep = is_ep
self.new_ep = new_ep
self.time = None
self._san = None
self._verbose_alg = None
self.lag = 0
# if a promotion piece is not given, assume queen
if not self.prom:
if self.pc == 'p' and rank(to) == 0:
self.prom = 'q'
elif self.pc == 'P' and rank(to) == 7:
self.prom = 'Q'
def __str__(self):
s = '%s%s' % (sq_to_str(self.fr), sq_to_str(self.to))
if self.prom:
s += '=%s' % self.prom
return s
def check_pseudo_legal(self):
"""Tests if a move is pseudo-legal, that is, legal ignoring the
fact that the king cannot be left in check. Also sets en passant
flags for this move. This is used for long algebraic moves,
but not san, which does these checks implicitly."""
if self.pc == '-' or piece_is_white(self.pc) != self.pos.wtm:
raise IllegalMoveError('can only move own pieces')
if self.is_oo or self.is_ooo:
return
if (self.is_capture and piece_is_white(self.capture) == self.pos.wtm
and not self.is_oo and not self.is_ooo):
raise IllegalMoveError('cannot capture own piece')
diff = self.to - self.fr
if self.pc == 'p':
if self.pos.board[self.to] == '-':
if diff == -0x10:
pass
elif diff == -0x20 and rank(self.fr) == 6:
self.new_ep = self.fr - 0x10
if self.pos.board[self.new_ep] != '-':
raise IllegalMoveError('bad en passant')
elif diff in [-0x11, -0xf] and self.to == self.pos.ep:
self.is_ep = True
else:
raise IllegalMoveError('bad pawn push')
else:
if not diff in [-0x11, -0xf]:
raise IllegalMoveError('bad pawn capture')
elif self.pc == 'P':
if self.pos.board[self.to] == '-':
if diff == 0x10:
pass
elif diff == 0x20 and rank(self.fr) == 1:
self.new_ep = self.fr + 0x10
if self.pos.board[self.new_ep] != '-':
raise IllegalMoveError('bad en passant')
elif diff in [0x11, 0xf] and self.to == self.pos.ep:
self.is_ep = True
else:
raise IllegalMoveError('bad pawn push')
else:
if not diff in [0x11, 0xf]:
raise IllegalMoveError('bad pawn capture')
else:
if self.pc in sliding_pieces:
d = dir(self.fr, self.to)
if d == 0 or not d in piece_moves[self.pc.lower()]:
raise IllegalMoveError('piece cannot make that move')
# now check if there are any pieces in the way
cur_sq = self.fr + d
while cur_sq != self.to:
assert(valid_sq(cur_sq))
if self.pos.board[cur_sq] != '-':
raise IllegalMoveError('sliding piece blocked')
cur_sq += d
else:
if not diff in piece_moves[self.pc.lower()]:
raise IllegalMoveError('piece cannot make that move')
def check_legal(self):
"""Test whether a move leaves the king in check, or if
castling if blocked or otherwise unavailable. These
tests are grouped together because they are common
to all move formats."""
if self.is_oo:
pos = self.pos
if (pos.in_check
or not pos.check_castle_flags(pos.wtm, True)):
raise IllegalMoveError('illegal castling')
# unimpeded
assert(self.fr == pos.king_pos[pos.wtm])
            if pos.wtm:
                rsq = pos.hside_rook_file
                assert(pos.board[rsq] == 'R')
            else:
                rsq = 0x70 + pos.hside_rook_file
                assert(pos.board[rsq] == 'r')
sqs = [self.fr, self.to, rsq]
for sq in range(min(sqs), max(sqs) + 1):
if sq != rsq and sq != self.fr and pos.board[sq] != '-':
raise IllegalMoveError('castling blocked')
# unattacked
assert(rank(self.fr) == rank(self.to))
dir = 1 if self.fr < self.to else -1
for sq in range(self.fr + dir, self.to + dir, dir):
if pos.under_attack(sq, not pos.wtm):
raise IllegalMoveError('castling through check')
return
if self.is_ooo:
pos = self.pos
if (pos.in_check
or not pos.check_castle_flags(pos.wtm, False)):
raise IllegalMoveError('illegal castling')
# unimpeded
assert(self.fr == pos.king_pos[pos.wtm])
            if pos.wtm:
                rsq = pos.aside_rook_file
                assert(pos.board[rsq] == 'R')
            else:
                rsq = 0x70 + pos.aside_rook_file
                assert(pos.board[rsq] == 'r')
sqs = [self.fr, self.to, rsq]
for sq in range(min(sqs), max(sqs) + 1):
if sq != rsq and sq != self.fr and pos.board[sq] != '-':
raise IllegalMoveError('castling blocked')
# unattacked
assert(rank(self.fr) == rank(self.to))
dir = 1 if self.fr < self.to else -1
for sq in range(self.fr + dir, self.to + dir, dir):
if pos.under_attack(sq, not pos.wtm):
raise IllegalMoveError('castling through check')
return
self.pos.make_move(self)
try:
if self.pos.under_attack(self.pos.king_pos[not self.pos.wtm],
self.pos.wtm):
raise IllegalMoveError('leaves king in check')
finally:
self.pos.undo_move(self)
def to_san(self):
if self._san is None:
self._san = self._to_san()
return self._san
def add_san_decorator(self):
assert(self._san is not None)
if self.pos.is_checkmate:
self._san += '#'
elif self.pos.in_check:
self._san += '+'
def _to_san(self):
if self.is_oo:
san = 'O-O'
elif self.is_ooo:
san = 'O-O-O'
elif self.pc in ['P', 'p']:
san = ''
if self.is_capture or self.is_ep:
san += 'abcdefgh'[file(self.fr)] + 'x'
san += sq_to_str(self.to)
if self.prom:
san += '=' + self.prom.upper()
else:
assert(not self.is_ep)
san = self.pc.upper()
ambigs = self.pos.get_from_sqs(self.pc, self.to)
if not (len(ambigs) >= 1):
print 'move not ambig with itself: %s%s' % (sq_to_str(self.fr), sq_to_str(self.to))
assert(len(ambigs) >= 1)
if len(ambigs) > 1:
r = rank(self.fr)
f = file(self.fr)
# try disambiguating with file
if len(filter(lambda sq: file(sq) == f, ambigs)) == 1:
san += 'abcdefgh'[f]
elif len(filter(lambda sq: rank(sq) == r, ambigs)) == 1:
san += '12345678'[r]
else:
san += sq_to_str(self.fr)
if self.is_capture:
san += 'x'
san += sq_to_str(self.to)
return san
def to_verbose_alg(self):
if self._verbose_alg is None:
self._verbose_alg = self._to_verbose_alg()
return self._verbose_alg
def _to_verbose_alg(self):
"""convert to the verbose notation used in style12"""
if self.is_oo:
# why fics, why?
ret = 'o-o'
elif self.is_ooo:
ret = 'o-o-o'
else:
ret = self.pc.upper() + '/'
ret += sq_to_str(self.fr)
ret += '-'
ret += sq_to_str(self.to)
if self.prom:
ret += '=' + self.prom.upper()
return ret
def is_legal(self):
try:
self.check_legal()
except IllegalMoveError:
return False
else:
return True
class Undo(object):
"""information needed to undo a move"""
pass
class PositionHistory(object):
"""keeps past of past positions for repetition detection"""
def __init__(self):
self.hashes = [None] * 40
self.moves = [None] * 40
def set_hash(self, ply, hash):
if ply >= len(self.hashes):
self.hashes.extend([None] * (ply - len(self.hashes) + 1))
self.hashes[ply] = hash
def set_move(self, ply, mv):
if ply >= len(self.moves):
self.moves.extend([None] * (ply - len(self.moves) + 1))
self.moves[ply] = mv
def get_hash(self, ply):
return self.hashes[ply]
def get_move(self, ply):
return self.moves[ply]
class Position(object):
def __init__(self, fen):
self.board = array('c', 0x80 * ['-'])
self.castle_flags = 0
self.king_pos = [None, None]
self.history = PositionHistory()
self.set_pos(fen)
def _set_aside_rook_file(self, v):
if self.aside_rook_file is None:
self.aside_rook_file = v
else:
assert(self.aside_rook_file == v)
def _set_hside_rook_file(self, v):
if self.hside_rook_file is None:
self.hside_rook_file = v
else:
assert(self.hside_rook_file == v)
set_pos_re = re.compile(r'''^([1-8rnbqkpRNBQKP/]+) ([wb]) ([kqKQ]+|-) ([a-h][36]|-) (\d+) (\d+)$''')
def set_pos(self, fen, detect_check=True):
"""Set the position from Forsyth-Fdwards notation. The format
is intentionally interpreted strictly; better to give the user an
error than take in bad data."""
try:
# rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
m = self.set_pos_re.match(fen)
if not m:
raise BadFenError('does not look like FEN')
(pos, side, castle_flags, ep, fifty_count, full_moves) = [
m.group(i) for i in range(1, 7)]
ranks = pos.split('/')
ranks.reverse()
self.hash = 0
self.material = [0, 0]
self.aside_rook_file = None
self.hside_rook_file = None
for (r, rank_str) in enumerate(ranks):
sq = 0x10 * r
for c in rank_str:
d = '12345678'.find(c)
if d >= 0:
sq += d + 1
else:
assert(valid_sq(sq))
self.board[sq] = c
self.hash ^= zobrist.piece_hash(sq, c)
self.material[piece_is_white(c)] += \
piece_material[c.lower()]
if c == 'k':
if self.king_pos[0] != None:
# multiple kings
raise BadFenError()
self.king_pos[0] = sq
elif c == 'K':
if self.king_pos[1] != None:
# multiple kings
raise BadFenError()
self.king_pos[1] = sq
elif c.lower() == 'p':
if rank(sq) in [0, 7]:
# pawn on 1st or 8th rank
raise BadFenError()
sq += 1
if sq & 0xf != 8:
# wrong row length
raise BadFenError()
if None in self.king_pos:
# missing king
raise BadFenError()
self.wtm = side == 'w'
if self.wtm:
self.hash ^= zobrist.side_hash
self.castle_mask = array('i', [0xf for i in range(0x80)])
if castle_flags == '-':
self.castle_flags = 0
else:
(w_oo, w_ooo, b_oo, b_ooo) = (False, False, False, False)
for c in castle_flags:
if c == 'K':
if w_oo:
raise BadFenError()
for sq in range(H1, self.king_pos[1], -1):
if self.board[sq] == 'R':
self._set_hside_rook_file(file(sq))
break
assert(self.hside_rook_file is not None)
w_oo = True
elif c == 'Q':
if w_ooo:
raise BadFenError()
for sq in range(A1, self.king_pos[1]):
if self.board[sq] == 'R':
self._set_aside_rook_file(file(sq))
break
assert(self.aside_rook_file is not None)
w_ooo = True
elif c == 'k':
if b_oo:
raise BadFenError()
for sq in range(H8, self.king_pos[0], -1):
if self.board[sq] == 'r':
self._set_hside_rook_file(file(sq))
break
assert(self.hside_rook_file is not None)
b_oo = True
elif c == 'q':
if b_ooo:
raise BadFenError()
                        for sq in range(A8, self.king_pos[0]):
if self.board[sq] == 'r':
self._set_aside_rook_file(file(sq))
break
assert(self.aside_rook_file is not None)
b_ooo = True
else:
# X-FEN castling disambiguation
                            f = 'abcdefgh'.find(c)
if f >= 0:
# black castling with inner rook
sq = 0x70 + f
assert(self.board[sq] == 'r')
                                if sq < self.king_pos[0]:
                                    self._set_aside_rook_file(f)
                                    b_ooo = True
                                else:
                                    self._set_hside_rook_file(f)
                                    b_oo = True
else:
                                sq = 'ABCDEFGH'.find(c)
                                assert(sq >= 0)
                                assert(self.board[sq] == 'R')
                                # on the first rank a square index equals its file
                                if sq < self.king_pos[1]:
                                    self._set_aside_rook_file(sq)
                                    w_ooo = True
                                else:
                                    self._set_hside_rook_file(sq)
                                    w_oo = True
self.castle_flags = to_castle_flags(w_oo, w_ooo,
b_oo, b_ooo)
self.castle_mask[self.king_pos[0]] = to_castle_flags(
True, True, False, False)
self.castle_mask[self.king_pos[1]] = to_castle_flags(
False, False, True, True)
if w_oo:
assert(rank(self.king_pos[1]) == 0)
assert(file(self.king_pos[1]) < self.hside_rook_file)
self.castle_mask[self.hside_rook_file] = (
to_castle_flags(False, True, True, True))
if w_ooo:
assert(rank(self.king_pos[1]) == 0)
assert(self.aside_rook_file < file(self.king_pos[1]))
self.castle_mask[self.aside_rook_file] = (
to_castle_flags(True, False, True, True))
if b_oo:
assert(rank(self.king_pos[0]) == 7)
assert(file(self.king_pos[0]) < self.hside_rook_file)
self.castle_mask[0x70 + self.hside_rook_file] = (
to_castle_flags(True, True, False, True))
if b_ooo:
assert(rank(self.king_pos[0]) == 7)
assert(self.aside_rook_file < file(self.king_pos[0]))
self.castle_mask[0x70 + self.aside_rook_file] = (
to_castle_flags(True, True, True, False))
self.hash ^= zobrist.castle_hash(self.castle_flags)
self.fifty_count = int(fifty_count, 10)
self.ply = 2 * (int(full_moves, 10) - 1) + int(not self.wtm)
self.start_ply = self.ply # 0 for new games
if ep == '-':
self.ep = None
else:
# only set ep if there is a legal capture
self.ep = 'abcdefgh'.index(ep[0]) + \
0x10 * '1234567'.index(ep[1])
if rank(self.ep) not in (2, 5):
raise BadFenError('bad en passant square')
self.hash ^= zobrist.ep_hash(self.ep)
# legality checking needs a value for in_check
self.in_check = None
                if not self._is_legal_ep(self.ep):
                    # undo the en passant square (hash it out before clearing)
                    self.hash ^= zobrist.ep_hash(self.ep)
                    self.ep = None
assert(self.hash == self._compute_hash())
self.history.set_hash(self.ply, self.hash)
if detect_check:
self.detect_check()
if self.is_checkmate or self.is_stalemate \
or self.is_draw_nomaterial:
raise BadFenError('got a terminal position')
except AssertionError:
raise
# Usually I don't like using a catch-all except, but it seems to
# be the safest default action because the FEN is supplied by
# the user.
#except:
# raise BadFenError()
def __iter__(self):
for r in range(0, 8):
for f in range(0, 8):
sq = 0x10 * r + f
yield (sq, self.board[sq])
def make_move(self, mv):
"""make the move"""
self._check_material()
assert(self.hash == self._compute_hash())
self.ply += 1
mv.undo = Undo()
mv.undo.ep = self.ep
mv.undo.in_check = self.in_check
mv.undo.castle_flags = self.castle_flags
mv.undo.fifty_count = self.fifty_count
mv.undo.material = self.material[:]
mv.undo.hash = self.hash
if self.ep:
# clear old en passant hash
self.hash ^= zobrist.ep_hash(self.ep)
self.ep = None
if not mv.prom:
# move the piece, unless this is castling and the king
# does not move
if mv.fr != mv.to:
self.board[mv.to] = mv.pc
self.hash ^= zobrist.piece_hash(mv.fr, mv.pc) ^ \
zobrist.piece_hash(mv.to, mv.pc)
self.board[mv.fr] = '-'
else:
self.board[mv.fr] = '-'
self.board[mv.to] = mv.prom
self.hash ^= zobrist.piece_hash(mv.fr, mv.pc) ^\
zobrist.piece_hash(mv.to, mv.prom)
self.material[self.wtm] += piece_material[mv.prom.lower()] \
- piece_material['p']
if mv.pc == 'k':
self.king_pos[0] = mv.to
elif mv.pc == 'K':
self.king_pos[1] = mv.to
if mv.pc in ['p', 'P'] or mv.is_capture:
self.fifty_count = 0
else:
self.fifty_count += 1
if mv.is_capture:
self.hash ^= zobrist.piece_hash(mv.to, mv.capture)
self.material[not self.wtm] -= piece_material[mv.capture.lower()]
if mv.is_ep:
self.material[not self.wtm] -= piece_material['p']
# remove the captured pawn
if self.wtm:
assert(self.board[mv.to - 0x10] == 'p')
self.hash ^= zobrist.piece_hash(mv.to - 0x10, 'p')
self.board[mv.to - 0x10] = '-'
else:
assert(self.board[mv.to + 0x10] == 'P')
self.hash ^= zobrist.piece_hash(mv.to + 0x10, 'P')
self.board[mv.to + 0x10] = '-'
elif mv.is_oo and self.hside_rook_file != 5:
# move the rook, if it moves
if self.wtm:
# clear the rook square, unless we just overwrote
# it with the king
if self.hside_rook_file != mv.to:
assert(self.board[self.hside_rook_file] == 'R')
self.board[self.hside_rook_file] = '-'
self.board[F1] = 'R'
self.hash ^= zobrist.piece_hash(F1, 'R') ^ \
zobrist.piece_hash(self.hside_rook_file, 'R')
else:
if 0x70 + self.hside_rook_file != mv.to:
assert(self.board[0x70 + self.hside_rook_file] == 'r')
self.board[0x70 + self.hside_rook_file] = '-'
self.board[F8] = 'r'
self.hash ^= zobrist.piece_hash(F8, 'r') ^ \
zobrist.piece_hash(0x70 + self.hside_rook_file, 'r')
elif mv.is_ooo and self.aside_rook_file != 3:
# move the rook, if it moves
if self.wtm:
if self.aside_rook_file != mv.to:
assert(self.board[self.aside_rook_file] == 'R')
self.board[self.aside_rook_file] = '-'
self.board[D1] = 'R'
self.hash ^= zobrist.piece_hash(D1, 'R') ^ \
zobrist.piece_hash(self.aside_rook_file, 'R')
else:
if 0x70 + self.aside_rook_file != mv.to:
assert(self.board[0x70 + self.aside_rook_file] == 'r')
self.board[0x70 + self.aside_rook_file] = '-'
self.board[D8] = 'r'
self.hash ^= zobrist.piece_hash(0x70 + self.aside_rook_file,
'r') ^ zobrist.piece_hash(D8, 'r')
self.castle_flags &= self.castle_mask[mv.fr] & self.castle_mask[mv.to]
if self.castle_flags != mv.undo.castle_flags:
self.hash ^= zobrist.castle_hash(self.castle_flags) ^ \
zobrist.castle_hash(mv.undo.castle_flags)
self.wtm = not self.wtm
self.hash ^= zobrist.side_hash
self._check_material()
if mv.new_ep and self._is_legal_ep(mv.new_ep):
self.ep = mv.new_ep
self.hash ^= zobrist.ep_hash(self.ep)
        self.history.set_move(self.ply - 1, mv)
#if (self.hash != self._compute_hash()):
# print 'isoo %d, isooo %d, wtm %d, iscap %d, hrf %d, fr %d, to %d, cf %x, oldcf %x, ep %s' % (mv.is_oo, mv.is_ooo, not self.wtm, mv.is_capture, self.hside_rook_file, mv.fr, mv.to, self.castle_flags, mv.undo.castle_flags, self.ep, )
assert(self.hash == self._compute_hash())
self.history.set_hash(self.ply, self.hash)
def _is_legal_ep(self, ep):
# According to Geurt Gijssen's "An Arbiter's Notebook" #110,
# if an en passant capture that is otherwise legal is not
# permitted because it would leave the king in check,
        # then for the purposes of claiming a draw by repetition, the
# position is identical to one where there is no such en
# passant capture. So we have to test the legality of
# en passant captures.
if self.wtm:
if (valid_sq(ep - 0x11) and self.board[ep - 0x11] == 'P' and
Move(self, ep - 0x11, ep, is_ep=True).is_legal()):
return True
elif (valid_sq(ep - 0xf) and self.board[ep - 0xf] == 'P' and
Move(self, ep - 0xf, ep, is_ep=True).is_legal()):
return True
else:
if (valid_sq(ep + 0xf) and self.board[ep + 0xf] == 'p' and
Move(self, ep + 0xf, ep, is_ep=True).is_legal()):
return True
elif (valid_sq(ep + 0x11) and self.board[ep + 0x11] == 'p' and
Move(self, ep + 0x11, ep, is_ep=True).is_legal()):
return True
return False
def _compute_hash(self):
hash = 0
if self.wtm:
hash ^= zobrist.side_hash
for (sq, pc) in self:
if pc != '-':
hash ^= zobrist.piece_hash(sq, pc)
if self.ep:
hash ^= zobrist.ep_hash(self.ep)
hash ^= zobrist.castle_hash(self.castle_flags)
return hash
def undo_move(self, mv):
"""undo the move"""
self._check_material()
self.wtm = not self.wtm
self.ply -= 1
self.ep = mv.undo.ep
self.board[mv.to] = mv.capture
self.board[mv.fr] = mv.pc
self.in_check = mv.undo.in_check
self.castle_flags = mv.undo.castle_flags
self.fifty_count = mv.undo.fifty_count
self.material = mv.undo.material
self.hash = mv.undo.hash
if mv.pc == 'k':
self.king_pos[0] = mv.fr
elif mv.pc == 'K':
self.king_pos[1] = mv.fr
if mv.is_ep:
if self.wtm:
assert(self.board[mv.to - 0x10] == '-')
self.board[mv.to - 0x10] = 'p'
else:
assert(self.board[mv.to + 0x10] == '-')
self.board[mv.to + 0x10] = 'P'
elif mv.is_oo:
if self.wtm:
assert(self.board[F1] == 'R')
self.board[self.hside_rook_file] = 'R'
if mv.fr != F1:
self.board[F1] = '-'
else:
assert(self.board[F8] == 'r')
self.board[0x70 + self.hside_rook_file] = 'r'
if mv.fr != F8:
self.board[F8] = '-'
elif mv.is_ooo:
if self.wtm:
assert(self.board[D1] == 'R')
self.board[self.aside_rook_file] = 'R'
if mv.fr != D1:
self.board[D1] = '-'
else:
assert(self.board[D8] == 'r')
self.board[0x70 + self.aside_rook_file] = 'r'
if mv.fr != D8:
self.board[D8] = '-'
self._check_material()
assert(self.hash == self._compute_hash())
def _check_material(self):
bmat = sum([piece_material[pc.lower()]
for (sq, pc) in self if pc != '-' and not piece_is_white(pc)])
assert(bmat == self.material[0])
assert(self.material[1] == sum([piece_material[pc.lower()]
for (sq, pc) in self if pc != '-' and piece_is_white(pc)]))
def detect_check(self):
"""detect whether the player to move is in check, checkmated,
or stalemated"""
self.in_check = self.under_attack(self.king_pos[self.wtm],
not self.wtm)
any_legal = self._any_legal_moves()
self.is_checkmate = self.in_check and not any_legal
self.is_stalemate = not self.in_check and not any_legal
self._check_mating_material()
self.is_draw_nomaterial = (not self.white_has_mating_material and
not self.black_has_mating_material)
def _check_mating_material(self):
self.white_has_mating_material = self.material[1] > 3
self.black_has_mating_material = self.material[0] > 3
if (not self.white_has_mating_material or
not self.black_has_mating_material):
for (sq, pc) in self:
if pc == 'P':
self.white_has_mating_material = True
elif pc == 'p':
self.black_has_mating_material = True
def get_last_move(self):
return self.history.get_move(self.ply - 1)
def _any_legal_moves(self):
if self.ep:
return True
ksq = self.king_pos[self.wtm]
if self._any_pc_moves(ksq, self.board[ksq]):
return True
for (sq, pc) in self:
#if pc != '-' and piece_is_white(pc) == self.wtm:
if pc not in ['-', 'K', 'k'] and piece_is_white(pc) == self.wtm:
cur_sq = sq
if self._any_pc_moves(sq, pc):
return True
return False
def _pawn_cap_at(self, sq):
if not valid_sq(sq):
return False
pc = self.board[sq]
return pc != '-' and piece_is_white(pc) != self.wtm
def _any_pc_moves(self, sq, pc):
if pc == 'P':
if self.board[sq + 0x10] == '-':
if Move(self, sq, sq + 0x10).is_legal():
return True
if rank(sq) == 1 and self.board[sq + 0x20] == '-' and Move(
self, sq, sq + 0x20).is_legal():
return True
if self._pawn_cap_at(sq + 0xf) and Move(
self, sq, sq + 0xf,
is_ep=sq + 0xf == self.ep).is_legal():
return True
if self._pawn_cap_at(sq + 0x11) and Move(
self, sq, sq + 0x11,
is_ep=sq + 0x11 == self.ep).is_legal():
return True
elif pc == 'p':
if self.board[sq - 0x10] == '-':
if Move(self, sq, sq - 0x10).is_legal():
return True
if rank(sq) == 6 and self.board[sq - 0x20] == '-' and Move(
self, sq, sq - 0x20).is_legal():
return True
if self._pawn_cap_at(sq - 0xf) and Move(
self, sq, sq - 0xf,
is_ep=sq - 0xf == self.ep).is_legal():
return True
if self._pawn_cap_at(sq - 0x11) and Move(
self, sq, sq - 0x11,
is_ep=sq - 0x11 == self.ep).is_legal():
return True
else:
for d in piece_moves[pc.lower()]:
cur_sq = sq + d
# we don't need to check castling because if castling
# is legal, some other king move must be also
while valid_sq(cur_sq):
topc = self.board[cur_sq]
if topc == '-' or piece_is_white(topc) != self.wtm:
mv = Move(self, sq, cur_sq)
if mv.is_legal():
return True
if not pc in sliding_pieces or self.board[cur_sq] != '-':
break
cur_sq += d
def _is_pc_at(self, pc, sq):
return valid_sq(sq) and self.board[sq] == pc
def under_attack(self, sq, wtm):
"""determine whether a square is attacked by the given side"""
# pawn attacks
if wtm:
if (self._is_pc_at('P', sq - 0x11)
or self._is_pc_at('P', sq - 0xf)):
return True
else:
if (self._is_pc_at('p', sq + 0x11)
or self._is_pc_at('p', sq + 0xf)):
return True
# knight attacks
npc = 'N' if wtm else 'n'
for d in piece_moves['n']:
if self._is_pc_at(npc, sq + d):
return True
# king attacks
kpc = 'K' if wtm else 'k'
for d in piece_moves['k']:
if self._is_pc_at(kpc, sq + d):
return True
# bishop/queen attacks
for d in piece_moves['b']:
            cur_sq = sq + d
while valid_sq(cur_sq):
if self.board[cur_sq] != '-':
if wtm:
if self.board[cur_sq] in ['B', 'Q']:
return True
else:
if self.board[cur_sq] in ['b', 'q']:
return True
# square blocked
break
cur_sq += d
# rook/queen attacks
for d in piece_moves['r']:
cur_sq = sq + d
while valid_sq(cur_sq):
if self.board[cur_sq] != '-':
if wtm:
if self.board[cur_sq] in ['R', 'Q']:
return True
else:
if self.board[cur_sq] in ['r', 'q']:
return True
# square blocked
break
cur_sq += d
return False
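    # A hedged sketch of the 0x88-style board encoding the methods above
    # assume: squares are numbered 0x10 * rank + file, so adding 0x10 steps
    # one rank, 0x11/0x0f step diagonally, and valid_sq() presumably rejects
    # any square with the 0x88 bits set. For example:
    #
    #   e2 == 0x10 * 1 + 4 == 0x14     # rank index 1, file index 4
    #   e2 + 0x10 == 0x24 == e3        # a single pawn push
    #   0x24 & 0x88 == 0, so e3 is on the board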
lalg_re = re.compile(r'([a-h][1-8])-([a-h][1-8])(?:=([NBRQ]))?$', re.I)
def move_from_lalg(self, s):
m = self.lalg_re.match(s)
if not m:
return None
fr = str_to_sq(m.group(1).lower())
to = str_to_sq(m.group(2).lower())
prom = m.group(3)
        if prom is None:
mv = Move(self, fr, to)
# don't allow king moves to represent castling for
# chess960
# TODO? allow it if unambiguous (not sure if it's necessary)
"""if mv.pc == 'K' and fr == E1:
if to == G1:
mv.is_oo = True
elif to == C1:
mv.is_ooo = True
elif mv.pc == 'k' and fr == E8:
if to == G8:
mv.is_oo = True
elif to == C8:
mv.is_ooo = True"""
else:
if self.wtm:
mv = Move(self, fr, to, prom=prom.upper())
else:
mv = Move(self, fr, to, prom=prom.lower())
if mv:
mv.check_pseudo_legal()
mv.check_legal()
return mv
san_pawn_push_re = re.compile(r'^([a-h][1-8])(?:=([NBRQ]))?$')
san_pawn_capture_re = re.compile(r'^([a-h])x([a-h][1-8])(?:=([NBRQ]))?$')
san_piece_re = re.compile(r'([NBRQK])([a-h])?([1-8])?(x)?([a-h][1-8])$')
decorator_re = re.compile(r'[\+#\?\!]+$')
def move_from_san(self, s):
s = self.decorator_re.sub('', s)
matched = False
mv = None
# examples: e4 e8=Q
m = self.san_pawn_push_re.match(s)
if m:
to = str_to_sq(m.group(1))
if self.board[to] != '-':
raise IllegalMoveError('pawn push blocked')
prom = m.group(2)
            if prom:
                if self.wtm:
                    assert(prom == prom.upper())
                else:
                    prom = prom.lower()
new_ep = None
if self.wtm:
fr = to - 0x10
if rank(to) == 3 and self.board[fr] == '-':
new_ep = fr
fr = to - 0x20
if self.board[fr] != 'P':
raise IllegalMoveError('illegal white pawn move')
if prom:
if rank(to) == 7:
mv = Move(self, fr, to, prom=prom)
else:
raise IllegalMoveError('illegal promotion')
else:
mv = Move(self, fr, to, new_ep=new_ep)
else:
fr = to + 0x10
if rank(to) == 4 and self.board[fr] == '-':
new_ep = fr
fr = to + 0x20
if self.board[fr] != 'p':
raise IllegalMoveError('illegal black pawn move')
if prom:
if rank(to) == 0:
mv = Move(self, fr, to, prom=prom)
else:
raise IllegalMoveError('illegal promotion')
else:
mv = Move(self, fr, to, new_ep=new_ep)
# examples: dxe4 dxe8=Q
m = None
if not mv:
m = self.san_pawn_capture_re.match(s)
if m:
to = str_to_sq(m.group(2))
prom = m.group(3)
if prom:
if self.wtm:
assert(prom == prom.upper())
else:
prom = prom.lower()
is_ep = to == self.ep
if is_ep:
assert(self.board[to] == '-')
else:
topc = self.board[to]
if topc == '-' or piece_is_white(topc) == self.wtm:
raise IllegalMoveError('bad pawn capture')
f = 'abcdefgh'.index(m.group(1))
if f == file(to) - 1:
if self.wtm:
fr = to - 0x11
if self.board[fr] != 'P':
raise IllegalMoveError('bad pawn capture')
else:
fr = to + 0xf
if self.board[fr] != 'p':
raise IllegalMoveError('bad pawn capture')
elif f == file(to) + 1:
if self.wtm:
fr = to - 0xf
if self.board[fr] != 'P':
raise IllegalMoveError('bad pawn capture')
else:
fr = to + 0x11
if self.board[fr] != 'p':
raise IllegalMoveError('bad pawn capture')
else:
raise IllegalMoveError('bad pawn capture file')
mv = Move(self, fr, to, prom=prom, is_ep=is_ep)
# examples: Nf3 Nxf3 Ng1xf3
m = None
if not mv:
m = self.san_piece_re.match(s)
if m:
to = str_to_sq(m.group(5))
if m.group(4):
if self.board[to] == '-':
raise IllegalMoveError('capture on blank square')
else:
if self.board[to] != '-':
raise IllegalMoveError('missing "x" to indicate capture')
pc = m.group(1) if self.wtm else m.group(1).lower()
# TODO: it would be faster to disambiguate first, so we
# do not check whether moves are legal unnecessarily
froms = self.get_from_sqs(pc, to)
if m.group(2):
if len(froms) <= 1:
raise IllegalMoveError('unnecessary disambiguation')
f = 'abcdefgh'.index(m.group(2))
froms = filter(lambda sq: file(sq) == f, froms)
if m.group(3):
r = '12345678'.index(m.group(3))
if len(froms) <= 1:
raise IllegalMoveError('unnecessary disambiguation')
froms = filter(lambda sq: rank(sq) == r, froms)
if len(froms) != 1:
raise IllegalMoveError('illegal or ambiguous move %s: %d interpretations' % (s, len(froms)))
mv = Move(self, froms[0], to)
# begin consistency check
if mv:
try:
mv.check_pseudo_legal()
except IllegalMoveError:
raise RuntimeError('san inconsistency')
mv.check_legal()
# end consistency check
return mv
def move_from_castle(self, s):
mv = None
s = self.decorator_re.sub('', s)
if not mv and s in ['O-O', 'OO', 'o-o']:
if self.wtm:
mv = Move(self, self.king_pos[1], G1, is_oo=True)
else:
mv = Move(self, self.king_pos[0], G8, is_oo=True)
if not mv and s in ['O-O-O', 'OOO', 'o-o-o']:
if self.wtm:
mv = Move(self, self.king_pos[1], C1, is_ooo=True)
else:
mv = Move(self, self.king_pos[0], C8, is_ooo=True)
if mv:
mv.check_pseudo_legal()
mv.check_legal()
return mv
def get_from_sqs(self, pc, sq):
'''given a piece (not including a pawn) and a destination square,
return a list of all legal source squares'''
ret = []
is_sliding = pc in sliding_pieces
for d in piece_moves[pc.lower()]:
cur_sq = sq
while 1:
cur_sq += d
if not valid_sq(cur_sq):
break
if self.board[cur_sq] == pc:
if Move(self, cur_sq, sq).is_legal():
ret.append(cur_sq)
if not (self.board[cur_sq] == '-' and is_sliding):
break
return ret
def is_draw_fifty(self):
        # If checkmate comes on the move that causes the fifty-move
# counter to reach 100, the game is not a draw. That shouldn't
# be a problem because if a player is checkmated, he or she
# won't have a chance to offer a draw and trigger this check.
return self.fifty_count >= 100
def is_draw_repetition(self, side):
        """check for draw by repetition"""
        assert(self.hash == self._compute_hash())
# Note that the most recent possible identical position is
# 4 ply ago, and we only have to check every other move.
# This is a well-known chess engine optimization.
if self.ply < 8:
return False
stop = max(self.ply - self.fifty_count, self.start_ply)
count = 0
hash = self.history.get_hash(self.ply)
i = self.ply - 4
while i >= stop:
if self.history.get_hash(i) == hash:
count += 1
if count == 2:
return True
i -= 2
# Also check the previous position, because unlike OTB chess,
# we do not provide a way to write down a move and offer a draw
# without actually executing the move. (Well, we do: an argument to
# the "draw" command, but few people know about it.)
#
# Previously, FICS allowed either player to claim a draw if the
# current or previous position represented a threefold repetition,
# regardless of which player's move it was. (Note that this is
# different from FIDE rules, which only consider the current
# position and only allow a player to claim a draw on his or her
# own turn.) My idea is to only check the previous position
# when the player making the draw offer has the move, to avoid
# a situation like the following:
#
# Player A has the move. The current position represents a
# threefold repetition, so player A is entitled to claim a draw.
# Instead, Player A decides to press on, and plays a blunder
# that loses his queen. Player A realizes the mistake before
# the opponent has a chance to move, and claims a draw.
#
# The old fics grants the draw request, unreasonably in my
# opinion. My change should close the loophole.
if self.ply > 8 and (side == WHITE) == self.wtm:
count = 0
hash = self.history.get_hash(self.ply - 1)
i = self.ply - 5
while i >= stop:
if self.history.get_hash(i) == hash:
count += 1
if count == 2:
return True
i -= 2
return False
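    # Hedged illustration of the scan above: with self.ply == 20,
    # start_ply == 0 and fifty_count == 20, the first loop compares the
    # hash at ply 20 against plies 16, 14, 12, ..., 0, and claims a draw
    # once two earlier matches (a threefold repetition) are found.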
def to_xfen(self):
p = []
for r in range(7, -1, -1):
num_empty = 0
for f in range(0, 8):
sq = 0x10 * r + f
pc = self.board[sq]
if pc == '-':
num_empty += 1
else:
if num_empty > 0:
p.append(str(num_empty))
num_empty = 0
p.append(pc)
if num_empty > 0:
p.append(str(num_empty))
num_empty = 0
if r != 0:
p.append('/')
pos_str = ''.join(p)
stm_str = 'w' if self.wtm else 'b'
castling = ''
        if self.check_castle_flags(True, True):
            castling += 'K'
        if self.check_castle_flags(True, False):
            castling += 'Q'
        if self.check_castle_flags(False, True):
            castling += 'k'
        if self.check_castle_flags(False, False):
            castling += 'q'
if castling == '':
castling = '-'
# we follow X-FEN rather than FEN: only print an en passant
# square if there is a legal en passant capture
if self.ep:
ep_str = sq_to_str(self.ep)
assert(ep_str[1] in ['3', '6'])
else:
ep_str = '-'
full_moves = self.ply // 2 + 1
return "%s %s %s %s %d %d" % (pos_str, stm_str, castling, ep_str, self.fifty_count, full_moves)
def check_castle_flags(self, wtm, is_oo):
return bool(self.castle_flags & (1 << (2 * int(wtm) + int(is_oo))))
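    # Bit layout implied by check_castle_flags: bit 0 = black queenside,
    # bit 1 = black kingside, bit 2 = white queenside, bit 3 = white
    # kingside, so castle_flags == 0xf means all four rights are intact.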
class Chess960(BaseVariant):
def __init__(self, game):
self.game = game
self.idn = game.idn
self.pos = Position(db.fen_from_idn(self.idn))
self.name = 'chess960'
def parse_move(self, s, conn):
"""Try to parse a move. If it looks like a move but
is erroneous or illegal, raise an exception. Return the move
        if parsing was successful, or False if it does not look like a move
and should be processed further."""
mv = None
try:
# castling
mv = self.pos.move_from_castle(s)
# san
if not mv:
mv = self.pos.move_from_san(s)
# long algebraic
if not mv:
mv = self.pos.move_from_lalg(s)
except IllegalMoveError as e:
print e.reason
raise
return mv
def do_move(self, mv):
mv.to_san()
self.pos.make_move(mv)
self.pos.detect_check()
mv.add_san_decorator()
def undo_move(self):
self.pos.undo_move(self.pos.get_last_move())
def get_turn(self):
return WHITE if self.pos.wtm else BLACK
def init_direction_table():
for r in range(8):
for f in range(8):
sq = 0x10 * r + f
for d in piece_moves['q']:
cur_sq = sq + d
while valid_sq(cur_sq):
assert(0 <= cur_sq - sq + 0x7f <= 0xff)
if direction_table[cur_sq - sq + 0x7f] != 0:
assert(d == direction_table[cur_sq - sq + 0x7f])
else:
direction_table[cur_sq - sq + 0x7f] = d
cur_sq += d
init_direction_table()
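# Hedged note on direction_table: after initialization,
# direction_table[b - a + 0x7f] holds the single-step delta from square a
# toward square b whenever the two squares share a rank, file or diagonal
# (and 0 otherwise), which presumably lets move/attack code walk the ray
# between two squares directly.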
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
|
ecolitan/fatics
|
src/variant/chess960.py
|
Python
|
agpl-3.0
| 50,954
|
from datetime import datetime, timezone, time
import dateparser
def zero_time_with_timezone(date, tz=timezone.utc):
return datetime.combine(date, time(tzinfo=tz))
def attempt_parse_date(val):
parsed_date = dateparser.parse(val, languages=['en'])
if parsed_date is None:
# try other strategies?
pass
return parsed_date
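# Hedged usage sketch ('1 May 2021' is just an illustrative input; what
# dateparser accepts depends on its settings):
#
#   parsed = attempt_parse_date('1 May 2021')
#   if parsed is not None:
#       midnight_utc = zero_time_with_timezone(parsed.date())
#       # -> datetime(2021, 5, 1, 0, 0, tzinfo=timezone.utc)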
|
andrewgleave/whim
|
web/whim/core/time.py
|
Python
|
mit
| 354
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2019
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stan.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='stan.proto',
package='stan',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\nstan.proto\x12\x04stan\"#\n\x0fQuestionRequest\x12\x10\n\x08question\x18\x01 \x01(\t\"8\n\x10QuestionResponse\x12\x0e\n\x06\x61nswer\x18\x01 \x01(\t\x12\x14\n\x0cwas_answered\x18\x02 \x01(\x08\x32\xe3\x03\n\x04Stan\x12I\n\x16OneQuestionOneResponse\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00\x12M\n\x18ManyQuestionsOneResponse\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00(\x01\x12M\n\x18OneQuestionManyResponses\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00\x30\x01\x12P\n\x19ManyQuestionsManyReponses\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00(\x01\x30\x01\x12N\n\x1bOneQuestionOneErrorResponse\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00\x12P\n\x1dOneErroredQuestionOneResponse\x12\x15.stan.QuestionRequest\x1a\x16.stan.QuestionResponse\"\x00\x62\x06proto3')
)
_QUESTIONREQUEST = _descriptor.Descriptor(
name='QuestionRequest',
full_name='stan.QuestionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='question', full_name='stan.QuestionRequest.question', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=55,
)
_QUESTIONRESPONSE = _descriptor.Descriptor(
name='QuestionResponse',
full_name='stan.QuestionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='answer', full_name='stan.QuestionResponse.answer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='was_answered', full_name='stan.QuestionResponse.was_answered', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=113,
)
DESCRIPTOR.message_types_by_name['QuestionRequest'] = _QUESTIONREQUEST
DESCRIPTOR.message_types_by_name['QuestionResponse'] = _QUESTIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
QuestionRequest = _reflection.GeneratedProtocolMessageType('QuestionRequest', (_message.Message,), dict(
DESCRIPTOR = _QUESTIONREQUEST,
__module__ = 'stan_pb2'
# @@protoc_insertion_point(class_scope:stan.QuestionRequest)
))
_sym_db.RegisterMessage(QuestionRequest)
QuestionResponse = _reflection.GeneratedProtocolMessageType('QuestionResponse', (_message.Message,), dict(
DESCRIPTOR = _QUESTIONRESPONSE,
__module__ = 'stan_pb2'
# @@protoc_insertion_point(class_scope:stan.QuestionResponse)
))
_sym_db.RegisterMessage(QuestionResponse)
_STAN = _descriptor.ServiceDescriptor(
name='Stan',
full_name='stan.Stan',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=116,
serialized_end=599,
methods=[
_descriptor.MethodDescriptor(
name='OneQuestionOneResponse',
full_name='stan.Stan.OneQuestionOneResponse',
index=0,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ManyQuestionsOneResponse',
full_name='stan.Stan.ManyQuestionsOneResponse',
index=1,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='OneQuestionManyResponses',
full_name='stan.Stan.OneQuestionManyResponses',
index=2,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ManyQuestionsManyReponses',
full_name='stan.Stan.ManyQuestionsManyReponses',
index=3,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='OneQuestionOneErrorResponse',
full_name='stan.Stan.OneQuestionOneErrorResponse',
index=4,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='OneErroredQuestionOneResponse',
full_name='stan.Stan.OneErroredQuestionOneResponse',
index=5,
containing_service=None,
input_type=_QUESTIONREQUEST,
output_type=_QUESTIONRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_STAN)
DESCRIPTOR.services_by_name['Stan'] = _STAN
# @@protoc_insertion_point(module_scope)
|
instana/python-sensor
|
tests/apps/grpc_server/stan_pb2.py
|
Python
|
mit
| 6,153
|
from pybaco.baco import Baco
|
rougeth/pybaco
|
__init__.py
|
Python
|
gpl-2.0
| 29
|
#!/usr/bin/env python3
# TBD
type Config struct {
XXX map[string]interface{} `yaml:",inline"`
}
// avoid recursion in UnmarshalYAML
type configAlias Config
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
a := (*configAlias)(c)
if err := unmarshal(a); err != nil {
return err
}
if len(c.XXX) != 0 {
return errors.Errorf("undefined fields %v", c.XXX)
}
return nil
}
func (c *Config) Apply() error {
if err := c.Validate(); err != nil {
return err
}
return nil
}
func (c *Config) Validate() error {
return nil
}
|
xephonhq/xephon-k
|
_legacy/script/gen_config.py
|
Python
|
mit
| 566
|
from datetime import datetime
from flask import render_template, session, redirect, url_for, request, Response
from . import ros
from ..models import Node, File
from .. import db
@ros.route('/')
def index():
return render_template('ros/cover.html')
@ros.route('/file/edit/<int:id>')
def edit_file(id):
s = File.query.get_or_404(id)
return render_template('ros/edit.html', file=s)
@ros.route('/nodes')
def nodes():
return render_template('ros/nodes.html', current_time=datetime.utcnow())
@ros.route('/nodes/<int:id>')
def files_of_node(id):
    return render_template('ros/files.html', current_time=datetime.utcnow(), node_id=id)
@ros.route('/rosnodes')
def rosnodes():
return render_template('ros/rosnodes.html', current_time=datetime.utcnow())
@ros.route('/rostopics')
def rostopics():
return render_template('ros/rostopics.html', current_time=datetime.utcnow())
@ros.route('/rosconsole')
def rosconsole():
return render_template('ros/rosconsole.html', current_time=datetime.utcnow())
@ros.route('/test')
def test_page():
return render_template('ros/test.html')
@ros.route('/joy')
def joy_page():
return render_template('ros/joystick.html')
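# Hedged routing summary: with this blueprint registered at the site root,
# GET / serves the cover page, GET /nodes/3 renders ros/files.html with
# node_id == 3, and GET /file/edit/7 returns 404 unless a File row with
# id 7 exists (via get_or_404).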
|
dotbot-io/webapp
|
app/ros/views.py
|
Python
|
gpl-2.0
| 1,185
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import TestServer
import server.redhat_xmlrpc.downloads
class TestProxy(TestServer.TestServer):
def __init__(self):
TestServer.TestServer.__init__(self)
self._init_redhat_xmlrpc_downloads()
def _init_redhat_xmlrpc_downloads(self):
self.downloads = server.redhat_xmlrpc.downloads.Downloads()
def getDownloads(self):
return self.downloads
if __name__ == "__main__":
server = TestProxy()
downloads = server.getDownloads()
|
moio/spacewalk
|
backend/server/test/TestProxy.py
|
Python
|
gpl-2.0
| 1,090
|
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
"""
Usage:
``chipsec_main.py -i -m tools.remote [ -a <ipaddr:port> ]``
"""
import time
import errno
import socket
from chipsec.module_common import *
from chipsec.file import *
class Remote(BaseModule):
def __init__(self):
BaseModule.__init__(self)
self.sock = None
self.conn = None
self.data = ''
def send_line(self, msg):
return self.conn.sendall(msg + '\n')
def recv_line(self):
line = ''
while '\n' not in self.data:
buffer = self.conn.recv(1024)
self.data += buffer
if not buffer:
return ''
line, self.data = self.data.split('\n', 1)
return line
def execute_helper_command(self):
try:
values = self.recv_line()
if not values:
logger().log('\nConnection reset by peer!\n')
return False
try:
values = eval(values)
attr = getattr(self.cs.helper.helper, values[0])
if callable(attr):
result = [True, attr(*values[1], **values[2])]
else:
result = [True, attr]
except Exception as e:
result = [False, e]
self.send_line(str(result))
except socket.error as e:
if e.errno == errno.ECONNRESET:
logger().log('\nConnection reset by peer!\n')
elif logger().VERBOSE:
logger().error(str(e))
return False
return True
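    # Hedged protocol sketch: each request line received above is expected
    # to eval() to a 3-tuple (helper_method_name, args_tuple, kwargs_dict),
    # e.g. the text "('read_pci_reg', (0, 0, 0, 0x2c, 4), {})". Because the
    # payload is passed to eval(), this module implicitly assumes a trusted
    # peer on the network.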
def run(self, module_argv):
self.logger.start_test('Client for remote helper')
host = socket.gethostname()
port = 5000
if module_argv:
args = module_argv[0].split(':')
if len(args) == 1:
host = args[0]
elif len(args) == 2:
try:
port = int(args[1])
host = args[0]
except ValueError:
self.logger.error("Invalid port: %s" % args[1])
return ModuleResult.FAILED
else:
self.logger.error("Invalid parameter: %s" % module_argv[0])
return ModuleResult.FAILED
host = socket.gethostbyname(host)
while True:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((host, port))
self.sock.listen(1)
break
except socket.error as e:
if e.errno != errno.EADDRINUSE:
logger().error(str(e))
return ModuleResult.FAILED
logger().log('Waiting for connections on %s:%d' % (host, port))
while True:
count = 0
self.conn, addr = self.sock.accept()
logger().log('Got connection from: %s:%d ' % addr)
while self.execute_helper_command():
count += 1
if count % 1000 == 0:
sys.stdout.write(str(count))
sys.stdout.write('.')
self.conn.close()
return ModuleResult.PASSED
|
mikhail-gorobets/chipsec
|
chipsec/modules/tools/remote.py
|
Python
|
gpl-2.0
| 3,993
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Processor, NotConfiguredException
from googleapiclient import discovery
from google.oauth2.credentials import Credentials
from urllib.parse import urlencode
import re
class GroupsProcessor(Processor):
def process(self, config_key=None):
if config_key is None:
config_key = 'groups'
if config_key not in self.config:
raise NotConfiguredException(
'No Cloud Identity groups configuration specified in config!')
groups_config = self.config[config_key]
service_account = groups_config[
'serviceAccountEmail'] if 'serviceAccountEmail' in groups_config else None
group_credentials = Credentials(
self.get_token_for_scopes([
'https://www.googleapis.com/auth/cloud-identity.groups.readonly'
],
service_account=service_account))
group_service = discovery.build(
'cloudidentity',
'v1',
http=self._get_branded_http(group_credentials))
query = groups_config['query'] if 'query' in groups_config else ""
query_template = self.jinja_environment.from_string(query)
query_template.name = 'query'
query_output = query_template.render()
page_token = None
all_groups = {}
groups_by_owner = {}
groups_by_manager = {}
group_filter = None
if 'filter' in groups_config:
group_filter_template = self.jinja_environment.from_string(
groups_config['filter'])
group_filter_template.name = 'group_filter'
group_filter_output = group_filter_template.render()
group_filter = re.compile(group_filter_output)
while True:
search_query = urlencode({"query": query_output})
search_group_request = group_service.groups().search(
pageToken=page_token, pageSize=1, view="FULL")
param = "&" + search_query
search_group_request.uri += param
response = search_group_request.execute()
if 'groups' in response:
for group in response['groups']:
group_key = group['groupKey']['id']
if group_filter:
if not group_filter.match(group_key):
continue
                    group['owners'] = []
                    group['managers'] = []
                    # initialize once, before paging, so members collected
                    # from earlier pages are not discarded
                    group['memberships'] = {}
                    membership_page_token = None
                    while True:
                        membership_request = group_service.groups().memberships(
                        ).list(parent=group['name'],
                               pageToken=membership_page_token)
                        membership_response = membership_request.execute()
if 'memberships' in membership_response:
owners = []
managers = []
for member in membership_response['memberships']:
member_key = member['preferredMemberKey']['id']
group['memberships'][member_key] = member
if 'roles' in member:
for role in member['roles']:
if role['name'] == 'OWNER':
owners.append(member_key)
group['owners'].append(member_key)
if role['name'] == 'MANAGER':
managers.append(member_key)
group['managers'].append(member_key)
for owner in owners:
if owner not in groups_by_owner:
groups_by_owner[owner] = [group]
else:
groups_by_owner[owner].append(group)
for manager in managers:
if manager not in groups_by_manager:
groups_by_manager[manager] = [group]
else:
groups_by_manager[manager].append(group)
if 'nextPageToken' in membership_response:
membership_page_token = membership_response[
'nextPageToken']
else:
break
all_groups[group_key] = group
if 'nextPageToken' in response:
page_token = response['nextPageToken']
else:
break
res = {
'all_groups': all_groups,
'groups_by_owner': groups_by_owner,
'groups_by_manager': groups_by_manager
}
return res
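    # Hedged summary of the structure returned by process(): 'all_groups'
    # maps each group email to the full group resource (with 'memberships',
    # 'owners' and 'managers' filled in), while 'groups_by_owner' and
    # 'groups_by_manager' map a member email to the list of groups they
    # own or manage.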
|
GoogleCloudPlatform/professional-services
|
tools/pubsub2inbox/processors/groups.py
|
Python
|
apache-2.0
| 5,644
|
"""
Tests.py:
"""
from unittest import TestCase
class SimpleTest(TestCase):
"""
SimpleTest():
"""
def test_adding_something_simple(self):
"""
test_adding_something_simple():
"""
self.assertEqual(1 + 2, 3)
def test_adding_something_not_equal(self):
"""
test_adding_something_not_equal():
"""
self.assertNotEqual(1 + 2, 4)
|
GunnerJnr/_CodeInstitute
|
Stream-3/Full-Stack-Development/19.Djangos-Testing-Framework/1.How-to-Setup-Your-Test-Suite/we_are_social/we_are_social/tests.py
|
Python
|
mit
| 408
|
from random import Random
from random import randint
import random, math
import collections
import operator
from FundingAgency import FundingAgency
from Academic import Academic
from Application import Application
class Population:
"""
A population of academic agents.
This class handles population level actions such as producing grants and
research, and allocating funding. It also provides an interface to obtain
data/statistics on agents.
"""
def __init__(self, params):
self.params = params
self.postdocs_generated = 0
self.timestamp = 0
self.leavers = 0
self.hired = 0
self.academics = 0
if self.params['random_seed']:
self.rng = Random()
else:
self.rng = Random(self.params['seed'])
# initialise agents
self.funding_body = FundingAgency(params)
self.agents = []
for i in xrange(self.params['pop_size']):
self.agents.append(Academic(i, params, self.rng))
# calculate derived parameters
params['grant_count'] = int(len(self.agents) *
self.params['grant_proportion'])
def estimate_output(self, bonus, prop, time=0.0, type='rnd'):
"""
Estimate the total research output of a population given:
- type='max' : best possible allocation of grants
- type='rnd' : random allocation of grants
(averaged over several attempts)
- type='min' : worst possible allocation of grants
and that each individual spends a fixed and equal amount of
time on their applications.
"""
attempts = 1
rq_agents = [(a.research_quality, a) for a in self.agents if a.made_redundant == False]
if type == 'max':
rq_agents.sort(reverse=True)
elif type == 'min':
rq_agents.sort()
elif type == 'rnd':
attempts = 10
research_sum = 0.0
grant_number = int(len(rq_agents) * prop)
for i in range(attempts):
for a in rq_agents[:grant_number]:
research_sum += a[1].calc_research(time, True, bonus, a[0])
for a in rq_agents[grant_number:]:
                research_sum += a[1].calc_research(time, False, bonus, a[0])
return research_sum / attempts
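    # Hedged illustration: with prop == 0.2 and 100 active agents, type='max'
    # grants the top 20 agents by research_quality, type='min' the bottom 20,
    # and type='rnd' a slice of the unsorted list averaged over 10 attempts
    # (note the list is never reshuffled between attempts, so the 10 passes
    # are identical).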
def estimate_output_sum(self):
"Estimate total research across system without any grant funding."
research_sum = 0.0
rq_agents = [(a.research_quality, a) for a in self.agents if not a.made_redundant]
rq_agents.sort(reverse=True)
for a in rq_agents:
research_sum += a[1].calc_research(0.0, True, 0.0, a[0])
return research_sum
## SIM ACTIONS ############################################################
def produce_applications(self):
"""
Produce applications by each agent (who is applying).
"""
[self.funding_body.add_application(
Application(a, self.params, self.rng), self.rng)
for a in self.agents if a.applying]
def evaluate_applications(self):
"""
        Evaluate the submitted applications and allocate grants.
        Generate postdocs in the postdoc case, or new academics in the
        simple growing-population case.
"""
# if self.params['limited_funding'] == True and iterations == 1:
# self.funding_body.init_grants()
postdoc_noise = self.rng.randint(1, 4)
grant_total = 0
self.funding_body.rank_applications()
if self.params['limited_funding'] == False:
successful = self.funding_body.get_grant_recipients(self.params, self.academics)
else:
successful = self.funding_body.get_recipients_limited(self.params, self.academics)
print("Total academics now: {}".format(self.academics))
# print(successful)
for a in successful:
self.agents[a].grant_held = True
self.agents[a].grant_count += 1
if self.params['use_postdocs'] == 1 or self.params['growing_pop'] == 1:
grant_total += 1
if self.params['use_postdocs'] == 1:
self.generate_postdocs(grant_total//10 + postdoc_noise)
if self.params['growing_pop'] == 1:
self.add_academics(grant_total//20 + postdoc_noise)
print('Total grants disbursed: {}'.format(grant_total))
## def evaluate_applications_pdr(self):
##
## """
## Evalute the submitted applications and allocate grants.
## """
##
## postdoc_noise = self.rng.randint(1, 4)
## grant_total = 0
## self.funding_body.rank_applications()
## successful = self.funding_body.get_grant_recipients_pdr(self.params, len(self.agents) - self.postdocs_generated)
## for a in successful:
## self.agents[a].grant_held = True
## self.agents[a].grant_count += 1
## if self.params['use_postdocs'] == 1:
## grant_total += 1
## if self.params['use_postdocs'] == 1:
## self.generate_postdocs(grant_total//6 + postdoc_noise)
## print("Total cash moneyz: {}".format(grant_total))
def produce_research(self):
"""
Produce research by each agent. Return total research.
"""
return sum([a.produce_research(self.params) for a in self.agents if not a.made_redundant])
def update_strategies(self):
"""
Update agent strategies.
"""
for a in self.agents:
if a.postdoc_status == 0:
if self.params['learning_type'] == 'thermostat':
a.update_strategy_self_thermostat(self.params, self.rng)
elif self.params['learning_type'] == 'memory':
a.update_strategy_self_memory(self.params, self.rng)
else:
                    raise ValueError("Unknown learning type")
def clear_all(self):
"""
Clear any grants currently held by agents.
"""
for a in self.agents:
a.grant_held = False
self.funding_body.clear_applications()
def update_postdocs(self):
"""
Update postdoc contracts and status
"""
for a in self.agents:
if a.postdoc_status == 1:
if a.contract_length >= 1:
a.contract_length -= 1
                if a.newb >= 1:
a.newb -= 1
# print('Contract Length: {}'.format(a.contract_length))
def update_careers(self):
"""
Decrement career_length by one each iteration
"""
for a in self.agents:
if a.career_length >= 1:
                a.career_length -= 1
elif a.career_length <= 1 and not a.retired:
a.retire()
def update_newbies(self):
"""
Update newbie academics in growing population case
"""
for a in self.agents:
if a.newb >= 1:
a.newb -= 1
def hire_postdocs(self, params):
"""
Base: Hire 15% of postdocs (lucky!)
RQ counts: postdocs with higher RQ get hired
"""
leavers = [a for a in self.agents if a.postdoc_status == 1 and a.contract_length <=0 and a.made_redundant == False]
promotions_count = 0
redundancies_count = 0
if len(leavers) > 0:
ranked_leavers = sorted(leavers, key=lambda x: x.research_quality, reverse=True)
candidate_num = int(math.ceil(params['postdoc_chance'] * len(leavers)))
#print('Research Quality counts: {}'.format(self.params['pdr_rq_counts']))
#print('Postdoc Chance: {}'.format(params['postdoc_chance']))
#print('Candidates: {}'.format(candidate_num))
#print('Ranked Leavers: {}'.format(len(ranked_leavers)))
            if self.params['pdr_rq_counts'] == 0:
                # base case: promotion is random, so use the shuffled list
                random.shuffle(leavers)
                for a in leavers[:candidate_num]:
                    a.former_postdoc = 1
                    a.tenured = True
                    a.postdoc_status = 0
                    a.set_random_time_grant(self.params['init_time'], self.rng)
                    a.applying = True
                    promotions_count += 1
                for a in leavers[candidate_num:]:
                    a.made_redundant = True
                    a.exited_postdoc = 1
                    redundancies_count += 1
elif self.params['pdr_rq_counts'] == 1:
for a in ranked_leavers[:candidate_num]:
a.former_postdoc = 1
a.tenured = True
a.postdoc_status = 0
a.set_random_time_grant(self.params['init_time'], self.rng)
a.applying = True
promotions_count += 1
for a in ranked_leavers[candidate_num:]:
a.made_redundant = True
a.exited_postdoc = 1
redundancies_count += 1
#print('Total agents sacked this round: {}'.format(redundancies_count))
#print('Total agents promoted this round: {}'.format(promotions_count))
if self.params['pdr_rq_counts'] == 0:
self.agents = list(set(self.agents+leavers))
else:
self.agents = list(set(self.agents + ranked_leavers))
def add_academics(self, num_academics):
"""
Add new academics for a simple growing population case
Each timestep a selection of new academics come in based on the number of grants issued
"""
for a in range(0, num_academics):
self.new_id = (len(self.agents))
print("Length of agent list: {}".format(self.new_id))
self.agents.append(Academic(self.new_id, self.params, self.rng))
self.agents[self.new_id].research_quality = self.rng.random()
self.agents[self.new_id].applying = True
self.agents[self.new_id].grant_held = False
self.agents[self.new_id].tenured = True
self.agents[self.new_id].grant_count = 0
self.agents[self.new_id].set_random_time_grant(self.params['init_time'], self.rng)
self.agents[self.new_id].research = 0.0
self.agents[self.new_id].research_sum = 0.0
self.agents[self.new_id].memory = []
self.agents[self.new_id].former_postdoc = 0
self.agents[self.new_id].exited_postdoc = 0
self.agents[self.new_id].num_postdocs = 0
self.agents[self.new_id].postdoc_status = 0
self.agents[self.new_id].contract_length = 100
self.agents[self.new_id].time_grant = 0.0
self.agents[self.new_id].newb = 2
def generate_postdocs(self, num_postdocs):
"""
Generate postdocs as requested by grant allocation functions
Each new postdoc gets assigned appropriate attributes
"""
for a in range(0, num_postdocs):
self.postdoc_id = (len(self.agents))
#print("New postdoc: {}".format(self.postdoc_id))
self.agents.append(Academic(self.postdoc_id, self.params, self.rng))
self.agents[self.postdoc_id].research_quality = self.rng.random()
self.agents[self.postdoc_id].applying = False
self.agents[self.postdoc_id].grant_held = False
self.agents[self.postdoc_id].tenured = False
self.agents[self.postdoc_id].grant_count = 0
self.agents[self.postdoc_id].set_random_time_grant(self.params['init_time'], self.rng)
self.agents[self.postdoc_id].research = 0.0
self.agents[self.postdoc_id].research_sum = 0.0
self.agents[self.postdoc_id].memory = []
self.agents[self.postdoc_id].former_postdoc = 0
self.agents[self.postdoc_id].exited_postdoc = 0
self.agents[self.postdoc_id].num_postdocs = 1
self.agents[self.postdoc_id].postdoc_status = 1
self.agents[self.postdoc_id].contract_length = randint(4,10)
self.agents[self.postdoc_id].time_grant = 0.0
self.agents[self.postdoc_id].newb = 2
## self.agents[a].research_quality = self.rng.random()
## self.agents[a].applying = False
## self.agents[a].grant_held = False
## self.agents[a].tenured = False
## self.agents[a].grant_count = 0
## self.agents[a].set_random_time_grant(self.params['init_time'], self.rng)
## self.agents[a].research = 0.0
## self.agents[a].research_sum = 0.0
## self.agents[a].memory = []
## self.agents[a].former_postdoc = 0
## self.agents[a].exited_postdoc = 0
## self.agents[a].num_postdocs = 1
## self.agents[a].postdoc_status = 1
## self.agents[a].contract_length = randint(4,10)
## self.agents[a].time_grant = 0.0
## self.agents[a].newb = 2
self.postdocs_generated += 1
## DATA ACCESS ############################################################
def all_stats(self):
"""
Return a table of (for the current iteration):
[ID, rq, app, tg, g, r, PDR status]
"""
return [
(a.id, a.research_quality, a.applying,
a.time_grant, a.grant_held, a.research, a.postdoc_status)
for a in self.agents
]
def acceptance_rate(self):
"""
Return tuple containing # grants allocated and acceptance rate:
(# grants allocated, (# grants allocated) / (# grants submitted)).
"""
submitted = 0
allocated = 0
for a in self.agents:
if a.applying: submitted += 1
if a.grant_held: allocated += 1
if submitted > 0:
return allocated, float(allocated) / submitted
else:
return allocated, 0.0
def all_r(self):
"Return a list of all research output values for current year."
return [a.research for a in self.agents]
def all_tg(self):
"Return a list of all tg values."
return [a.time_grant for a in self.agents]
def all_apply(self):
"Return a list of all applying values."
return [a.applying for a in self.agents]
def all_held(self):
"Return a list of all grant_held values."
return [a.grant_held for a in self.agents]
def all_r_grant(self):
"Return a list of tg values of agents holding grants."
return [a.research for a in self.agents if a.grant_held]
def all_r_fail(self):
"""
Return a list of r values of agents who apply but fail.
"""
return [a.research for a in self.agents if
(a.applying and not a.grant_held)]
def all_r_no_grant(self):
"Return a list of r values of agents not holding grants."
return [a.research for a in self.agents if not a.grant_held]
def all_r_pdr(self):
"Return a list of r values of agents who are postdocs."
return [a.research for a in self.agents if a.postdoc_status == 1 and a.made_redundant == False]
def all_r_former_pdr(self):
"Return a list of r values of agents who were promoted."
return [a.research for a in self.agents if a.former_postdoc == 1 and a.made_redundant == False]
def all_r_old_academic(self):
"Return a list of r values of agents who are established academics."
#return [a.research for a in self.agents if a.postdoc_status == 0 and a.former_postdoc == 0 and a.made_redundant == False]
return [a.research for a in self.agents if a.postdoc_status == 0 and a.former_postdoc == 0 and a.career_length <= 20]
def all_tg_grant(self):
"Return a list of tg values of agents holding grants."
return [a.time_grant for a in self.agents if a.grant_held]
def all_tg_fail(self):
"""
Returns a list of tg values of agents who apply but fail.
"""
return [a.time_grant for a in self.agents if
(a.applying and not a.grant_held)]
def all_tg_no_grant(self):
"Return a list of tg values of agents not holding grants."
return [a.time_grant for a in self.agents if not a.grant_held]
def all_rq(self):
"Return a list of rq values of all agents."
return [a.research_quality for a in self.agents]
def all_rq_grant(self):
"Return a list of rq values of agents holding grants."
return [a.research_quality for a in self.agents if a.grant_held]
def all_rq_no_grant(self):
"Return a list of rq values of agents not holding grants."
return [a.research_quality for a in self.agents if not a.grant_held]
def all_rq_fail(self):
"Returns a list of rq values of agents who apply but fail."
return [a.research_quality for a in self.agents if
(a.applying and not a.grant_held)]
def all_rq_no_apply(self):
"Returns a list of rq values of agents who don't apply."
return [a.research_quality for a in self.agents if not a.applying]
def all_rq_pdr(self):
"Returns a list of rq values of agents who are postdocs."
return [a.research_quality for a in self.agents if a.postdoc_status == 1 and a.made_redundant == False]
def all_rq_former_pdr(self):
"Returns a list of RQ values of agents who have been promoted."
return [a.research_quality for a in self.agents if a.former_postdoc == 1 and a.made_redundant == False]
def all_rq_old_academic(self):
"Returns a list of RQ values of agents who are established academics."
return [a.research_quality for a in self.agents if a.postdoc_status == 0 and a.former_postdoc == 0 and a.made_redundant == False]
def all_grant_counts(self):
"Returns a list of lifetime grant counts for each agent."
return [a.grant_count for a in self.agents]
def postdoc_count(self):
"Returns a total population of postdocs ever generated."
return [self.postdocs_generated]
def academic_count(self):
"Returns a total number of academics."
academics = 0
for a in self.agents:
if a.tenured:
academics += 1
return [academics]
def int_academic_count(self):
"Adds academic count to agent list."
self.academics = 0
for a in self.agents:
if a.tenured:
self.academics += 1
return [self.academics]
## tenured = 0
## new_faculty = 0
## leavers = 0
## for a in self.agents:
## if a.former_postdoc == 1:
## new_faculty += 1
## for a in self.agents:
## if a.exited_postdoc == 1:
## leavers += 1
## for a in self.agents:
## if a.applying:
## tenured += 1
## return [tenured - self.postdocs_generated + new_faculty - leavers]
def exited_count(self):
"Returns number of exited postdocs."
self.leavers = 0
for a in self.agents:
if a.made_redundant == True:
self.leavers += 1
        print('Leavers: {} Total Postdocs: {}'.format(self.leavers, self.postdocs_generated))
return [self.leavers]
def current_postdocs(self):
"Returns number of active postdocs in the current semester."
active_pdrs = 0
pdrs = 0
for a in self.agents:
if a.postdoc_status == 1 and a.former_postdoc == 0 and a.made_redundant == False:
active_pdrs += 1
for a in self.agents:
if a.postdoc_status == 1:
pdrs += 1
print('Active Postdocs: {} Other postdocs: {}'.format(active_pdrs, pdrs))
# return [active_pdrs]
def print_all(self):
for a in self.agents:
a.print_all()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
|
thorsilver/Modelling-academic-job-security
|
Population.py
|
Python
|
mit
| 20,911
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import relativedelta
import datetime
import logging
import time
from openerp.osv import osv, fields
import openerp.tools
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
_logger = logging.getLogger(__name__)
class account_analytic_invoice_line(osv.osv):
_name = "account.analytic.invoice.line"
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.quantity * line.price_unit
if line.analytic_account_id.pricelist_id:
cur = line.analytic_account_id.pricelist_id.currency_id
res[line.id] = self.pool.get('res.currency').round(cr, uid, cur, res[line.id])
return res
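    # Hedged example: a line with quantity == 2.0 and price_unit == 9.995 on
    # an account whose pricelist currency rounds to 2 decimal places yields
    # price_subtotal == 19.99.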
_columns = {
'product_id': fields.many2one('product.product','Product',required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete='cascade'),
'name': fields.text('Description', required=True),
'quantity': fields.float('Quantity', required=True),
'uom_id': fields.many2one('product.uom', 'Unit of Measure',required=True),
'price_unit': fields.float('Unit Price', required=True),
'price_subtotal': fields.function(_amount_line, string='Sub Total', type="float",digits_compute= dp.get_precision('Account')),
}
_defaults = {
'quantity' : 1,
}
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None, context=None):
context = context or {}
uom_obj = self.pool.get('product.uom')
company_id = company_id or False
local_context = dict(context, company_id=company_id, force_company=company_id, pricelist=pricelist_id)
if not product:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=local_context)
if part.lang:
local_context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=local_context)
price = False
if price_unit is not False:
price = price_unit
elif pricelist_id:
price = res.price
if price is False:
price = res.list_price
if not name:
name = self.pool.get('product.product').name_get(cr, uid, [res.id], context=local_context)[0][1]
if res.description_sale:
name += '\n'+res.description_sale
result.update({'name': name or False,'uom_id': uom_id or res.uom_id.id or False, 'price_unit': price})
res_final = {'value':result}
if result['uom_id'] != res.uom_id.id:
selected_uom = uom_obj.browse(cr, uid, result['uom_id'], context=local_context)
new_price = uom_obj._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uom_id'])
res_final['value']['price_unit'] = new_price
return res_final
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _analysis_all(self, cr, uid, ids, fields, arg, context=None):
dp = 2
res = dict([(i, {}) for i in ids])
        parent_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
accounts = self.browse(cr, uid, ids, context=context)
for f in fields:
if f == 'user_ids':
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
if parent_ids:
cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int((id * max_user) + x[0]) for x in result]
elif f == 'month_ids':
if parent_ids:
cr.execute('SELECT DISTINCT(month_id) FROM account_analytic_analysis_summary_month ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int(id * 1000000 + int(x[0])) for x in result]
elif f == 'last_worked_invoiced_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id;", (parent_ids,))
for account_id, sum in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = sum
elif f == 'ca_to_invoice':
for id in ids:
res[id][f] = 0.0
res2 = {}
for account in accounts:
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type != 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id][f] = 0.0
for product_id, price, user_id, factor_id, qty, uom, line_name in cr.fetchall():
price = -price
if product_id:
price = self.pool.get('account.analytic.line')._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id][f] += price * qty * (100-factor.factor or 0.0) / 100.0
# sum both result on account_id
for id in ids:
res[id][f] = round(res.get(id, {}).get(f, 0.0), dp) + round(res2.get(id, 0.0), 2)
elif f == 'last_invoice_date':
for id in ids:
res[id][f] = False
if parent_ids:
                    cr.execute("SELECT account_analytic_line.account_id, \
DATE(MAX(account_invoice.date_invoice)) \
FROM account_analytic_line \
JOIN account_invoice \
ON account_analytic_line.invoice_id = account_invoice.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_line.invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lid in cr.fetchall():
res[account_id][f] = lid
elif f == 'last_worked_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lwd in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = lwd
elif f == 'hours_qtt_non_invoiced':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
AND invoice_id IS NULL \
AND to_invoice IS NOT NULL \
GROUP BY account_analytic_line.account_id;",(parent_ids,))
for account_id, sua in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(sua, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'hours_quantity':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
GROUP BY account_analytic_line.account_id",(parent_ids,))
ff = cr.fetchall()
for account_id, hq in ff:
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(hq, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'ca_theorical':
# TODO Take care of pricelist and purchase !
for id in ids:
res[id][f] = 0.0
                # Warning: this computation does not take pricelists into
                # account; it only considers list_price.
if parent_ids:
cr.execute("""SELECT account_analytic_line.account_id AS account_id, \
COALESCE(SUM((account_analytic_line.unit_amount * pt.list_price) \
- (account_analytic_line.unit_amount * pt.list_price \
* hr.factor)), 0.0) AS somme
FROM account_analytic_line \
LEFT JOIN account_analytic_journal \
ON (account_analytic_line.journal_id = account_analytic_journal.id) \
JOIN product_product pp \
ON (account_analytic_line.product_id = pp.id) \
JOIN product_template pt \
ON (pp.product_tmpl_id = pt.id) \
JOIN account_analytic_account a \
ON (a.id=account_analytic_line.account_id) \
JOIN hr_timesheet_invoice_factor hr \
ON (hr.id=a.to_invoice) \
WHERE account_analytic_line.account_id IN %s \
AND a.to_invoice IS NOT NULL \
AND account_analytic_journal.type IN ('purchase', 'general')
GROUP BY account_analytic_line.account_id""",(parent_ids,))
for account_id, sum in cr.fetchall():
res[account_id][f] = round(sum, dp)
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
#Search all invoice lines not in cancelled state that refer to this analytic account
inv_line_obj = self.pool.get("account.invoice.line")
inv_lines = inv_line_obj.search(cr, uid, ['&', ('account_analytic_id', 'in', child_ids), ('invoice_id.state', 'not in', ['draft', 'cancel']), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
for line in inv_line_obj.browse(cr, uid, inv_lines, context=context):
if line.invoice_id.type == 'out_refund':
res[line.account_analytic_id.id] -= line.price_subtotal
else:
res[line.account_analytic_id.id] += line.price_subtotal
for acc in self.browse(cr, uid, res.keys(), context=context):
res[acc.id] = res[acc.id] - (acc.timesheet_ca_invoiced or 0.0)
res_final = res
return res_final
def _total_cost_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
cr.execute("""SELECT account_analytic_line.account_id, COALESCE(SUM(amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND amount<0 \
GROUP BY account_analytic_line.account_id""",(child_ids,))
for account_id, sum in cr.fetchall():
res[account_id] = round(sum,2)
res_final = res
return res_final
def _remaining_hours_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.quantity_max != 0:
res[account.id] = account.quantity_max - account.hours_quantity
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _remaining_hours_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.hours_qtt_est - account.timesheet_ca_invoiced, account.ca_to_invoice)
return res
def _hours_qtt_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.hours_quantity - account.hours_qtt_non_invoiced
if res[account.id] < 0:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _revenue_per_hour_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.hours_qtt_invoiced == 0:
res[account.id]=0.0
else:
res[account.id] = account.ca_invoiced / account.hours_qtt_invoiced
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _real_margin_rate_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.ca_invoiced == 0:
res[account.id]=0.0
elif account.total_cost != 0.0:
res[account.id] = -(account.real_margin / account.total_cost) * 100
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _fix_price_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
sale_obj = self.pool.get('sale.order')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
sale_ids = sale_obj.search(cr, uid, [('project_id','=', account.id), ('state', '=', 'manual')], context=context)
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
res[account.id] += sale.amount_untaxed
for invoice in sale.invoice_ids:
if invoice.state != 'cancel':
res[account.id] -= invoice.amount_untaxed
return res
def _timesheet_ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
inv_ids = []
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('invoice_id.state', 'not in', ['draft', 'cancel']), ('to_invoice','!=', False), ('journal_id.type', '=', 'general'), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in inv_ids:
inv_ids.append(line.invoice_id)
if line.invoice_id.type == 'out_refund':
res[account.id] -= line.invoice_id.amount_untaxed
else:
res[account.id] += line.invoice_id.amount_untaxed
return res
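# Editor's illustration (not part of the upstream module): the method above
# counts each customer invoice once, subtracting refunds, even when several
# timesheet lines point at the same invoice. A standalone sketch of that
# dedup-and-sign logic over plain tuples:
def _sketch_invoiced_amount(lines):
    """lines: iterable of (invoice_id, invoice_type, amount_untaxed) tuples."""
    seen = set()
    total = 0.0
    for inv_id, inv_type, amount in lines:
        if inv_id in seen:
            continue  # each invoice is counted only once
        seen.add(inv_id)
        total += -amount if inv_type == 'out_refund' else amount
    return total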
def _remaining_ca_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.amount_max - account.ca_invoiced, account.fix_price_to_invoice)
return res
def _real_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_invoiced + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _theorical_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_theorical + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _is_overdue_quantity(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for record in self.browse(cr, uid, ids, context=context):
if record.quantity_max > 0.0:
result[record.id] = int(record.hours_quantity > record.quantity_max)
else:
result[record.id] = 0
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
result = set()
for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
result.add(line.account_id.id)
return list(result)
def _get_total_estimation(self, account):
tot_est = 0.0
if account.fix_price_invoices:
tot_est += account.amount_max
if account.invoice_on_timesheets:
tot_est += account.hours_qtt_est
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = 0.0
if account.fix_price_invoices:
total_invoiced += account.ca_invoiced
if account.invoice_on_timesheets:
total_invoiced += account.timesheet_ca_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = 0.0
if account.fix_price_invoices:
total_remaining += account.remaining_ca
if account.invoice_on_timesheets:
total_remaining += account.remaining_hours_to_invoice
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = 0.0
if account.fix_price_invoices:
total_toinvoice += account.fix_price_to_invoice
if account.invoice_on_timesheets:
total_toinvoice += account.ca_to_invoice
return total_toinvoice
def _sum_of_fields(self, cr, uid, ids, name, arg, context=None):
res = dict([(i, {}) for i in ids])
for account in self.browse(cr, uid, ids, context=context):
res[account.id]['est_total'] = self._get_total_estimation(account)
res[account.id]['invoiced_total'] = self._get_total_invoiced(account)
res[account.id]['remaining_total'] = self._get_total_remaining(account)
res[account.id]['toinvoice_total'] = self._get_total_toinvoice(account)
return res
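# Editor's illustration (not part of the upstream module): per the help text
# on 'remaining_total' below, each remaining subtotal is the maximum of
# '(Estimation - Invoiced)' and 'To Invoice', as computed in
# _remaining_ca_calc and _remaining_hours_to_invoice_calc above:
def _sketch_remaining_subtotal(estimation, invoiced, to_invoice):
    """max(estimation - invoiced, to_invoice), e.g. (1000, 400, 250) -> 600."""
    return max(estimation - invoiced, to_invoice)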
_columns = {
'is_overdue_quantity' : fields.function(_is_overdue_quantity, method=True, type='boolean', string='Overdue Quantity',
store={
'account.analytic.line' : (_get_analytic_account, None, 20),
'account.analytic.account': (lambda self, cr, uid, ids, c=None: ids, ['quantity_max'], 10),
}),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
'total_cost': fields.function(_total_cost_calc, type='float', string='Total Costs',
help="Total of costs for this account. It includes real costs (from invoices) and indirect costs, like time spent on timesheets.",
digits_compute=dp.get_precision('Account')),
'ca_to_invoice': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Amount',
help="If invoicing from the analytic account, the remaining amount you can invoice to the customer, based on the total costs.",
digits_compute=dp.get_precision('Account')),
'ca_theorical': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Theoretical Revenue',
help="Based on the costs you had on the project, what the revenue would have been if all these costs had been invoiced at the normal sale price provided by the pricelist.",
digits_compute=dp.get_precision('Account')),
'hours_quantity': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Total Worked Time',
help="Amount of time spent on the analytic account (from timesheets). It computes quantities on all journals of type 'general'."),
'last_invoice_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Last Invoice Date',
help="If invoicing from the costs, this is the date of the latest invoice."),
'last_worked_invoiced_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Invoiced Cost',
help="If invoicing from the costs, this is the date of the latest work or cost that has been invoiced."),
'last_worked_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Cost/Work',
help="Date of the latest work done on this account."),
'hours_qtt_non_invoiced': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Time',
help="Amount of time (hours/days, from journals of type 'general') that can be invoiced if you invoice based on the analytic account."),
'hours_qtt_invoiced': fields.function(_hours_qtt_invoiced_calc, type='float', string='Invoiced Time',
help="Amount of time (hours/days) that can be invoiced, plus the time that has already been invoiced."),
'remaining_hours': fields.function(_remaining_hours_calc, type='float', string='Remaining Time',
help="Computed using the formula: Maximum Time - Total Worked Time"),
'remaining_hours_to_invoice': fields.function(_remaining_hours_to_invoice_calc, type='float', string='Remaining Time',
help="Computed using the formula: Expected on timesheets - Total invoiced on timesheets"),
'fix_price_to_invoice': fields.function(_fix_price_to_invoice_calc, type='float', string='Fixed Price to Invoice',
help="Sum of quotations for this contract."),
'timesheet_ca_invoiced': fields.function(_timesheet_ca_invoiced_calc, type='float', string='Invoiced Timesheet Amount',
help="Sum of timesheet lines invoiced for this contract."),
'remaining_ca': fields.function(_remaining_ca_calc, type='float', string='Remaining Revenue',
help="Computed using the formula: Max Invoice Price - Invoiced Amount.",
digits_compute=dp.get_precision('Account')),
'revenue_per_hour': fields.function(_revenue_per_hour_calc, type='float', string='Revenue per Time (real)',
help="Computed using the formula: Invoiced Amount / Total Time",
digits_compute=dp.get_precision('Account')),
'real_margin': fields.function(_real_margin_calc, type='float', string='Real Margin',
help="Computed using the formula: Invoiced Amount - Total Costs.",
digits_compute=dp.get_precision('Account')),
'theorical_margin': fields.function(_theorical_margin_calc, type='float', string='Theoretical Margin',
help="Computed using the formula: Theoretical Revenue - Total Costs",
digits_compute=dp.get_precision('Account')),
'real_margin_rate': fields.function(_real_margin_rate_calc, type='float', string='Real Margin Rate (%)',
help="Computed using the formula: (Real Margin / Total Costs) * 100.",
digits_compute=dp.get_precision('Account')),
'fix_price_invoices' : fields.boolean('Fixed Price'),
'invoice_on_timesheets' : fields.boolean("On Timesheets"),
'month_ids': fields.function(_analysis_all, multi='analytic_analysis', type='many2many', relation='account_analytic_analysis.summary.month', string='Month'),
'user_ids': fields.function(_analysis_all, multi='analytic_analysis', type="many2many", relation='account_analytic_analysis.summary.user', string='User'),
'hours_qtt_est': fields.float('Estimation of Hours to Invoice'),
'est_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Estimation"),
'invoiced_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Invoiced"),
'remaining_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Remaining", help="Expectation of remaining income for this contract. Computed as the sum of remaining subtotals which, in turn, are computed as the maximum between '(Estimation - Invoiced)' and 'To Invoice' amounts"),
'toinvoice_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total to Invoice", help="Sum of everything that could be invoiced for this contract."),
'recurring_invoice_line_ids': fields.one2many('account.analytic.invoice.line', 'analytic_account_id', 'Invoice Lines', copy=True),
'recurring_invoices' : fields.boolean('Generate recurring invoices automatically'),
'recurring_rule_type': fields.selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)'),
], 'Recurrency', help="Invoices are generated automatically and repeat at the specified interval"),
'recurring_interval': fields.integer('Repeat Every', help="Repeat every (Days/Weeks/Months/Years)"),
'recurring_next_date': fields.date('Date of Next Invoice'),
}
_defaults = {
'recurring_interval': 1,
'recurring_next_date': lambda *a: time.strftime('%Y-%m-%d'),
'recurring_rule_type':'monthly'
}
def open_sale_order_lines(self,cr,uid,ids,context=None):
if context is None:
context = {}
sale_ids = self.pool.get('sale.order').search(cr,uid,[('project_id','=',context.get('search_default_project_id',False)),('partner_id','in',context.get('search_default_partner_id',False))])
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Sales Order Lines to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'context': context,
'domain' : [('order_id','in',sale_ids)],
'res_model': 'sale.order.line',
'nodestroy': True,
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
template = self.browse(cr, uid, template_id, context=context)
if not ids:
res['value']['fix_price_invoices'] = template.fix_price_invoices
res['value']['amount_max'] = template.amount_max
if not ids:
res['value']['invoice_on_timesheets'] = template.invoice_on_timesheets
res['value']['hours_qtt_est'] = template.hours_qtt_est
if template.to_invoice.id:
res['value']['to_invoice'] = template.to_invoice.id
if template.pricelist_id.id:
res['value']['pricelist_id'] = template.pricelist_id.id
if not ids:
invoice_line_ids = []
for x in template.recurring_invoice_line_ids:
invoice_line_ids.append((0, 0, {
'product_id': x.product_id.id,
'uom_id': x.uom_id.id,
'name': x.name,
'quantity': x.quantity,
'price_unit': x.price_unit,
'analytic_account_id': x.analytic_account_id and x.analytic_account_id.id or False,
}))
res['value']['recurring_invoices'] = template.recurring_invoices
res['value']['recurring_interval'] = template.recurring_interval
res['value']['recurring_rule_type'] = template.recurring_rule_type
res['value']['recurring_invoice_line_ids'] = invoice_line_ids
return res
def onchange_recurring_invoices(self, cr, uid, ids, recurring_invoices, date_start=False, context=None):
value = {}
if date_start and recurring_invoices:
value = {'value': {'recurring_next_date': date_start}}
return value
def cron_account_analytic_account(self, cr, uid, context=None):
context = dict(context or {})
remind = {}
def fill_remind(key, domain, write_pending=False):
base_domain = [
('type', '=', 'contract'),
('partner_id', '!=', False),
('manager_id', '!=', False),
('manager_id.email', '!=', False),
]
base_domain.extend(domain)
accounts_ids = self.search(cr, uid, base_domain, context=context, order='name asc')
accounts = self.browse(cr, uid, accounts_ids, context=context)
for account in accounts:
if write_pending:
account.write({'state' : 'pending'})
remind_user = remind.setdefault(account.manager_id.id, {})
remind_type = remind_user.setdefault(key, {})
remind_type.setdefault(account.partner_id, []).append(account)
# Already expired
fill_remind("old", [('state', 'in', ['pending'])])
# Expires now
fill_remind("new", [('state', 'in', ['draft', 'open']), '|', '&', ('date', '!=', False), ('date', '<=', time.strftime('%Y-%m-%d')), ('is_overdue_quantity', '=', True)], True)
# Expires in less than 30 days
fill_remind("future", [('state', 'in', ['draft', 'open']), ('date', '!=', False), ('date', '<', (datetime.datetime.now() + datetime.timedelta(30)).strftime("%Y-%m-%d"))])
context['base_url'] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
context['action_id'] = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'action_account_analytic_overdue_all')[1]
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'account_analytic_cron_email_template')[1]
for user_id, data in remind.items():
context["data"] = data
_logger.debug("Sending reminder to uid %s", user_id)
self.pool.get('email.template').send_mail(cr, uid, template_id, user_id, force_send=True, context=context)
return True
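# Editor's illustration (not part of the upstream module): fill_remind above
# builds a nested mapping {manager_id: {key: {partner: [accounts]}}} through
# chained setdefault calls. The same grouping over plain tuples:
def _sketch_group_reminders(rows):
    """rows: iterable of (manager_id, key, partner, account) tuples."""
    remind = {}
    for manager_id, key, partner, account in rows:
        remind.setdefault(manager_id, {}).setdefault(key, {}).setdefault(partner, []).append(account)
    return remind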
def onchange_invoice_on_timesheets(self, cr, uid, ids, invoice_on_timesheets, context=None):
if not invoice_on_timesheets:
return {'value': {'to_invoice': False}}
result = {'value': {'use_timesheets': True}}
try:
to_invoice = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
result['value']['to_invoice'] = to_invoice[1]
except ValueError:
pass
return result
def hr_to_invoice_timesheets(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'general'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Timesheets to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
def _prepare_invoice_data(self, cr, uid, contract, context=None):
context = context or {}
journal_obj = self.pool.get('account.journal')
fpos_obj = self.pool['account.fiscal.position']
partner = contract.partner_id
if not partner:
raise osv.except_osv(_('No Customer Defined!'),_("You must first select a Customer for Contract %s!") % contract.name )
fpos_id = fpos_obj.get_fiscal_position(cr, uid, partner.company_id.id, partner.id, context=context)
journal_ids = journal_obj.search(cr, uid, [('type', '=','sale'),('company_id', '=', contract.company_id.id or False)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define a sale journal for the company "%s".') % (contract.company_id.name or '', ))
partner_payment_term = partner.property_payment_term and partner.property_payment_term.id or False
currency_id = False
if contract.pricelist_id:
currency_id = contract.pricelist_id.currency_id.id
elif partner.property_product_pricelist:
currency_id = partner.property_product_pricelist.currency_id.id
elif contract.company_id:
currency_id = contract.company_id.currency_id.id
invoice = {
'account_id': partner.property_account_receivable.id,
'type': 'out_invoice',
'partner_id': partner.id,
'currency_id': currency_id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'date_invoice': contract.recurring_next_date,
'origin': contract.code,
'fiscal_position': fpos_id,
'payment_term': partner_payment_term,
'company_id': contract.company_id.id or False,
'user_id': contract.manager_id.id or uid,
'comment': contract.description,
}
return invoice
def _prepare_invoice_line(self, cr, uid, line, fiscal_position, context=None):
fpos_obj = self.pool.get('account.fiscal.position')
res = line.product_id
account_id = res.property_account_income.id
if not account_id:
account_id = res.categ_id.property_account_income_categ.id
account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)
taxes = res.taxes_id or False
tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes, context=context)
values = {
'name': line.name,
'account_id': account_id,
'account_analytic_id': line.analytic_account_id.id,
'price_unit': line.price_unit or 0.0,
'quantity': line.quantity,
'uos_id': line.uom_id.id or False,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, tax_id)],
}
return values
def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):
fpos_obj = self.pool.get('account.fiscal.position')
fiscal_position = None
if fiscal_position_id:
fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)
invoice_lines = []
for line in contract.recurring_invoice_line_ids:
values = self._prepare_invoice_line(cr, uid, line, fiscal_position, context=context)
invoice_lines.append((0, 0, values))
return invoice_lines
def _prepare_invoice(self, cr, uid, contract, context=None):
invoice = self._prepare_invoice_data(cr, uid, contract, context=context)
invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract, invoice['fiscal_position'], context=context)
return invoice
def recurring_create_invoice(self, cr, uid, ids, context=None):
return self._recurring_create_invoice(cr, uid, ids, context=context)
def _cron_recurring_create_invoice(self, cr, uid, context=None):
return self._recurring_create_invoice(cr, uid, [], automatic=True, context=context)
def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):
context = context or {}
invoice_ids = []
current_date = time.strftime('%Y-%m-%d')
if ids:
contract_ids = ids
else:
contract_ids = self.search(cr, uid, [('recurring_next_date','<=', current_date), ('state','=', 'open'), ('recurring_invoices','=', True), ('type', '=', 'contract')])
if contract_ids:
cr.execute('SELECT company_id, array_agg(id) as ids FROM account_analytic_account WHERE id IN %s GROUP BY company_id', (tuple(contract_ids),))
for company_id, ids in cr.fetchall():
for contract in self.browse(cr, uid, ids, context=dict(context, company_id=company_id, force_company=company_id)):
try:
invoice_values = self._prepare_invoice(cr, uid, contract, context=context)
invoice_ids.append(self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))
next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, "%Y-%m-%d")
interval = contract.recurring_interval
if contract.recurring_rule_type == 'daily':
new_date = next_date+relativedelta(days=+interval)
elif contract.recurring_rule_type == 'weekly':
new_date = next_date+relativedelta(weeks=+interval)
elif contract.recurring_rule_type == 'monthly':
new_date = next_date+relativedelta(months=+interval)
else:
new_date = next_date+relativedelta(years=+interval)
self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')}, context=context)
if automatic:
cr.commit()
except Exception:
if automatic:
cr.rollback()
_logger.exception('Failed to create recurring invoice for contract %s', contract.code)
else:
raise
return invoice_ids
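# Editor's illustration (assumes dateutil is available, as the relativedelta
# usage above implies): the next invoice date advances by the contract's
# interval in the unit selected by recurring_rule_type. Standalone version:
import datetime
from dateutil.relativedelta import relativedelta

def _sketch_next_invoice_date(current, rule_type, interval):
    """Return the date one recurrence after `current` (a datetime.date)."""
    step = {'daily': relativedelta(days=interval),
            'weekly': relativedelta(weeks=interval),
            'monthly': relativedelta(months=interval),
            'yearly': relativedelta(years=interval)}[rule_type]
    return current + step

# e.g. _sketch_next_invoice_date(datetime.date(2016, 1, 31), 'monthly', 1)
# gives datetime.date(2016, 2, 29): relativedelta clamps to the month end.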
class account_analytic_account_summary_user(osv.osv):
_name = "account_analytic_analysis.summary.user"
_description = "Hours Summary by User"
_order='user'
_auto = False
_rec_name = 'user'
def _unit_amount(self, cr, uid, ids, name, arg, context=None):
res = {}
account_obj = self.pool.get('account.analytic.account')
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
account_ids = [x // max_user - (1 if x % max_user == 0 else 0) for x in ids]
user_ids = [x - account_id * max_user for x, account_id in zip(ids, account_ids)]
parent_ids = tuple(account_ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
if parent_ids:
cr.execute('SELECT id, unit_amount ' \
'FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s ' \
'AND "user" IN %s',(parent_ids, tuple(user_ids),))
for sum_id, unit_amount in cr.fetchall():
res[sum_id] = unit_amount
for id in ids:
res[id] = round(res.get(id, 0.0), 2)
return res
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'user': fields.many2one('res.users', 'User'),
}
_depends = {
'res.users': ['id'],
'account.analytic.line': ['account_id', 'journal_id', 'unit_amount', 'user_id'],
'account.analytic.journal': ['type'],
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_user')
cr.execute('''CREATE OR REPLACE VIEW account_analytic_analysis_summary_user AS (
with mu as
(select max(id) as max_user from res_users)
, lu AS
(SELECT
l.account_id AS account_id,
coalesce(l.user_id, 0) AS user_id,
SUM(l.unit_amount) AS unit_amount
FROM account_analytic_line AS l,
account_analytic_journal AS j
WHERE (j.type = 'general' ) and (j.id=l.journal_id)
GROUP BY l.account_id, l.user_id
)
select (lu.account_id::bigint * mu.max_user) + lu.user_id as id,
lu.account_id as account_id,
lu.user_id as "user",
unit_amount
from lu, mu)''')
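# Editor's illustration (not part of the upstream module): the view above
# encodes each (account, user) pair as one integer id,
# (account_id * max_user) + user_id, which _unit_amount inverts. Assuming
# user ids run from 1 to max_user, the round trip looks like this:
def _sketch_encode_summary_id(account_id, user_id, max_user):
    return account_id * max_user + user_id

def _sketch_decode_summary_id(synthetic_id, max_user):
    account_id = synthetic_id // max_user - (1 if synthetic_id % max_user == 0 else 0)
    return account_id, synthetic_id - account_id * max_user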
class account_analytic_account_summary_month(osv.osv):
_name = "account_analytic_analysis.summary.month"
_description = "Hours Summary by Month"
_auto = False
_rec_name = 'month'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'month': fields.char('Month', size=32, readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'journal_id', 'unit_amount'],
'account.analytic.journal': ['type'],
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_month')
cr.execute('CREATE VIEW account_analytic_analysis_summary_month AS (' \
'SELECT ' \
'(TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') + (d.account_id * 1000000::bigint))::bigint AS id, ' \
'd.account_id AS account_id, ' \
'TO_CHAR(d.month, \'Mon YYYY\') AS month, ' \
'TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') AS month_id, ' \
'COALESCE(SUM(l.unit_amount), 0.0) AS unit_amount ' \
'FROM ' \
'(SELECT ' \
'd2.account_id, ' \
'd2.month ' \
'FROM ' \
'(SELECT ' \
'a.id AS account_id, ' \
'l.month AS month ' \
'FROM ' \
'(SELECT ' \
'DATE_TRUNC(\'month\', l.date) AS month ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE j.type = \'general\' ' \
'GROUP BY DATE_TRUNC(\'month\', l.date) ' \
') AS l, ' \
'account_analytic_account AS a ' \
'GROUP BY l.month, a.id ' \
') AS d2 ' \
'GROUP BY d2.account_id, d2.month ' \
') AS d ' \
'LEFT JOIN ' \
'(SELECT ' \
'l.account_id AS account_id, ' \
'DATE_TRUNC(\'month\', l.date) AS month, ' \
'SUM(l.unit_amount) AS unit_amount ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE (j.type = \'general\') and (j.id=l.journal_id) ' \
'GROUP BY l.account_id, DATE_TRUNC(\'month\', l.date) ' \
') AS l '
'ON (' \
'd.account_id = l.account_id ' \
'AND d.month = l.month' \
') ' \
'GROUP BY d.month, d.account_id ' \
')')
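# Editor's illustration (not part of the upstream module): this view packs
# the month and account into one id as yyyymm + account_id * 1000000, which
# is collision-free because yyyymm always stays below 1000000:
def _sketch_month_id(account_id, year, month):
    return year * 100 + month + account_id * 1000000

def _sketch_split_month_id(synthetic_id):
    return synthetic_id // 1000000, synthetic_id % 1000000  # (account_id, yyyymm)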
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
provaleks/o8
|
addons/account_analytic_analysis/account_analytic_analysis.py
|
Python
|
agpl-3.0
| 48,515
|
#!/usr/bin/python
# coding: utf-8
import sys
import feedparser
from common import send_message
COUNTRIES = ['jp', 'us']
LIMIT = 10
URLS = ['https://itunes.apple.com/%s/rss/topfreeapplications/limit=%d/xml',
'https://itunes.apple.com/%s/rss/toppaidapplications/limit=%d/xml',
'https://itunes.apple.com/%s/rss/topgrossingapplications/limit=%d/xml']
def get_feed(url):
rss = feedparser.parse(url)
send_message(rss['feed']['title'].encode('utf8'))
for rank, entry in enumerate(rss['entries'], 1):
    title = entry['title'].encode('utf8')
    message = '%d: %s' % (rank, title)
    send_message(message)
for country in COUNTRIES:
    for url in URLS:
        try:
            feed_url = url % (country, LIMIT)
            get_feed(feed_url)
        except Exception:
            send_message('Error: %s' % str(sys.exc_info()))
|
Atrac613/notification-tools
|
app_store_ranking.py
|
Python
|
mit
| 873
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'OrganizationOnboardingTask'
db.create_table(
'sentry_organizationonboardingtask', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'organization',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Organization']
)
), (
'user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'], null=True
)
),
('task', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()),
(
'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()
), (
'date_completed',
self.gf('django.db.models.fields.DateTimeField')()
), (
'project_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(
null=True, blank=True
)
), ('data', self.gf('sentry.db.models.fields.jsonfield.JSONField')(default={})),
)
)
db.send_create_signal('sentry', ['OrganizationOnboardingTask'])
# Adding unique constraint on 'OrganizationOnboardingTask', fields ['organization', 'task']
db.create_unique('sentry_organizationonboardingtask', ['organization_id', 'task'])
def backwards(self, orm):
# Removing unique constraint on 'OrganizationOnboardingTask', fields
# ['organization', 'task']
db.delete_unique('sentry_organizationonboardingtask', ['organization_id', 'task'])
# Deleting model 'OrganizationOnboardingTask'
db.delete_table('sentry_organizationonboardingtask')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 3, 1, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
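# A South "frozen" spec like the dict above stores each field as a triple:
# (dotted field-class path, positional args, keyword args), with every value
# kept as a source string. A minimal sketch of turning such a triple back
# into a Django field instance (hypothetical helper, not part of South or
# Sentry; it only handles simple literal kwargs like 'max_length': '64'):
def build_frozen_field(frozen):
    import importlib
    path, args, kwargs = frozen
    module_path, class_name = path.rsplit('.', 1)
    field_cls = getattr(importlib.import_module(module_path), class_name)
    # eval() is used because frozen values are source strings; real South
    # does considerably more work here (ORM references, defaults, etc.).
    return field_cls(*[eval(a) for a in args],
                     **dict((k, eval(v)) for k, v in kwargs.items()))
# e.g. build_frozen_field(('django.db.models.fields.CharField', [], {'max_length': '64'}))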
|
mvaled/sentry
|
src/sentry/south_migrations/0236_auto__add_organizationonboardingtask__add_unique_organizationonboardin.py
|
Python
|
bsd-3-clause
| 67,095
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend innovas received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to an innovad or Innova-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the Innova Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/InnovaCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "InnovaCore")
return os.path.expanduser("~/.innovacore")
def read_bitcoin_config(dbdir):
"""Read the innova.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "innova.conf"))))
return dict(config_parser.items("all"))
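# Sketch of the FakeSecHead idea in isolation (illustrative helper; the real
# FakeSecHead above is local to read_bitcoin_config, so this re-creates the
# trick): ConfigParser insists on section headers, but innova.conf is a flat
# key=value file, so a dummy "[all]" section is prepended before parsing.
# (FakeSecHead additionally strips trailing '#' comments, which ConfigParser
# would otherwise keep as part of the value.)
def demo_sectionless_config():
    from ConfigParser import SafeConfigParser
    from StringIO import StringIO
    flat = "rpcuser=alice\nrpcpassword=secret\n"
    parser = SafeConfigParser()
    parser.readfp(StringIO("[all]\n" + flat))  # same effect as FakeSecHead
    return dict(parser.items("all"))  # {'rpcuser': 'alice', 'rpcpassword': 'secret'}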
def connect_JSON(config):
"""Connect to a Innova Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
config['rpcport'] = 19998 if testnet else 9998
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the innovad we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
    except Exception:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(innovad):
info = innovad.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
innovad.walletpassphrase(passphrase, 5)
        except Exception:
sys.stderr.write("Wrong passphrase\n")
info = innovad.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(innovad):
address_summary = dict()
address_to_account = dict()
for info in innovad.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = innovad.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = innovad.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-innova-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
    # Feel free to improve this; it's good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
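# Worked example of the greedy selection above, with made-up UTXOs
# (hypothetical data, illustration only):
def demo_select_coins():
    inputs = [
        {"txid": "aa"*32, "vout": 0, "amount": Decimal("1.5")},
        {"txid": "bb"*32, "vout": 1, "amount": Decimal("2.0")},
        {"txid": "cc"*32, "vout": 0, "amount": Decimal("0.7")},
    ]
    outputs, change = select_coins(Decimal("3.0"), inputs)
    # The scan stops after the first two UTXOs (1.5 + 2.0 = 3.5 >= 3.0),
    # so outputs has two entries and change is Decimal("0.5").
    return outputs, change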
def create_tx(innovad, fromaddresses, toaddress, amount, fee):
all_coins = list_available(innovad)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to innovad.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = innovad.createrawtransaction(inputs, outputs)
signed_rawtx = innovad.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
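# Quick illustration of the Decimal-vs-json note inside create_tx() (helper
# name is mine; it imports the stdlib json module explicitly, since the
# jsonrpc package may ship its own encoder): stock json rejects Decimal
# outright, which is why amounts are cast to float before reaching innovad.
def demo_decimal_vs_float():
    import json as stdlib_json
    try:
        stdlib_json.dumps({"amount": Decimal("1.5")})
    except TypeError:
        pass  # Decimal is not JSON serializable without a custom encoder
    return stdlib_json.dumps({"amount": float(Decimal("1.5"))})  # '{"amount": 1.5}'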
def compute_amount_in(innovad, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = innovad.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(innovad, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = innovad.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(innovad, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        # The fee is whatever the inputs leave over after paying the outputs.
        fee = total_in - total_out
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get innovas from")
parser.add_option("--to", dest="to", default=None,
help="address to get send innovas to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of innova.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
innovad = connect_JSON(config)
if options.amount is None:
address_summary = list_available(innovad)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(innovad):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(innovad, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(innovad, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = innovad.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
innovacoin/innova
|
contrib/spendfrom/spendfrom.py
|
Python
|
mit
| 10,026
|
# Python stubs generated by omniidl from /usr/local/share/idl/omniORB/COS/CosRelationships.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "corbaidl.idl"
import corbaidl_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "boxes.idl"
import boxes_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "ir.idl"
import ir_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "CosObjectIdentity.idl"
import CosObjectIdentity_idl
_0_CosObjectIdentity = omniORB.openModule("CosObjectIdentity")
_0_CosObjectIdentity__POA = omniORB.openModule("CosObjectIdentity__POA")
#
# Start of module "CosRelationships"
#
__name__ = "CosRelationships"
_0_CosRelationships = omniORB.openModule("CosRelationships", r"/usr/local/share/idl/omniORB/COS/CosRelationships.idl")
_0_CosRelationships__POA = omniORB.openModule("CosRelationships__POA", r"/usr/local/share/idl/omniORB/COS/CosRelationships.idl")
# forward interface RoleFactory;
_0_CosRelationships._d_RoleFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RoleFactory:1.0", "RoleFactory")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleFactory:1.0"] = _0_CosRelationships._d_RoleFactory
# forward interface RelationshipFactory;
_0_CosRelationships._d_RelationshipFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RelationshipFactory:1.0", "RelationshipFactory")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipFactory:1.0"] = _0_CosRelationships._d_RelationshipFactory
# forward interface Relationship;
_0_CosRelationships._d_Relationship = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/Relationship:1.0", "Relationship")
omniORB.typeMapping["IDL:omg.org/CosRelationships/Relationship:1.0"] = _0_CosRelationships._d_Relationship
# forward interface Role;
_0_CosRelationships._d_Role = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/Role:1.0", "Role")
omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"] = _0_CosRelationships._d_Role
# forward interface RelationshipIterator;
_0_CosRelationships._d_RelationshipIterator = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RelationshipIterator:1.0", "RelationshipIterator")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipIterator:1.0"] = _0_CosRelationships._d_RelationshipIterator
# typedef ... RelatedObject
class RelatedObject:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelatedObject:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.RelatedObject = RelatedObject
_0_CosRelationships._d_RelatedObject = omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"]
_0_CosRelationships._ad_RelatedObject = (omniORB.tcInternal.tv_alias, RelatedObject._NP_RepositoryId, "RelatedObject", omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"])
_0_CosRelationships._tc_RelatedObject = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_RelatedObject)
omniORB.registerType(RelatedObject._NP_RepositoryId, _0_CosRelationships._ad_RelatedObject, _0_CosRelationships._tc_RelatedObject)
del RelatedObject
# typedef ... Roles
class Roles:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Roles:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.Roles = Roles
_0_CosRelationships._d_Roles = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"], 0)
_0_CosRelationships._ad_Roles = (omniORB.tcInternal.tv_alias, Roles._NP_RepositoryId, "Roles", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"], 0))
_0_CosRelationships._tc_Roles = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_Roles)
omniORB.registerType(Roles._NP_RepositoryId, _0_CosRelationships._ad_Roles, _0_CosRelationships._tc_Roles)
del Roles
# typedef ... RoleName
class RoleName:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RoleName:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.RoleName = RoleName
_0_CosRelationships._d_RoleName = (omniORB.tcInternal.tv_string,0)
_0_CosRelationships._ad_RoleName = (omniORB.tcInternal.tv_alias, RoleName._NP_RepositoryId, "RoleName", (omniORB.tcInternal.tv_string,0))
_0_CosRelationships._tc_RoleName = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_RoleName)
omniORB.registerType(RoleName._NP_RepositoryId, _0_CosRelationships._ad_RoleName, _0_CosRelationships._tc_RoleName)
del RoleName
# typedef ... RoleNames
class RoleNames:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RoleNames:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.RoleNames = RoleNames
_0_CosRelationships._d_RoleNames = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"], 0)
_0_CosRelationships._ad_RoleNames = (omniORB.tcInternal.tv_alias, RoleNames._NP_RepositoryId, "RoleNames", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"], 0))
_0_CosRelationships._tc_RoleNames = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_RoleNames)
omniORB.registerType(RoleNames._NP_RepositoryId, _0_CosRelationships._ad_RoleNames, _0_CosRelationships._tc_RoleNames)
del RoleNames
# struct NamedRole
_0_CosRelationships.NamedRole = omniORB.newEmptyClass()
class NamedRole (omniORB.StructBase):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/NamedRole:1.0"
def __init__(self, name, aRole):
self.name = name
self.aRole = aRole
_0_CosRelationships.NamedRole = NamedRole
_0_CosRelationships._d_NamedRole = (omniORB.tcInternal.tv_struct, NamedRole, NamedRole._NP_RepositoryId, "NamedRole", "name", omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"], "aRole", omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"])
_0_CosRelationships._tc_NamedRole = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_NamedRole)
omniORB.registerType(NamedRole._NP_RepositoryId, _0_CosRelationships._d_NamedRole, _0_CosRelationships._tc_NamedRole)
del NamedRole
# typedef ... NamedRoles
class NamedRoles:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/NamedRoles:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.NamedRoles = NamedRoles
_0_CosRelationships._d_NamedRoles = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRole:1.0"], 0)
_0_CosRelationships._ad_NamedRoles = (omniORB.tcInternal.tv_alias, NamedRoles._NP_RepositoryId, "NamedRoles", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRole:1.0"], 0))
_0_CosRelationships._tc_NamedRoles = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_NamedRoles)
omniORB.registerType(NamedRoles._NP_RepositoryId, _0_CosRelationships._ad_NamedRoles, _0_CosRelationships._tc_NamedRoles)
del NamedRoles
# struct RelationshipHandle
_0_CosRelationships.RelationshipHandle = omniORB.newEmptyClass()
class RelationshipHandle (omniORB.StructBase):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipHandle:1.0"
def __init__(self, the_relationship, constant_random_id):
self.the_relationship = the_relationship
self.constant_random_id = constant_random_id
_0_CosRelationships.RelationshipHandle = RelationshipHandle
_0_CosRelationships._d_RelationshipHandle = (omniORB.tcInternal.tv_struct, RelationshipHandle, RelationshipHandle._NP_RepositoryId, "RelationshipHandle", "the_relationship", omniORB.typeMapping["IDL:omg.org/CosRelationships/Relationship:1.0"], "constant_random_id", omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"])
_0_CosRelationships._tc_RelationshipHandle = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_RelationshipHandle)
omniORB.registerType(RelationshipHandle._NP_RepositoryId, _0_CosRelationships._d_RelationshipHandle, _0_CosRelationships._tc_RelationshipHandle)
del RelationshipHandle
# typedef ... RelationshipHandles
class RelationshipHandles:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipHandles:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosRelationships.RelationshipHandles = RelationshipHandles
_0_CosRelationships._d_RelationshipHandles = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], 0)
_0_CosRelationships._ad_RelationshipHandles = (omniORB.tcInternal.tv_alias, RelationshipHandles._NP_RepositoryId, "RelationshipHandles", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], 0))
_0_CosRelationships._tc_RelationshipHandles = omniORB.tcInternal.createTypeCode(_0_CosRelationships._ad_RelationshipHandles)
omniORB.registerType(RelationshipHandles._NP_RepositoryId, _0_CosRelationships._ad_RelationshipHandles, _0_CosRelationships._tc_RelationshipHandles)
del RelationshipHandles
# interface RelationshipFactory
_0_CosRelationships._d_RelationshipFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RelationshipFactory:1.0", "RelationshipFactory")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipFactory:1.0"] = _0_CosRelationships._d_RelationshipFactory
_0_CosRelationships.RelationshipFactory = omniORB.newEmptyClass()
class RelationshipFactory :
_NP_RepositoryId = _0_CosRelationships._d_RelationshipFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
# struct NamedRoleType
_0_CosRelationships.RelationshipFactory.NamedRoleType = omniORB.newEmptyClass()
class NamedRoleType (omniORB.StructBase):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/NamedRoleType:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.NamedRoleType"
def __init__(self, name, named_role_type):
self.name = name
self.named_role_type = named_role_type
_d_NamedRoleType = _0_CosRelationships.RelationshipFactory._d_NamedRoleType = (omniORB.tcInternal.tv_struct, NamedRoleType, NamedRoleType._NP_RepositoryId, "NamedRoleType", "name", omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"], "named_role_type", omniORB.typeMapping["IDL:omg.org/CORBA/InterfaceDef:1.0"])
_tc_NamedRoleType = omniORB.tcInternal.createTypeCode(_d_NamedRoleType)
omniORB.registerType(NamedRoleType._NP_RepositoryId, _d_NamedRoleType, _tc_NamedRoleType)
# typedef ... NamedRoleTypes
class NamedRoleTypes:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/NamedRoleTypes:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_d_NamedRoleTypes = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipFactory/NamedRoleType:1.0"], 0)
_ad_NamedRoleTypes = (omniORB.tcInternal.tv_alias, NamedRoleTypes._NP_RepositoryId, "NamedRoleTypes", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipFactory/NamedRoleType:1.0"], 0))
_tc_NamedRoleTypes = omniORB.tcInternal.createTypeCode(_ad_NamedRoleTypes)
omniORB.registerType(NamedRoleTypes._NP_RepositoryId, _ad_NamedRoleTypes, _tc_NamedRoleTypes)
# exception RoleTypeError
_0_CosRelationships.RelationshipFactory.RoleTypeError = omniORB.newEmptyClass()
class RoleTypeError (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/RoleTypeError:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.RoleTypeError"
def __init__(self, culprits):
CORBA.UserException.__init__(self, culprits)
self.culprits = culprits
_d_RoleTypeError = (omniORB.tcInternal.tv_except, RoleTypeError, RoleTypeError._NP_RepositoryId, "RoleTypeError", "culprits", omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"])
_tc_RoleTypeError = omniORB.tcInternal.createTypeCode(_d_RoleTypeError)
omniORB.registerType(RoleTypeError._NP_RepositoryId, _d_RoleTypeError, _tc_RoleTypeError)
# exception MaxCardinalityExceeded
_0_CosRelationships.RelationshipFactory.MaxCardinalityExceeded = omniORB.newEmptyClass()
class MaxCardinalityExceeded (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/MaxCardinalityExceeded:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.MaxCardinalityExceeded"
def __init__(self, culprits):
CORBA.UserException.__init__(self, culprits)
self.culprits = culprits
_d_MaxCardinalityExceeded = (omniORB.tcInternal.tv_except, MaxCardinalityExceeded, MaxCardinalityExceeded._NP_RepositoryId, "MaxCardinalityExceeded", "culprits", omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"])
_tc_MaxCardinalityExceeded = omniORB.tcInternal.createTypeCode(_d_MaxCardinalityExceeded)
omniORB.registerType(MaxCardinalityExceeded._NP_RepositoryId, _d_MaxCardinalityExceeded, _tc_MaxCardinalityExceeded)
# exception DegreeError
_0_CosRelationships.RelationshipFactory.DegreeError = omniORB.newEmptyClass()
class DegreeError (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/DegreeError:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.DegreeError"
def __init__(self, required_degree):
CORBA.UserException.__init__(self, required_degree)
self.required_degree = required_degree
_d_DegreeError = (omniORB.tcInternal.tv_except, DegreeError, DegreeError._NP_RepositoryId, "DegreeError", "required_degree", omniORB.tcInternal.tv_ushort)
_tc_DegreeError = omniORB.tcInternal.createTypeCode(_d_DegreeError)
omniORB.registerType(DegreeError._NP_RepositoryId, _d_DegreeError, _tc_DegreeError)
# exception DuplicateRoleName
_0_CosRelationships.RelationshipFactory.DuplicateRoleName = omniORB.newEmptyClass()
class DuplicateRoleName (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/DuplicateRoleName:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.DuplicateRoleName"
def __init__(self, culprits):
CORBA.UserException.__init__(self, culprits)
self.culprits = culprits
_d_DuplicateRoleName = (omniORB.tcInternal.tv_except, DuplicateRoleName, DuplicateRoleName._NP_RepositoryId, "DuplicateRoleName", "culprits", omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"])
_tc_DuplicateRoleName = omniORB.tcInternal.createTypeCode(_d_DuplicateRoleName)
omniORB.registerType(DuplicateRoleName._NP_RepositoryId, _d_DuplicateRoleName, _tc_DuplicateRoleName)
# exception UnknownRoleName
_0_CosRelationships.RelationshipFactory.UnknownRoleName = omniORB.newEmptyClass()
class UnknownRoleName (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RelationshipFactory/UnknownRoleName:1.0"
_NP_ClassName = "CosRelationships.RelationshipFactory.UnknownRoleName"
def __init__(self, culprits):
CORBA.UserException.__init__(self, culprits)
self.culprits = culprits
_d_UnknownRoleName = (omniORB.tcInternal.tv_except, UnknownRoleName, UnknownRoleName._NP_RepositoryId, "UnknownRoleName", "culprits", omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"])
_tc_UnknownRoleName = omniORB.tcInternal.createTypeCode(_d_UnknownRoleName)
omniORB.registerType(UnknownRoleName._NP_RepositoryId, _d_UnknownRoleName, _tc_UnknownRoleName)
_0_CosRelationships.RelationshipFactory = RelationshipFactory
_0_CosRelationships._tc_RelationshipFactory = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_RelationshipFactory)
omniORB.registerType(RelationshipFactory._NP_RepositoryId, _0_CosRelationships._d_RelationshipFactory, _0_CosRelationships._tc_RelationshipFactory)
# RelationshipFactory operations and attributes
RelationshipFactory._d__get_relationship_type = ((),(omniORB.typeMapping["IDL:omg.org/CORBA/InterfaceDef:1.0"],),None)
RelationshipFactory._d__get_degree = ((),(omniORB.tcInternal.tv_ushort,),None)
RelationshipFactory._d__get_named_role_types = ((),(omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipFactory/NamedRoleTypes:1.0"],),None)
RelationshipFactory._d_create = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosRelationships/Relationship:1.0"], ), {_0_CosRelationships.RelationshipFactory.RoleTypeError._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_RoleTypeError, _0_CosRelationships.RelationshipFactory.MaxCardinalityExceeded._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_MaxCardinalityExceeded, _0_CosRelationships.RelationshipFactory.DegreeError._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_DegreeError, _0_CosRelationships.RelationshipFactory.DuplicateRoleName._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_DuplicateRoleName, _0_CosRelationships.RelationshipFactory.UnknownRoleName._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_UnknownRoleName})
# RelationshipFactory object reference
class _objref_RelationshipFactory (CORBA.Object):
_NP_RepositoryId = RelationshipFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_relationship_type(self, *args):
return self._obj.invoke("_get_relationship_type", _0_CosRelationships.RelationshipFactory._d__get_relationship_type, args)
relationship_type = property(_get_relationship_type)
def _get_degree(self, *args):
return self._obj.invoke("_get_degree", _0_CosRelationships.RelationshipFactory._d__get_degree, args)
degree = property(_get_degree)
def _get_named_role_types(self, *args):
return self._obj.invoke("_get_named_role_types", _0_CosRelationships.RelationshipFactory._d__get_named_role_types, args)
named_role_types = property(_get_named_role_types)
def create(self, *args):
return self._obj.invoke("create", _0_CosRelationships.RelationshipFactory._d_create, args)
omniORB.registerObjref(RelationshipFactory._NP_RepositoryId, _objref_RelationshipFactory)
_0_CosRelationships._objref_RelationshipFactory = _objref_RelationshipFactory
del RelationshipFactory, _objref_RelationshipFactory
# RelationshipFactory skeleton
__name__ = "CosRelationships__POA"
class RelationshipFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosRelationships.RelationshipFactory._NP_RepositoryId
_omni_op_d = {"_get_relationship_type": _0_CosRelationships.RelationshipFactory._d__get_relationship_type, "_get_degree": _0_CosRelationships.RelationshipFactory._d__get_degree, "_get_named_role_types": _0_CosRelationships.RelationshipFactory._d__get_named_role_types, "create": _0_CosRelationships.RelationshipFactory._d_create}
RelationshipFactory._omni_skeleton = RelationshipFactory
_0_CosRelationships__POA.RelationshipFactory = RelationshipFactory
omniORB.registerSkeleton(RelationshipFactory._NP_RepositoryId, RelationshipFactory)
del RelationshipFactory
__name__ = "CosRelationships"
# interface Relationship
_0_CosRelationships._d_Relationship = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/Relationship:1.0", "Relationship")
omniORB.typeMapping["IDL:omg.org/CosRelationships/Relationship:1.0"] = _0_CosRelationships._d_Relationship
_0_CosRelationships.Relationship = omniORB.newEmptyClass()
class Relationship (_0_CosObjectIdentity.IdentifiableObject):
_NP_RepositoryId = _0_CosRelationships._d_Relationship[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
# exception CannotUnlink
_0_CosRelationships.Relationship.CannotUnlink = omniORB.newEmptyClass()
class CannotUnlink (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Relationship/CannotUnlink:1.0"
_NP_ClassName = "CosRelationships.Relationship.CannotUnlink"
def __init__(self, offending_roles):
CORBA.UserException.__init__(self, offending_roles)
self.offending_roles = offending_roles
_d_CannotUnlink = (omniORB.tcInternal.tv_except, CannotUnlink, CannotUnlink._NP_RepositoryId, "CannotUnlink", "offending_roles", omniORB.typeMapping["IDL:omg.org/CosRelationships/Roles:1.0"])
_tc_CannotUnlink = omniORB.tcInternal.createTypeCode(_d_CannotUnlink)
omniORB.registerType(CannotUnlink._NP_RepositoryId, _d_CannotUnlink, _tc_CannotUnlink)
_0_CosRelationships.Relationship = Relationship
_0_CosRelationships._tc_Relationship = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_Relationship)
omniORB.registerType(Relationship._NP_RepositoryId, _0_CosRelationships._d_Relationship, _0_CosRelationships._tc_Relationship)
# Relationship operations and attributes
Relationship._d__get_named_roles = ((),(omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"],),None)
Relationship._d_destroy = ((), (), {_0_CosRelationships.Relationship.CannotUnlink._NP_RepositoryId: _0_CosRelationships.Relationship._d_CannotUnlink})
# Relationship object reference
class _objref_Relationship (_0_CosObjectIdentity._objref_IdentifiableObject):
_NP_RepositoryId = Relationship._NP_RepositoryId
def __init__(self, obj):
_0_CosObjectIdentity._objref_IdentifiableObject.__init__(self, obj)
def _get_named_roles(self, *args):
return self._obj.invoke("_get_named_roles", _0_CosRelationships.Relationship._d__get_named_roles, args)
named_roles = property(_get_named_roles)
def destroy(self, *args):
return self._obj.invoke("destroy", _0_CosRelationships.Relationship._d_destroy, args)
omniORB.registerObjref(Relationship._NP_RepositoryId, _objref_Relationship)
_0_CosRelationships._objref_Relationship = _objref_Relationship
del Relationship, _objref_Relationship
# Relationship skeleton
__name__ = "CosRelationships__POA"
class Relationship (_0_CosObjectIdentity__POA.IdentifiableObject):
_NP_RepositoryId = _0_CosRelationships.Relationship._NP_RepositoryId
_omni_op_d = {"_get_named_roles": _0_CosRelationships.Relationship._d__get_named_roles, "destroy": _0_CosRelationships.Relationship._d_destroy}
_omni_op_d.update(_0_CosObjectIdentity__POA.IdentifiableObject._omni_op_d)
Relationship._omni_skeleton = Relationship
_0_CosRelationships__POA.Relationship = Relationship
omniORB.registerSkeleton(Relationship._NP_RepositoryId, Relationship)
del Relationship
__name__ = "CosRelationships"
# interface Role
_0_CosRelationships._d_Role = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/Role:1.0", "Role")
omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"] = _0_CosRelationships._d_Role
_0_CosRelationships.Role = omniORB.newEmptyClass()
class Role :
_NP_RepositoryId = _0_CosRelationships._d_Role[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
# exception UnknownRoleName
_0_CosRelationships.Role.UnknownRoleName = omniORB.newEmptyClass()
class UnknownRoleName (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Role/UnknownRoleName:1.0"
_NP_ClassName = "CosRelationships.Role.UnknownRoleName"
def __init__(self):
CORBA.UserException.__init__(self)
_d_UnknownRoleName = (omniORB.tcInternal.tv_except, UnknownRoleName, UnknownRoleName._NP_RepositoryId, "UnknownRoleName")
_tc_UnknownRoleName = omniORB.tcInternal.createTypeCode(_d_UnknownRoleName)
omniORB.registerType(UnknownRoleName._NP_RepositoryId, _d_UnknownRoleName, _tc_UnknownRoleName)
# exception UnknownRelationship
_0_CosRelationships.Role.UnknownRelationship = omniORB.newEmptyClass()
class UnknownRelationship (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Role/UnknownRelationship:1.0"
_NP_ClassName = "CosRelationships.Role.UnknownRelationship"
def __init__(self):
CORBA.UserException.__init__(self)
_d_UnknownRelationship = (omniORB.tcInternal.tv_except, UnknownRelationship, UnknownRelationship._NP_RepositoryId, "UnknownRelationship")
_tc_UnknownRelationship = omniORB.tcInternal.createTypeCode(_d_UnknownRelationship)
omniORB.registerType(UnknownRelationship._NP_RepositoryId, _d_UnknownRelationship, _tc_UnknownRelationship)
# exception RelationshipTypeError
_0_CosRelationships.Role.RelationshipTypeError = omniORB.newEmptyClass()
class RelationshipTypeError (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Role/RelationshipTypeError:1.0"
_NP_ClassName = "CosRelationships.Role.RelationshipTypeError"
def __init__(self):
CORBA.UserException.__init__(self)
_d_RelationshipTypeError = (omniORB.tcInternal.tv_except, RelationshipTypeError, RelationshipTypeError._NP_RepositoryId, "RelationshipTypeError")
_tc_RelationshipTypeError = omniORB.tcInternal.createTypeCode(_d_RelationshipTypeError)
omniORB.registerType(RelationshipTypeError._NP_RepositoryId, _d_RelationshipTypeError, _tc_RelationshipTypeError)
# exception CannotDestroyRelationship
_0_CosRelationships.Role.CannotDestroyRelationship = omniORB.newEmptyClass()
class CannotDestroyRelationship (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Role/CannotDestroyRelationship:1.0"
_NP_ClassName = "CosRelationships.Role.CannotDestroyRelationship"
def __init__(self, offenders):
CORBA.UserException.__init__(self, offenders)
self.offenders = offenders
_d_CannotDestroyRelationship = (omniORB.tcInternal.tv_except, CannotDestroyRelationship, CannotDestroyRelationship._NP_RepositoryId, "CannotDestroyRelationship", "offenders", omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandles:1.0"])
_tc_CannotDestroyRelationship = omniORB.tcInternal.createTypeCode(_d_CannotDestroyRelationship)
omniORB.registerType(CannotDestroyRelationship._NP_RepositoryId, _d_CannotDestroyRelationship, _tc_CannotDestroyRelationship)
# exception ParticipatingInRelationship
_0_CosRelationships.Role.ParticipatingInRelationship = omniORB.newEmptyClass()
class ParticipatingInRelationship (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/Role/ParticipatingInRelationship:1.0"
_NP_ClassName = "CosRelationships.Role.ParticipatingInRelationship"
def __init__(self, the_relationships):
CORBA.UserException.__init__(self, the_relationships)
self.the_relationships = the_relationships
_d_ParticipatingInRelationship = (omniORB.tcInternal.tv_except, ParticipatingInRelationship, ParticipatingInRelationship._NP_RepositoryId, "ParticipatingInRelationship", "the_relationships", omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandles:1.0"])
_tc_ParticipatingInRelationship = omniORB.tcInternal.createTypeCode(_d_ParticipatingInRelationship)
omniORB.registerType(ParticipatingInRelationship._NP_RepositoryId, _d_ParticipatingInRelationship, _tc_ParticipatingInRelationship)
_0_CosRelationships.Role = Role
_0_CosRelationships._tc_Role = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_Role)
omniORB.registerType(Role._NP_RepositoryId, _0_CosRelationships._d_Role, _0_CosRelationships._tc_Role)
# Role operations and attributes
Role._d__get_related_object = ((),(omniORB.typeMapping["IDL:omg.org/CosRelationships/RelatedObject:1.0"],),None)
Role._d_get_other_related_object = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"]), (omniORB.typeMapping["IDL:omg.org/CosRelationships/RelatedObject:1.0"], ), {_0_CosRelationships.Role.UnknownRoleName._NP_RepositoryId: _0_CosRelationships.Role._d_UnknownRoleName, _0_CosRelationships.Role.UnknownRelationship._NP_RepositoryId: _0_CosRelationships.Role._d_UnknownRelationship})
Role._d_get_other_role = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleName:1.0"]), (omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"], ), {_0_CosRelationships.Role.UnknownRoleName._NP_RepositoryId: _0_CosRelationships.Role._d_UnknownRoleName, _0_CosRelationships.Role.UnknownRelationship._NP_RepositoryId: _0_CosRelationships.Role._d_UnknownRelationship})
Role._d_get_relationships = ((omniORB.tcInternal.tv_ulong, ), (omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandles:1.0"], omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipIterator:1.0"]), None)
Role._d_destroy_relationships = ((), (), {_0_CosRelationships.Role.CannotDestroyRelationship._NP_RepositoryId: _0_CosRelationships.Role._d_CannotDestroyRelationship})
Role._d_destroy = ((), (), {_0_CosRelationships.Role.ParticipatingInRelationship._NP_RepositoryId: _0_CosRelationships.Role._d_ParticipatingInRelationship})
Role._d_check_minimum_cardinality = ((), (omniORB.tcInternal.tv_boolean, ), None)
Role._d_link = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], omniORB.typeMapping["IDL:omg.org/CosRelationships/NamedRoles:1.0"]), (), {_0_CosRelationships.RelationshipFactory.MaxCardinalityExceeded._NP_RepositoryId: _0_CosRelationships.RelationshipFactory._d_MaxCardinalityExceeded, _0_CosRelationships.Role.RelationshipTypeError._NP_RepositoryId: _0_CosRelationships.Role._d_RelationshipTypeError})
Role._d_unlink = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"], ), (), {_0_CosRelationships.Role.UnknownRelationship._NP_RepositoryId: _0_CosRelationships.Role._d_UnknownRelationship})
# Role object reference
class _objref_Role (CORBA.Object):
_NP_RepositoryId = Role._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_related_object(self, *args):
return self._obj.invoke("_get_related_object", _0_CosRelationships.Role._d__get_related_object, args)
related_object = property(_get_related_object)
def get_other_related_object(self, *args):
return self._obj.invoke("get_other_related_object", _0_CosRelationships.Role._d_get_other_related_object, args)
def get_other_role(self, *args):
return self._obj.invoke("get_other_role", _0_CosRelationships.Role._d_get_other_role, args)
def get_relationships(self, *args):
return self._obj.invoke("get_relationships", _0_CosRelationships.Role._d_get_relationships, args)
def destroy_relationships(self, *args):
return self._obj.invoke("destroy_relationships", _0_CosRelationships.Role._d_destroy_relationships, args)
def destroy(self, *args):
return self._obj.invoke("destroy", _0_CosRelationships.Role._d_destroy, args)
def check_minimum_cardinality(self, *args):
return self._obj.invoke("check_minimum_cardinality", _0_CosRelationships.Role._d_check_minimum_cardinality, args)
def link(self, *args):
return self._obj.invoke("link", _0_CosRelationships.Role._d_link, args)
def unlink(self, *args):
return self._obj.invoke("unlink", _0_CosRelationships.Role._d_unlink, args)
omniORB.registerObjref(Role._NP_RepositoryId, _objref_Role)
_0_CosRelationships._objref_Role = _objref_Role
del Role, _objref_Role
# Role skeleton
__name__ = "CosRelationships__POA"
class Role (PortableServer.Servant):
_NP_RepositoryId = _0_CosRelationships.Role._NP_RepositoryId
_omni_op_d = {"_get_related_object": _0_CosRelationships.Role._d__get_related_object, "get_other_related_object": _0_CosRelationships.Role._d_get_other_related_object, "get_other_role": _0_CosRelationships.Role._d_get_other_role, "get_relationships": _0_CosRelationships.Role._d_get_relationships, "destroy_relationships": _0_CosRelationships.Role._d_destroy_relationships, "destroy": _0_CosRelationships.Role._d_destroy, "check_minimum_cardinality": _0_CosRelationships.Role._d_check_minimum_cardinality, "link": _0_CosRelationships.Role._d_link, "unlink": _0_CosRelationships.Role._d_unlink}
Role._omni_skeleton = Role
_0_CosRelationships__POA.Role = Role
omniORB.registerSkeleton(Role._NP_RepositoryId, Role)
del Role
__name__ = "CosRelationships"
# interface RoleFactory
_0_CosRelationships._d_RoleFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RoleFactory:1.0", "RoleFactory")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleFactory:1.0"] = _0_CosRelationships._d_RoleFactory
_0_CosRelationships.RoleFactory = omniORB.newEmptyClass()
class RoleFactory :
_NP_RepositoryId = _0_CosRelationships._d_RoleFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
# exception NilRelatedObject
_0_CosRelationships.RoleFactory.NilRelatedObject = omniORB.newEmptyClass()
class NilRelatedObject (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RoleFactory/NilRelatedObject:1.0"
_NP_ClassName = "CosRelationships.RoleFactory.NilRelatedObject"
def __init__(self):
CORBA.UserException.__init__(self)
_d_NilRelatedObject = (omniORB.tcInternal.tv_except, NilRelatedObject, NilRelatedObject._NP_RepositoryId, "NilRelatedObject")
_tc_NilRelatedObject = omniORB.tcInternal.createTypeCode(_d_NilRelatedObject)
omniORB.registerType(NilRelatedObject._NP_RepositoryId, _d_NilRelatedObject, _tc_NilRelatedObject)
# exception RelatedObjectTypeError
_0_CosRelationships.RoleFactory.RelatedObjectTypeError = omniORB.newEmptyClass()
class RelatedObjectTypeError (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RoleFactory/RelatedObjectTypeError:1.0"
_NP_ClassName = "CosRelationships.RoleFactory.RelatedObjectTypeError"
def __init__(self):
CORBA.UserException.__init__(self)
_d_RelatedObjectTypeError = (omniORB.tcInternal.tv_except, RelatedObjectTypeError, RelatedObjectTypeError._NP_RepositoryId, "RelatedObjectTypeError")
_tc_RelatedObjectTypeError = omniORB.tcInternal.createTypeCode(_d_RelatedObjectTypeError)
omniORB.registerType(RelatedObjectTypeError._NP_RepositoryId, _d_RelatedObjectTypeError, _tc_RelatedObjectTypeError)
# typedef ... InterfaceDefs
class InterfaceDefs:
_NP_RepositoryId = "IDL:omg.org/CosRelationships/RoleFactory/InterfaceDefs:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_d_InterfaceDefs = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CORBA/InterfaceDef:1.0"], 0)
_ad_InterfaceDefs = (omniORB.tcInternal.tv_alias, InterfaceDefs._NP_RepositoryId, "InterfaceDefs", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CORBA/InterfaceDef:1.0"], 0))
_tc_InterfaceDefs = omniORB.tcInternal.createTypeCode(_ad_InterfaceDefs)
omniORB.registerType(InterfaceDefs._NP_RepositoryId, _ad_InterfaceDefs, _tc_InterfaceDefs)
_0_CosRelationships.RoleFactory = RoleFactory
_0_CosRelationships._tc_RoleFactory = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_RoleFactory)
omniORB.registerType(RoleFactory._NP_RepositoryId, _0_CosRelationships._d_RoleFactory, _0_CosRelationships._tc_RoleFactory)
# RoleFactory operations and attributes
RoleFactory._d__get_role_type = ((),(omniORB.typeMapping["IDL:omg.org/CORBA/InterfaceDef:1.0"],),None)
RoleFactory._d__get_max_cardinality = ((),(omniORB.tcInternal.tv_ulong,),None)
RoleFactory._d__get_min_cardinality = ((),(omniORB.tcInternal.tv_ulong,),None)
RoleFactory._d__get_related_object_types = ((),(omniORB.typeMapping["IDL:omg.org/CosRelationships/RoleFactory/InterfaceDefs:1.0"],),None)
RoleFactory._d_create_role = ((omniORB.typeMapping["IDL:omg.org/CosRelationships/RelatedObject:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosRelationships/Role:1.0"], ), {_0_CosRelationships.RoleFactory.NilRelatedObject._NP_RepositoryId: _0_CosRelationships.RoleFactory._d_NilRelatedObject, _0_CosRelationships.RoleFactory.RelatedObjectTypeError._NP_RepositoryId: _0_CosRelationships.RoleFactory._d_RelatedObjectTypeError})
# RoleFactory object reference
class _objref_RoleFactory (CORBA.Object):
_NP_RepositoryId = RoleFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_role_type(self, *args):
return self._obj.invoke("_get_role_type", _0_CosRelationships.RoleFactory._d__get_role_type, args)
role_type = property(_get_role_type)
def _get_max_cardinality(self, *args):
return self._obj.invoke("_get_max_cardinality", _0_CosRelationships.RoleFactory._d__get_max_cardinality, args)
max_cardinality = property(_get_max_cardinality)
def _get_min_cardinality(self, *args):
return self._obj.invoke("_get_min_cardinality", _0_CosRelationships.RoleFactory._d__get_min_cardinality, args)
min_cardinality = property(_get_min_cardinality)
def _get_related_object_types(self, *args):
return self._obj.invoke("_get_related_object_types", _0_CosRelationships.RoleFactory._d__get_related_object_types, args)
related_object_types = property(_get_related_object_types)
def create_role(self, *args):
return self._obj.invoke("create_role", _0_CosRelationships.RoleFactory._d_create_role, args)
omniORB.registerObjref(RoleFactory._NP_RepositoryId, _objref_RoleFactory)
_0_CosRelationships._objref_RoleFactory = _objref_RoleFactory
del RoleFactory, _objref_RoleFactory
# RoleFactory skeleton
__name__ = "CosRelationships__POA"
class RoleFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosRelationships.RoleFactory._NP_RepositoryId
_omni_op_d = {"_get_role_type": _0_CosRelationships.RoleFactory._d__get_role_type, "_get_max_cardinality": _0_CosRelationships.RoleFactory._d__get_max_cardinality, "_get_min_cardinality": _0_CosRelationships.RoleFactory._d__get_min_cardinality, "_get_related_object_types": _0_CosRelationships.RoleFactory._d__get_related_object_types, "create_role": _0_CosRelationships.RoleFactory._d_create_role}
RoleFactory._omni_skeleton = RoleFactory
_0_CosRelationships__POA.RoleFactory = RoleFactory
omniORB.registerSkeleton(RoleFactory._NP_RepositoryId, RoleFactory)
del RoleFactory
__name__ = "CosRelationships"
# interface RelationshipIterator
_0_CosRelationships._d_RelationshipIterator = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosRelationships/RelationshipIterator:1.0", "RelationshipIterator")
omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipIterator:1.0"] = _0_CosRelationships._d_RelationshipIterator
_0_CosRelationships.RelationshipIterator = omniORB.newEmptyClass()
class RelationshipIterator :
_NP_RepositoryId = _0_CosRelationships._d_RelationshipIterator[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosRelationships.RelationshipIterator = RelationshipIterator
_0_CosRelationships._tc_RelationshipIterator = omniORB.tcInternal.createTypeCode(_0_CosRelationships._d_RelationshipIterator)
omniORB.registerType(RelationshipIterator._NP_RepositoryId, _0_CosRelationships._d_RelationshipIterator, _0_CosRelationships._tc_RelationshipIterator)
# RelationshipIterator operations and attributes
RelationshipIterator._d_next_one = ((), (omniORB.tcInternal.tv_boolean, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandle:1.0"]), None)
RelationshipIterator._d_next_n = ((omniORB.tcInternal.tv_ulong, ), (omniORB.tcInternal.tv_boolean, omniORB.typeMapping["IDL:omg.org/CosRelationships/RelationshipHandles:1.0"]), None)
RelationshipIterator._d_destroy = ((), (), None)
# RelationshipIterator object reference
class _objref_RelationshipIterator (CORBA.Object):
_NP_RepositoryId = RelationshipIterator._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def next_one(self, *args):
return self._obj.invoke("next_one", _0_CosRelationships.RelationshipIterator._d_next_one, args)
def next_n(self, *args):
return self._obj.invoke("next_n", _0_CosRelationships.RelationshipIterator._d_next_n, args)
def destroy(self, *args):
return self._obj.invoke("destroy", _0_CosRelationships.RelationshipIterator._d_destroy, args)
omniORB.registerObjref(RelationshipIterator._NP_RepositoryId, _objref_RelationshipIterator)
_0_CosRelationships._objref_RelationshipIterator = _objref_RelationshipIterator
del RelationshipIterator, _objref_RelationshipIterator
# RelationshipIterator skeleton
__name__ = "CosRelationships__POA"
class RelationshipIterator (PortableServer.Servant):
_NP_RepositoryId = _0_CosRelationships.RelationshipIterator._NP_RepositoryId
_omni_op_d = {"next_one": _0_CosRelationships.RelationshipIterator._d_next_one, "next_n": _0_CosRelationships.RelationshipIterator._d_next_n, "destroy": _0_CosRelationships.RelationshipIterator._d_destroy}
RelationshipIterator._omni_skeleton = RelationshipIterator
_0_CosRelationships__POA.RelationshipIterator = RelationshipIterator
omniORB.registerSkeleton(RelationshipIterator._NP_RepositoryId, RelationshipIterator)
del RelationshipIterator
__name__ = "CosRelationships"
#
# End of module "CosRelationships"
#
__name__ = "CosRelationships_idl"
_exported_modules = ( "CosRelationships", )
# The end.
|
amonmoce/corba_examples
|
omniORBpy-4.2.1/build/python/COS/CosRelationships_idl.py
|
Python
|
mit
| 42,484
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
    There are many ways to serve a WSGI application. While you're developing
    it you usually don't want a full-blown webserver like Apache but a simple
    standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
    the standard library. If you're using older versions of Python you can
    download the package from the cheeseshop.
    However there are some caveats. Source code won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error. While the latter is easy to solve, the first
    one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
    You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `click`
(http://click.pocoo.org) instead of a simple start file.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import io
import os
import signal
import socket
import sys
from datetime import datetime as dt
from datetime import timedelta
from ._compat import PY2
from ._compat import reraise
from ._compat import WIN
from ._compat import wsgi_encoding_dance
from ._internal import _log
from .exceptions import InternalServerError
from .urls import uri_to_iri
from .urls import url_parse
from .urls import url_unquote
try:
import socketserver
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
except ImportError:
import SocketServer as socketserver
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
try:
import ssl
except ImportError:
class _SslDummy(object):
def __getattr__(self, name):
raise RuntimeError("SSL support unavailable")
ssl = _SslDummy()
try:
import click
except ImportError:
click = None
ThreadingMixIn = socketserver.ThreadingMixIn
can_fork = hasattr(os, "fork")
if can_fork:
ForkingMixIn = socketserver.ForkingMixIn
else:
class ForkingMixIn(object):
pass
try:
af_unix = socket.AF_UNIX
except AttributeError:
af_unix = None
LISTEN_QUEUE = 128
can_open_by_fd = not WIN and hasattr(socket, "fromfd")
# On Python 3, ConnectionError represents the same errnos as
# socket.error from Python 2, while socket.error is an alias for the
# more generic OSError.
if PY2:
_ConnectionError = socket.error
else:
_ConnectionError = ConnectionError
class DechunkedInput(io.RawIOBase):
"""An input stream that handles Transfer-Encoding 'chunked'"""
def __init__(self, rfile):
self._rfile = rfile
self._done = False
self._len = 0
def readable(self):
return True
def read_chunk_len(self):
try:
line = self._rfile.readline().decode("latin1")
_len = int(line.strip(), 16)
except ValueError:
raise IOError("Invalid chunk header")
if _len < 0:
raise IOError("Negative chunk length not allowed")
return _len
def readinto(self, buf):
read = 0
while not self._done and read < len(buf):
if self._len == 0:
# This is the first chunk or we fully consumed the previous
# one. Read the next length of the next chunk
self._len = self.read_chunk_len()
if self._len == 0:
# Found the final chunk of size 0. The stream is now exhausted,
# but there is still a final newline that should be consumed
self._done = True
if self._len > 0:
# There is data (left) in this chunk, so append it to the
# buffer. If this operation fully consumes the chunk, this will
# reset self._len to 0.
n = min(len(buf), self._len)
buf[read : read + n] = self._rfile.read(n)
self._len -= n
read += n
if self._len == 0:
# Skip the terminating newline of a chunk that has been fully
# consumed. This also applies to the 0-sized final chunk
terminator = self._rfile.readline()
if terminator not in (b"\n", b"\r\n", b"\r"):
raise IOError("Missing chunk terminating newline")
return read
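def _dechunked_demo():
    # Hedged usage sketch, added for illustration (not part of the original
    # werkzeug source): decode a hand-built Transfer-Encoding: chunked body.
    raw = io.BytesIO(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")
    return DechunkedInput(raw).read()  # -> b"Wikipedia"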
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
from . import __version__
return "Werkzeug/" + __version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = "http" if self.server.ssl_context is None else "https"
if not self.client_address:
self.client_address = "<local>"
if isinstance(self.client_address, str):
self.client_address = (self.client_address, 0)
# If there was no scheme but the path started with two slashes,
# the first segment may have been incorrectly parsed as the
# netloc, prepend it to the path again.
if not request_url.scheme and request_url.netloc:
path_info = "/%s%s" % (request_url.netloc, request_url.path)
else:
path_info = request_url.path
path_info = url_unquote(path_info)
environ = {
"wsgi.version": (1, 0),
"wsgi.url_scheme": url_scheme,
"wsgi.input": self.rfile,
"wsgi.errors": sys.stderr,
"wsgi.multithread": self.server.multithread,
"wsgi.multiprocess": self.server.multiprocess,
"wsgi.run_once": False,
"werkzeug.server.shutdown": shutdown_server,
"SERVER_SOFTWARE": self.server_version,
"REQUEST_METHOD": self.command,
"SCRIPT_NAME": "",
"PATH_INFO": wsgi_encoding_dance(path_info),
"QUERY_STRING": wsgi_encoding_dance(request_url.query),
# Non-standard, added by mod_wsgi, uWSGI
"REQUEST_URI": wsgi_encoding_dance(self.path),
# Non-standard, added by gunicorn
"RAW_URI": wsgi_encoding_dance(self.path),
"REMOTE_ADDR": self.address_string(),
"REMOTE_PORT": self.port_integer(),
"SERVER_NAME": self.server.server_address[0],
"SERVER_PORT": str(self.server.server_address[1]),
"SERVER_PROTOCOL": self.request_version,
}
for key, value in self.get_header_items():
key = key.upper().replace("-", "_")
value = value.replace("\r\n", "")
if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
key = "HTTP_" + key
if key in environ:
value = "{},{}".format(environ[key], value)
environ[key] = value
if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
environ["wsgi.input_terminated"] = True
environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
# Per RFC 2616, if the URL is absolute, use that as the host.
# We're using "has a scheme" to indicate an absolute URL.
if request_url.scheme and request_url.netloc:
environ["HTTP_HOST"] = request_url.netloc
try:
# binary_form=False gives nicer information, but wouldn't be compatible with
# what Nginx or Apache could return.
peer_cert = self.connection.getpeercert(binary_form=True)
if peer_cert is not None:
# Nginx and Apache use PEM format.
environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(peer_cert)
except ValueError:
# SSL handshake hasn't finished.
self.server.log("error", "Cannot fetch SSL peer certificate info")
except AttributeError:
# Not using TLS, the socket will not have getpeercert().
pass
return environ
def run_wsgi(self):
if self.headers.get("Expect", "").lower().strip() == "100-continue":
self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, "write() before start_response"
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
code = int(code)
self.send_response(code, msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if not (
"content-length" in header_keys
or environ["REQUEST_METHOD"] == "HEAD"
or code < 200
or code in (204, 304)
):
self.close_connection = True
self.send_header("Connection", "close")
if "server" not in header_keys:
self.send_header("Server", self.version_string())
if "date" not in header_keys:
self.send_header("Date", self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), "applications must write bytes"
if data:
# Only write data if there is any to avoid Python 3.5 SSL bug
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError("Headers already set")
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b"")
finally:
if hasattr(application_iter, "close"):
application_iter.close()
try:
execute(self.server.app)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from .debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log("error", "Error on request:\n%s", traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
try:
BaseHTTPRequestHandler.handle(self)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e)
except Exception as e:
if self.server.ssl_context is None or not is_ssl_error(e):
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, "SIGKILL", signal.SIGTERM)
# reloader active
if is_running_from_reloader():
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
            message = self.responses[code][0] if code in self.responses else ""
if self.request_version != "HTTP/0.9":
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode("ascii"))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
if getattr(self, "environ", None):
return self.environ["REMOTE_ADDR"]
elif not self.client_address:
return "<local>"
elif isinstance(self.client_address, str):
return self.client_address
else:
return self.client_address[0]
def port_integer(self):
return self.client_address[1]
def log_request(self, code="-", size="-"):
try:
path = uri_to_iri(self.path)
msg = "%s %s %s" % (self.command, path, self.request_version)
except AttributeError:
# path isn't set if the requestline was bad
msg = self.requestline
code = str(code)
if click:
color = click.style
if code[0] == "1": # 1xx - Informational
msg = color(msg, bold=True)
elif code[0] == "2": # 2xx - Success
msg = color(msg, fg="white")
elif code == "304": # 304 - Resource Not Modified
msg = color(msg, fg="cyan")
elif code[0] == "3": # 3xx - Redirection
msg = color(msg, fg="green")
elif code == "404": # 404 - Resource Not Found
msg = color(msg, fg="yellow")
elif code[0] == "4": # 4xx - Client Error
msg = color(msg, fg="red", bold=True)
else: # 5xx, or any other response
msg = color(msg, fg="magenta", bold=True)
self.log("info", '"%s" %s %s', msg, code, size)
def log_error(self, *args):
self.log("error", *args)
def log_message(self, format, *args):
self.log("info", format, *args)
def log(self, type, message, *args):
_log(
type,
"%s - - [%s] %s\n"
% (self.address_string(), self.log_date_time_string(), message % args),
)
def get_header_items(self):
"""
Get an iterable list of key/value pairs representing headers.
This function provides Python 2/3 compatibility as related to the
parsing of request headers. Python 2.7 is not compliant with
RFC 3875 Section 4.1.18 which requires multiple values for headers
to be provided or RFC 2616 which allows for folding of multi-line
headers. This function will return a matching list regardless
of Python version. It can be removed once Python 2.7 support
is dropped.
        :return: List of tuples containing header key/value pairs
"""
if PY2:
# For Python 2, process the headers manually according to
# W3C RFC 2616 Section 4.2.
items = []
for header in self.headers.headers:
# Remove "\r\n" from the header and split on ":" to get
# the field name and value.
try:
key, value = header[0:-2].split(":", 1)
except ValueError:
                    # If the header could not be split on ":" but starts with white
# space and it follows an existing header, it's a folded
# header.
if header[0] in ("\t", " ") and items:
# Pop off the last header
key, value = items.pop()
# Append the current header to the value of the last
# header which will be placed back on the end of the
# list
value = value + header
# Otherwise it's just a bad header and should error
else:
# Re-raise the value error
raise
# Add the key and the value once stripped of leading
# white space. The specification allows for stripping
# trailing white space but the Python 3 code does not
# strip trailing white space. Therefore, trailing space
# will be left as is to match the Python 3 behavior.
items.append((key, value.lstrip()))
else:
items = self.headers.items()
return items
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
try:
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
except ImportError:
raise TypeError("Using ad-hoc certificates requires the cryptography library.")
pkey = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = u"*"
subject = x509.Name(
[
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Dummy Certificate"),
x509.NameAttribute(NameOID.COMMON_NAME, cn),
]
)
cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(subject)
.public_key(pkey.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(dt.utcnow())
.not_valid_after(dt.utcnow() + timedelta(days=365))
.add_extension(x509.ExtendedKeyUsage([x509.OID_SERVER_AUTH]), critical=False)
.add_extension(
x509.SubjectAlternativeName([x509.DNSName(u"*")]), critical=False
)
.sign(pkey, hashes.SHA256(), default_backend())
)
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
if host is not None:
cn = u"*.%s/CN=%s" % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
from cryptography.hazmat.primitives import serialization
cert_file = base_path + ".crt"
pkey_file = base_path + ".key"
with open(cert_file, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
with open(pkey_file, "wb") as f:
f.write(
pkey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
)
return cert_file, pkey_file
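# Hedged usage sketch (illustrative paths and app object, not from the
# original module): generate a reusable dev cert once, then pass it to
# run_simple as a (cert, key) tuple:
#     cert_file, pkey_file = make_ssl_devcert("/tmp/devcert", host="localhost")
#     run_simple("localhost", 8443, app, ssl_context=(cert_file, pkey_file))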
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
from cryptography.hazmat.primitives import serialization
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, cert.public_bytes(serialization.Encoding.PEM))
os.write(
pkey_handle,
pkey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
),
)
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
try:
protocol = ssl.PROTOCOL_TLS_SERVER
except AttributeError:
# Python <= 3.5 compat
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
"""A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug."""
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(
sock,
keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol,
**kwargs
)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
return isinstance(error, ssl.SSLError)
def select_address_family(host, port):
"""Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if host.startswith("unix://"):
return socket.AF_UNIX
elif ":" in host and hasattr(socket, "AF_INET6"):
return socket.AF_INET6
return socket.AF_INET
def get_sockaddr(host, port, family):
"""Return a fully qualified socket address that can be passed to
:func:`socket.bind`."""
if family == af_unix:
return host.split("://", 1)[1]
try:
res = socket.getaddrinfo(
host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
)
except socket.gaierror:
return host, port
return res[0][4]
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = LISTEN_QUEUE
def __init__(
self,
host,
port,
app,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_address_family(host, port)
if fd is not None:
real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
port = 0
server_address = get_sockaddr(host, int(port), self.address_family)
# remove socket file if it already exists
if self.address_family == af_unix and os.path.exists(server_address):
os.unlink(server_address)
HTTPServer.__init__(self, server_address, handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
self.host = host
self.port = self.socket.getsockname()[1]
# Patch in the original socket.
if fd is not None:
self.socket.close()
self.socket = real_sock
self.server_address = self.socket.getsockname()
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == "adhoc":
ssl_context = generate_adhoc_ssl_context()
# If we are on Python 2 the return value from socket.fromfd
# is an internal socket object but what we need for ssl wrap
# is the wrapper around it :(
sock = self.socket
if PY2 and not isinstance(sock, socket.socket):
sock = socket.socket(sock.family, sock.type, sock.proto, sock)
self.socket = ssl_context.wrap_socket(sock, server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
# Python 2 still causes a socket.error after the earlier
# handling, so silence it here.
if isinstance(sys.exc_info()[1], _ConnectionError):
return
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
daemon_threads = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(
self,
host,
port,
app,
processes=40,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if not can_fork:
raise ValueError("Your platform does not support forking.")
BaseWSGIServer.__init__(
self, host, port, app, handler, passthrough_errors, ssl_context, fd
)
self.max_children = processes
def make_server(
host=None,
port=None,
app=None,
threaded=False,
processes=1,
request_handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and multi process server.")
elif threaded:
return ThreadedWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
elif processes > 1:
return ForkingWSGIServer(
host,
port,
app,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
else:
return BaseWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
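def _make_server_demo():
    # Hedged sketch (not part of werkzeug): build a threaded server around a
    # trivial WSGI app on an ephemeral port; call .serve_forever() to run it.
    def hello_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello"]
    return make_server("127.0.0.1", 0, hello_app, threaded=True)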
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get("WERKZEUG_RUN_MAIN") == "true"
def run_simple(
hostname,
port,
application,
use_reloader=False,
use_debugger=False,
use_evalex=True,
extra_files=None,
reloader_interval=1,
reloader_type="auto",
threaded=False,
processes=1,
request_handler=None,
static_files=None,
passthrough_errors=False,
ssl_context=None,
):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
.. versionchanged:: 0.15
Bind to a Unix socket by passing a path that starts with
``unix://`` as the ``hostname``.
:param hostname: The host to bind to, for example ``'localhost'``.
If the value is a path that starts with ``unix://`` it will bind
        to a Unix socket instead of a TCP socket.
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a list or dict of paths for static files. This works
exactly like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if not isinstance(port, int):
raise TypeError("port must be an integer")
if use_debugger:
from .debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from .middleware.shared_data import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def log_startup(sock):
display_hostname = hostname if hostname not in ("", "*") else "localhost"
quit_msg = "(Press CTRL+C to quit)"
if sock.family == af_unix:
_log("info", " * Running on %s %s", display_hostname, quit_msg)
else:
if ":" in display_hostname:
display_hostname = "[%s]" % display_hostname
port = sock.getsockname()[1]
_log(
"info",
" * Running on %s://%s:%d/ %s",
"http" if ssl_context is None else "https",
display_hostname,
port,
quit_msg,
)
def inner():
try:
fd = int(os.environ["WERKZEUG_SERVER_FD"])
except (LookupError, ValueError):
fd = None
srv = make_server(
hostname,
port,
application,
threaded,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
if fd is None:
log_startup(srv.socket)
srv.serve_forever()
if use_reloader:
# If we're not running already in the subprocess that is the
# reloader we want to open up a socket early to make sure the
# port is actually available.
if not is_running_from_reloader():
if port == 0 and not can_open_by_fd:
raise ValueError(
"Cannot bind to a random port with enabled "
"reloader if the Python interpreter does "
"not support socket opening by fd."
)
# Create and destroy a socket so that any exceptions are
# raised before we spawn a separate Python interpreter and
# lose this ability.
address_family = select_address_family(hostname, port)
server_address = get_sockaddr(hostname, port, address_family)
s = socket.socket(address_family, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(server_address)
if hasattr(s, "set_inheritable"):
s.set_inheritable(True)
# If we can open the socket by file descriptor, then we can just
# reuse this one and our socket will survive the restarts.
if can_open_by_fd:
os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
s.listen(LISTEN_QUEUE)
log_startup(s)
else:
s.close()
if address_family == af_unix:
_log("info", "Unlinking %s" % server_address)
os.unlink(server_address)
# Do not use relative imports, otherwise "python -m werkzeug.serving"
# breaks.
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval, reloader_type)
else:
inner()
def run_with_reloader(*args, **kwargs):
# People keep using undocumented APIs. Do not use this function
# please, we do not guarantee that it continues working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
"""A simple command-line interface for :py:func:`run_simple`."""
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from .utils import import_string
parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object")
parser.add_option(
"-b",
"--bind",
dest="address",
help="The hostname:port the app should listen on.",
)
parser.add_option(
"-d",
"--debug",
dest="use_debugger",
action="store_true",
default=False,
help="Use Werkzeug's debugger.",
)
parser.add_option(
"-r",
"--reload",
dest="use_reloader",
action="store_true",
default=False,
help="Reload Python process if modules change.",
)
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(":")
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
sys.stdout.write("No application supplied, or too much. See --help\n")
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or "127.0.0.1"),
port=int(port or 5000),
application=app,
use_reloader=options.use_reloader,
use_debugger=options.use_debugger,
)
if __name__ == "__main__":
main()
|
aksinghdce/aksinghdce.github.io
|
students/subjects/Programming/projects/knowledge-tree/server/knowledgetree/lib/python3.6/site-packages/werkzeug/serving.py
|
Python
|
mit
| 38,694
|
""" Interface to the noaa site and grab daily 10.7 cm solar flux measurements
"""
from __future__ import print_function, absolute_import, division
from astropy.io import ascii
import numpy as np
import os
import glob
from ftplib import FTP
import logging
logger = logging.getLogger(__name__)
from astropy.time import Time
from ..utils import remove_if_there
#-------------------------------------------------------------------------------
def grab_solar_files(file_dir):
"""Pull solar data files from NOAA website
    Solar data is FTPd from NOAA and written to text files for use in plotting
and monitoring of COS dark-rates and TDS.
Parameters
----------
file_dir : str
Directory to write the files to
"""
logging.debug("Connecting to NOAO site")
ftp = FTP('ftp.swpc.noaa.gov')
ftp.login()
ftp.cwd('/pub/indices/old_indices/')
for item in sorted(ftp.nlst()):
if item.endswith('_DSD.txt'):
year = int(item[:4])
if year >= 2000:
                logger.debug('Retrieving: {}'.format(item))
destination = os.path.join(file_dir, item)
                with open(destination, 'wb') as out_file:
                    ftp.retrbinary('RETR {}'.format(item), out_file.write)
os.chmod(destination, 0o777)
#-------------------------------------------------------------------------------
def compile_txt(file_dir):
""" Pull desired columns from solar data text files
Parameters
----------
file_dir : str
Returns
-------
date : np.ndarray
        MJD of each measurement
flux : np.ndarray
solar flux measurements
"""
date = []
flux = []
input_list = glob.glob(os.path.join(file_dir, '*DSD.txt'))
input_list.sort()
for item in input_list:
        logger.debug('Reading {}'.format(item))
#-- clean up Q4 files when year-long file exists
if ('Q4_' in item) and os.path.exists(item.replace('Q4_', '_')):
logger.debug("Removing duplicate observations: {}".format(item))
os.remove(item)
continue
data = ascii.read(item, data_start=1, comment='[#,:]')
for line in data:
line_date = Time('{}-{}-{} 00:00:00'.format(line['col1'],
line['col2'],
line['col3']),
scale='utc', format='iso').mjd
line_flux = line[3]
if line_flux > 0:
date.append(line_date)
flux.append(line_flux)
return np.array(date), np.array(flux)
#-------------------------------------------------------------------------------
def get_solar_data(file_dir):
""" Compile the necessary solar data from NOAA
Parameters
----------
file_dir : str
directory containing retrieved solar data txt files
Outputs
-------
solar_flux.txt :
txt file containing mjd,flux of solar measurements
"""
logger.info('GETTING SOLAR FLUX DATA')
for txtfile in glob.glob(os.path.join(file_dir, '*_D?D.txt')):
logger.debug("Removing old file: {}".format(txtfile))
os.remove(txtfile)
grab_solar_files(file_dir)
date, flux = compile_txt(file_dir)
out_solar_file = os.path.join(file_dir, 'solar_flux.txt')
remove_if_there(out_solar_file)
with open(out_solar_file, 'w') as outfile:
for d, f in zip(date, flux):
outfile.write('%4.5f %d\n' % (d, f))
os.chmod(out_solar_file, 0o777)
#-------------------------------------------------------------------------------
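# Hedged usage sketch (requires network access to ftp.swpc.noaa.gov; the
# directory below is illustrative):
#     get_solar_data('/tmp/solar_data')  # writes /tmp/solar_data/solar_flux.txt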
|
mfixstsci/peewee4cosmo
|
cosmo_peewee/dark/solar.py
|
Python
|
bsd-3-clause
| 3,644
|
from flask import render_template, Blueprint, request, redirect, session
from flask_login import login_required, logout_user
main_blueprint = Blueprint("main", __name__)
@main_blueprint.route('/')
def index():
return render_template('index.html')
@main_blueprint.route('/about')
def about():
return render_template('about.html')
@main_blueprint.route('/get-involved')
def get_involved():
return render_template('get-involved.html')
@main_blueprint.route("/settings")
@login_required
def settings():
pass
@main_blueprint.route("/logout")
@login_required
def logout():
session.pop('twitter_oauth', None)
logout_user()
return redirect(request.referrer)
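def _register_demo():
    # Hedged sketch (not part of this module): how main_blueprint might be
    # wired into an application; the Flask() app here is illustrative.
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(main_blueprint)
    return app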
|
karissa/papertalk
|
papertalk/views/main.py
|
Python
|
mit
| 679
|
# -*- coding:utf-8 -*-
query = 'https://en.wikipedia.org/api/rest_v1/page/'
response = r"""{"items":["data-parsoid","graph","html","mobile-sections","mobile-sections-lead","mobile-sections-remaining","pdf","random","related","revision","segments","summary","title","wikitext"]}"""
info = {'content-type': 'TEST', 'status': 200}
cache = {'query': query, 'response': response, 'info': info}
|
siznax/wptools
|
tests/rest.py
|
Python
|
mit
| 392
|
# -*- coding: utf-8 -*-
#
# quickstart - Refreshing the GUI world.
# Copyright (C) 2013 Eugenio "g7" Paolantonio
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
import gettext
import locale
class Translation:
""" Translation() is the class that handles translations. """
def __init__(self, domain, localedir=None, languages=None):
""" Initializes the Translation class. """
self.domain = domain
self.localedir = localedir
self.languages = languages
self.load()
def load(self, install=False):
self.__gettext = gettext.translation(self.domain, self.localedir, self.languages, fallback=True)
self._ = self.__gettext.gettext
def install(self):
""" Installs the _() function into the builtins namespace. """
self.__gettext.install()
def change_locale(self, newlanguages, install=True):
""" Changes the current locale with a new one, specified in newlanguages. """
self.languages = newlanguages
self.load()
if install: self.install()
def bind_also_locale(self):
""" Properly binds also the 'locale' module. Use this if you want to make glade UI files translatable. """
locale.bindtextdomain(self.domain, self.localedir)
locale.textdomain(self.domain)
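def _translation_demo():
    # Hedged usage sketch (domain, localedir and languages are illustrative
    # values, not taken from quickstart itself).
    t = Translation("myapp", localedir="./locale", languages=["it"])
    t.install()              # makes _() available in the builtins namespace
    t.change_locale(["fr"])  # reload and reinstall with a new language
    return t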
|
semplice/quickstart
|
quickstart/translations.py
|
Python
|
lgpl-2.1
| 1,902
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import argparse as _argparse
import os
import sys
import tempfile
from mo_dots import coalesce, listwrap, unwrap, to_data
from mo_logs import Log
# PARAMETERS MATCH argparse.ArgumentParser.add_argument()
# https://docs.python.org/dev/library/argparse.html#the-add-argument-method
#
# name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.
# action - The basic type of action to be taken when this argument is encountered at the command line.
# nargs - The number of command-line arguments that should be consumed.
# const - A constant value required by some action and nargs selections.
# default - The value produced if the argument is absent from the command line.
# type - The type to which the command-line argument should be converted.
# choices - A container of the allowable values for the argument.
# required - Whether or not the command-line option may be omitted (optionals only).
# help - A brief description of what the argument does.
# metavar - A name for the argument in usage messages.
# dest - The name of the attribute to be added to the object returned by parse_args().
class _ArgParser(_argparse.ArgumentParser):
def error(self, message):
Log.error("argparse error: {{error}}", error=message)
def argparse(defs, complain=True):
parser = _ArgParser()
for d in listwrap(defs):
args = d.copy()
name = args.name
args.name = None
parser.add_argument(*unwrap(listwrap(name)), **args)
namespace, unknown = parser.parse_known_args()
if unknown and complain:
Log.warning("Ignoring arguments: {{unknown|json}}", unknown=unknown)
output = {k: getattr(namespace, k) for k in vars(namespace)}
return to_data(output)
def read_settings(defs=None, filename=None, default_filename=None, complain=True):
"""
:param filename: Force load a file
:param defs: more arguments you want to accept (see https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument)
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
:parma complain: Complain about args mismatch
"""
from mo_files import File
import mo_json_config
# READ SETTINGS
defs = listwrap(defs)
defs.append({
"name": ["--config", "--settings", "--settings-file", "--settings_file"],
"help": "path to JSON file with settings",
"type": str,
"dest": "filename",
"default": None,
"required": False,
})
args = argparse(defs, complain)
args.filename = coalesce(
filename,
args.filename if args.filename.endswith(".json") else None,
default_filename,
"./config.json",
)
settings_file = File(args.filename)
if settings_file.exists:
Log.note("Using {{filename}} for configuration", filename=settings_file.abspath)
else:
Log.error(
"Can not read configuration file {{filename}}",
filename=settings_file.abspath,
)
settings = mo_json_config.get_file(settings_file)
settings.args = args
return settings
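# Hedged call-site sketch (file name illustrative):
#     settings = read_settings(default_filename="~/my_app/config.json")
#     Log.note("Using {{filename}}", filename=settings.args.filename)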
# snagged from https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py (under licence PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2)
class SingleInstance:
"""
ONLY ONE INSTANCE OF PROGRAM ALLOWED
    If you want to prevent your script from running in parallel, just instantiate the SingleInstance() class.
    If another instance is already running, it will exit the application with the message
    "Another instance is already running, quitting.", returning a -1 error code.
with SingleInstance():
<your code here>
settings = startup.read_settings()
with SingleInstance(settings.args.filename):
<your code here>
    This option is very useful if you have scripts executed by crontab at small intervals, which can otherwise cause multiple instances to run at once.
Remember that this works by creating a lock file with a filename based on the full path to the script file.
"""
def __init__(self, flavor_id=""):
self.initialized = False
appname = os.path.splitext(os.path.abspath(sys.argv[0]))[0]
basename = ((appname + "-%s") % flavor_id).replace("/", "-").replace(
":", ""
).replace("\\", "-").replace("-.-", "-") + ".lock"
self.lockfile = os.path.normpath(tempfile.gettempdir() + "/" + basename)
def __enter__(self):
Log.note("SingleInstance.lockfile = " + self.lockfile)
if sys.platform == "win32":
try:
# file already exists, we try to remove (in case previous execution was interrupted)
if os.path.exists(self.lockfile):
os.unlink(self.lockfile)
self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except Exception as e:
Log.alarm("Another instance is already running, quitting.")
sys.exit(-1)
else: # non Windows
import fcntl
self.fp = open(self.lockfile, "w")
try:
fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
Log.alarm("Another instance is already running, quitting.")
sys.exit(-1)
self.initialized = True
def __exit__(self, type, value, traceback):
self.__del__()
def __del__(self):
temp, self.initialized = self.initialized, False
if not temp:
return
try:
if sys.platform == "win32":
if hasattr(self, "fd"):
os.close(self.fd)
os.unlink(self.lockfile)
else:
import fcntl
fcntl.lockf(self.fp, fcntl.LOCK_UN)
if os.path.isfile(self.lockfile):
os.unlink(self.lockfile)
except Exception as e:
Log.warning("Problem with SingleInstance __del__()", e)
sys.exit(-1)
|
klahnakoski/ActiveData
|
vendor/mo_logs/startup.py
|
Python
|
mpl-2.0
| 6,384
|
import ctypes as ct
class Box(ct.Structure):
_fields_ = [
('tid', ct.c_size_t),
('ptr', ct.c_void_p),
('deleter', ct.CFUNCTYPE(None, ct.c_void_p)),
]
class TypeSystem(ct.Structure):
_fields_ = [
('type_counter', ct.c_size_t),
('add_type', ct.CFUNCTYPE(ct.c_size_t, ct.c_size_t, ct.c_char_p, ct.POINTER(ct.c_size_t), ct.c_size_t)),
('add_caller', ct.CFUNCTYPE(None, ct.c_size_t, Box)),
('add_callback', ct.CFUNCTYPE(None, ct.c_size_t, ct.c_size_t)),
('pre_init', ct.CFUNCTYPE(None)),
('post_init', ct.CFUNCTYPE(None)),
('add_box', ct.CFUNCTYPE(None, ct.py_object, ct.c_char_p, Box)),
('import_func', ct.CFUNCTYPE(None, ct.c_char_p, ct.c_char_p, ct.c_size_t, ct.POINTER(Box))),
]
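def _box_demo():
    # Hedged sketch (illustrative, not part of pybindcpp's API surface):
    # construct a Box with a no-op deleter callback.
    noop = ct.CFUNCTYPE(None, ct.c_void_p)(lambda _ptr: None)
    return Box(tid=0, ptr=None, deleter=noop)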
|
drufat/pybindcpp
|
pybindcpp/api.py
|
Python
|
gpl-3.0
| 790
|
#!/usr/bin/env python
"""
crate_anon/crateweb/core/management/commands/runcpserver.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Django management command framework for CherryPy.**
- Based on https://lincolnloop.com/blog/2008/mar/25/serving-django-cherrypy/
- Idea and code snippets borrowed from
http://www.xhtml.net/scripts/Django-CherryPy-server-DjangoCerise
- Adapted to run as a management command.
- Some bugs fixed by RNC.
- Then rewritten by RNC.
- Then modified to serve CRATE, with static files, etc.
- Then daemonizing code removed: https://code.djangoproject.com/ticket/4996
TEST COMMAND:
.. code-block:: bash
./manage.py runcpserver --port 8080 --ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem --ssl_private_key /etc/ssl/private/ssl-cert-snakeoil.key
""" # noqa
from argparse import ArgumentParser, Namespace
import logging
from typing import Any
# import errno
# import os
# import signal
# import time
# try:
# import grp
# import pwd
# unix = True
# except ImportError:
# grp = None
# pwd = None
# unix = False
import cherrypy
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from crate_anon.common.constants import EnvVar
from crate_anon.crateweb.config.wsgi import application as wsgi_application
# COULD ALSO USE:
# from django.core.handlers.wsgi import WSGIHandler
# wsgi_application = WSGIHandler()
log = logging.getLogger(__name__)
CRATE_STATIC_URL_PATH = settings.STATIC_URL.rstrip('/')
NEED_UNIX = "Need UNIX for group/user operations"
if EnvVar.GENERATING_CRATE_DOCS:
DEFAULT_ROOT = "/crate/root/path/"
else:
DEFAULT_ROOT = settings.FORCE_SCRIPT_NAME
class Command(BaseCommand):
"""
Django management command to run this project in a CherryPy web server.
"""
help = ("Run this project in a CherryPy webserver. To do this, "
"CherryPy is required (pip install cherrypy).")
def add_arguments(self, parser: ArgumentParser) -> None:
# docstring in superclass
parser.add_argument(
'--host', type=str, default="127.0.0.1",
help="hostname to listen on (default: 127.0.0.1)")
parser.add_argument(
'--port', type=int, default=8088,
help="port to listen on (default: 8088)")
parser.add_argument(
"--server_name", type=str, default="localhost",
help="CherryPy's SERVER_NAME environ entry (default: localhost)")
# parser.add_argument(
# "--daemonize", action="store_true",
# help="whether to detach from terminal (default: False)")
# parser.add_argument(
# "--pidfile", type=str,
# help="write the spawned process ID to this file")
# parser.add_argument(
# "--workdir", type=str,
# help="change to this directory when daemonizing")
parser.add_argument(
"--threads", type=int, default=10,
help="Number of threads for server to use (default: 10)")
parser.add_argument(
"--ssl_certificate", type=str,
help="SSL certificate file "
"(e.g. /etc/ssl/certs/ssl-cert-snakeoil.pem)")
parser.add_argument(
"--ssl_private_key", type=str,
help="SSL private key file "
"(e.g. /etc/ssl/private/ssl-cert-snakeoil.key)")
# parser.add_argument(
# "--server_user", type=str, default="www-data",
# help="user to run daemonized process (default: www-data)")
# parser.add_argument(
# "--server_group", type=str, default="www-data",
# help="group to run daemonized process (default: www-data)")
parser.add_argument(
"--log_screen", dest="log_screen", action="store_true",
help="log access requests etc. to terminal (default)")
parser.add_argument(
"--no_log_screen", dest="log_screen", action="store_false",
help="don't log access requests etc. to terminal")
parser.add_argument(
"--debug_static", action="store_true",
help="show debug info for static file requests")
parser.add_argument(
"--root_path", type=str, default=DEFAULT_ROOT,
help=f"Root path to serve CRATE at. Default: {DEFAULT_ROOT}")
parser.set_defaults(log_screen=True)
# parser.add_argument(
# "--stop", action="store_true",
# help="stop server")
def handle(self, *args: str, **options: Any) -> None:
# docstring in superclass
opts = Namespace(**options)
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
# noinspection PyTypeChecker
runcpserver(opts)
# def change_uid_gid(uid, gid=None):
# """Try to change UID and GID to the provided values.
# UID and GID are given as names like 'nobody' not integer.
#
# Src: http://mail.mems-exchange.org/durusmail/quixote-users/4940/1/
# """
# if not unix:
# raise OSError(NEED_UNIX)
# if not os.geteuid() == 0:
# # Do not try to change the gid/uid if not root.
# return
# (uid, gid) = get_uid_gid(uid, gid)
# os.setgid(gid)
# os.setuid(uid)
# def get_uid_gid(uid, gid=None):
# """Try to change UID and GID to the provided values.
# UID and GID are given as names like 'nobody' not integer.
#
# Src: http://mail.mems-exchange.org/durusmail/quixote-users/4940/1/
# """
# if not unix:
# raise OSError(NEED_UNIX)
# uid, default_grp = pwd.getpwnam(uid)[2:4]
# if gid is None:
# gid = default_grp
# else:
# try:
# gid = grp.getgrnam(gid)[2]
# except KeyError:
# gid = default_grp
# return uid, gid
# def still_alive(pid):
# """
# Poll for process with given pid up to 10 times waiting .25 seconds in
# between each poll.
# Returns False if the process no longer exists otherwise, True.
# """
# for n in range(10):
# time.sleep(0.25)
# try:
# # poll the process state
# os.kill(pid, 0)
# except OSError as e:
# if e[0] == errno.ESRCH:
# # process has died
# return False
# else:
# raise # TODO
# return True
# def stop_server(pidfile):
# """
# Stop process whose pid was written to supplied pidfile.
# First try SIGTERM and if it fails, SIGKILL.
# If process is still running, an exception is raised.
# """
# if os.path.exists(pidfile):
# pid = int(open(pidfile).read())
# try:
# os.kill(pid, signal.SIGTERM)
# except OSError: # process does not exist
# os.remove(pidfile)
# return
# if still_alive(pid):
# # process didn't exit cleanly, make one last effort to kill it
# os.kill(pid, signal.SIGKILL)
# if still_alive(pid):
# raise OSError(f"Process {pid} did not stop.")
# os.remove(pidfile)
class Missing(object):
"""
CherryPy "application" that is a basic web interface to say "not here".
"""
config = {
'/': {
# Anything so as to prevent complaints about an empty config.
'tools.sessions.on': False,
}
}
@cherrypy.expose
def index(self) -> str:
return (
"[CRATE CherryPy server says:] "
"Nothing to see here. Wrong URL path. "
"(If you are sure it's right, has the server administrator "
"set the 'root_path' option correctly?)"
)
# noinspection PyUnresolvedReferences
def start_server(host: str,
port: int,
threads: int,
server_name: str,
root_path: str,
log_screen: bool,
ssl_certificate: str,
ssl_private_key: str,
debug_static: bool) -> None:
"""
Start CherryPy server.
Args:
host: hostname to listen on (e.g. ``127.0.0.1``)
port: port number to listen on
threads: number of threads to use in the thread pool
server_name: CherryPy SERVER_NAME environment variable (e.g.
``localhost``)
root_path: root path to mount server at
log_screen: show log to console?
ssl_certificate: optional filename of an SSL certificate
ssl_private_key: optional filename of an SSL private key
debug_static: show debug info for static requests?
"""
# if daemonize and server_user and server_group:
# # ensure the that the daemon runs as specified user
# change_uid_gid(server_user, server_group)
cherrypy.config.update({
'server.socket_host': host,
'server.socket_port': port,
'server.thread_pool': threads,
'server.server_name': server_name,
'server.log_screen': log_screen,
})
if ssl_certificate and ssl_private_key:
cherrypy.config.update({
'server.ssl_module': 'builtin',
'server.ssl_certificate': ssl_certificate,
'server.ssl_private_key': ssl_private_key,
})
log.info(f"Starting on host: {host}")
log.info(f"Starting on port: {port}")
log.info(f"Static files will be served from filesystem path: "
f"{settings.STATIC_ROOT}")
log.info(f"Static files will be served at URL path: "
f"{CRATE_STATIC_URL_PATH}")
log.info(f"CRATE will be at: {root_path}")
log.info(f"Thread pool size: {threads}")
static_config = {
'/': {
'tools.staticdir.root': settings.STATIC_ROOT,
'tools.staticdir.debug': debug_static,
},
CRATE_STATIC_URL_PATH: {
'tools.staticdir.on': True,
'tools.staticdir.dir': '',
},
}
cherrypy.tree.mount(Missing(), '', config=static_config)
cherrypy.tree.graft(wsgi_application, root_path)
# noinspection PyBroadException,PyPep8
try:
cherrypy.engine.start()
cherrypy.engine.block()
except Exception:
# 2017-03-13: shouldn't restrict to KeyboardInterrupt!
cherrypy.engine.stop()
def runcpserver(opts: Namespace) -> None:
"""
Launch the CherryPy server using arguments from an
:class:`argparse.Namespace`.
Args:
opts: the command-line :class:`argparse.Namespace`
"""
# if opts.stop:
# if not opts.pidfile:
# raise ValueError("Must specify --pidfile to use --stop")
# print('stopping server')
# stop_server(opts.pidfile)
# return True
# if opts.daemonize:
# if not opts.pidfile:
# opts.pidfile = f'/var/run/cpserver_{opts.port}.pid'
# stop_server(opts.pidfile)
#
# if opts.workdir:
# become_daemon(our_home_dir=opts.workdir)
# else:
# become_daemon()
#
# fp = open(opts.pidfile, 'w')
# fp.write(f"{os.getpid()}\n")
# fp.close()
# Start the webserver
log.info(f'starting server with options {opts}')
start_server(
host=opts.host,
port=opts.port,
threads=opts.threads,
server_name=opts.server_name,
root_path=opts.root_path,
log_screen=opts.log_screen,
ssl_certificate=opts.ssl_certificate,
ssl_private_key=opts.ssl_private_key,
debug_static=opts.debug_static,
)
def main() -> None:
"""
Command-line entry point (not typically used directly).
"""
command = Command()
parser = ArgumentParser()
command.add_arguments(parser)
cmdargs = parser.parse_args()
runcpserver(cmdargs)
if __name__ == '__main__':
main()
|
RudolfCardinal/crate
|
crate_anon/crateweb/core/management/commands/runcpserver.py
|
Python
|
gpl-3.0
| 12,825
|
# code created by NamanNimmo Gera
# 12:42pm, April 10, 2019.
from itertools import permutations
perm = permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
for count, item in enumerate(perm):
    # the millionth permutation sits at zero-based index 999999
    if count == 999999:
        str_join = "".join(str(x) for x in item)
        print(str_join)
        break
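# Equivalent sketch using itertools.islice (an alternative, not the author's
# original approach): permutations() yields in lexicographic order, so the
# millionth permutation is the item at zero-based index 999999:
#     from itertools import islice, permutations
#     millionth = next(islice(permutations(range(10)), 999999, None))
#     print("".join(map(str, millionth)))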
|
DestructHub/ProjectEuler
|
Problem024/Python/solution_2.py
|
Python
|
mit
| 385
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2015 Camptocamp SA
# Author: Yannick Vaucher
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
sourcing_method = fields.Selection(
selection_add=[('donation', "In-kind Donation")],
compute='_get_sourcing_method',
related=False
)
# Limited selection for view
sourcing_method_donation = fields.Selection(
selection=[('donation', "In-kind Donation"),
('other', "Other")],
string="Sourcing Method",
default="donation",
)
# field for visibility on view
order_type = fields.Selection(
related='order_id.order_type',
selection=[
('standard', 'Standard'),
('cost_estimate_only', 'Cost Estimate Only'),
('donation', 'In-Kind Donation')
]
)
@api.one
@api.depends('lr_source_id.sourcing_method',
'order_id.order_type',
'sourcing_method_donation')
def _get_sourcing_method(self):
"""Compute value of sourcing_method
Sourcing method is related of lr_source_id.sourcing_method
Unless lr_source_id is not set
In case of donation it takes the value from sourcing_method_donation
Otherwise it is 'Other'
"""
method = 'other'
if self.lr_source_id:
method = self.lr_source_id.sourcing_method
elif self.order_id.order_type == 'donation':
method = self.sourcing_method_donation
self.sourcing_method = method
|
jorsea/vertical-ngo
|
logistic_order_requisition_donation/model/sale_order.py
|
Python
|
agpl-3.0
| 2,298
|
class SpecExecutor:
def __init__(self, execution_framework) -> None:
super().__init__()
self.execution_framework = execution_framework
def execute(self, specs):
suite = self.execution_framework.create_suite()
for spec_metadata in specs:
spec_class_module = spec_metadata.owning_module
for spec_feature in spec_metadata.features:
self.execution_framework.append_test(suite, spec_class_module(spec_feature))
return self.execution_framework.run(suite)
|
Luftzig/nimoy
|
nimoy/runner/spec_executor.py
|
Python
|
apache-2.0
| 541
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
PrivateLinkFactory,
)
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def registration_with_children(user):
project = ProjectFactory(creator=user)
NodeFactory(parent=project, creator=user)
NodeFactory(parent=project, creator=user)
return RegistrationFactory(
project=project
)
@pytest.fixture()
def registration_with_children_url(registration_with_children):
return '/{}registrations/{}/children/'.format(
API_BASE,
registration_with_children._id,
)
@pytest.fixture()
def view_only_link(registration_with_children):
view_only_link = PrivateLinkFactory(name='testlink')
view_only_link.nodes.add(registration_with_children)
view_only_link.save()
return view_only_link
@pytest.fixture()
def registration_with_children_approved(user, registration_with_children):
registration_with_children._initiate_approval(user)
approval_token = registration_with_children.registration_approval.approval_state[user._id]['approval_token']
registration_with_children.registration_approval.approve(user, approval_token)
return registration_with_children
@pytest.fixture()
def registration_with_children_approved_url(registration_with_children_approved):
return '/{}registrations/{}/children/'.format(
API_BASE,
registration_with_children_approved._id,
)
@pytest.mark.django_db
class TestRegistrationsChildrenList:
def test_registrations_children_list(self, user, app, registration_with_children, registration_with_children_url):
component_one, component_two = registration_with_children.nodes
res = app.get(registration_with_children_url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert component_one._id in ids
assert component_two._id in ids
def test_return_registrations_list_no_auth_approved(self, user, app, registration_with_children_approved, registration_with_children_approved_url):
component_one, component_two = registration_with_children_approved.nodes
res = app.get(registration_with_children_approved_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert component_one._id in ids
assert component_two._id in ids
def test_registrations_list_no_auth_unapproved(self, user, app, registration_with_children, registration_with_children_url):
res = app.get(registration_with_children_url, expect_errors=True)
assert res.status_code == 401
assert res.content_type == 'application/vnd.api+json'
def test_registration_children_no_auth_vol(self, user, app, registration_with_children,
registration_with_children_url, view_only_link):
# viewed through private link
component_one, component_two = registration_with_children.nodes
# get registration related_counts with vol before vol is attached to components
node_url = '/{}registrations/{}/?related_counts=children&view_only={}'.format(API_BASE,
registration_with_children._id, view_only_link.key)
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# view only link is not attached to components
view_only_link_url = '{}?view_only={}'.format(registration_with_children_url, view_only_link.key)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert len(ids) == 0
assert component_one._id not in ids
assert component_two._id not in ids
# view only link now attached to components
view_only_link.nodes.add(component_one)
view_only_link.nodes.add(component_two)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert component_one._id in ids
assert component_two._id in ids
# get registration related_counts with vol once vol is attached to components
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 2
# make private vol anonymous
view_only_link.anonymous = True
view_only_link.save()
res = app.get(view_only_link_url)
assert 'contributors' not in res.json['data'][0]['relationships']
# delete vol
view_only_link.is_deleted = True
view_only_link.save()
res = app.get(view_only_link_url, expect_errors=True)
assert res.status_code == 401
@pytest.mark.django_db
class TestRegistrationChildrenListFiltering:
def test_registration_child_filtering(self, app, user, registration_with_children):
component_one, component_two = registration_with_children.nodes
url = '/{}registrations/{}/children/?filter[title]={}'.format(
API_BASE,
registration_with_children._id,
component_one.title
)
res = app.get(url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert component_one._id in ids
assert component_two._id not in ids
|
pattisdr/osf.io
|
api_tests/registrations/views/test_registrations_childrens_list.py
|
Python
|
apache-2.0
| 5,625
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EventGoal'
db.create_table('events_eventgoal', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=127)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=127, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('events', ['EventGoal'])
# Adding M2M table for field goals on 'Event'
db.create_table('events_event_goals', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm['events.event'], null=False)),
('eventgoal', models.ForeignKey(orm['events.eventgoal'], null=False))
))
db.create_unique('events_event_goals', ['event_id', 'eventgoal_id'])
def backwards(self, orm):
# Deleting model 'EventGoal'
db.delete_table('events_eventgoal')
# Removing M2M table for field goals on 'Event'
db.delete_table('events_event_goals')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.attendance': {
'Meta': {'object_name': 'Attendance'},
'date_subscribed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'events.event': {
'Meta': {'ordering': "['start']", 'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_attended'", 'symmetrical': 'False', 'through': "orm['events.Attendance']", 'to': "orm['auth.User']"}),
'budget_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_budget_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['remozilla.Bug']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_categories'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'converted_visitors': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'estimated_attendance': ('django.db.models.fields.PositiveIntegerField', [], {}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'extra_content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'goals': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_goals'", 'symmetrical': 'False', 'to': "orm['events.EventGoal']"}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'mozilla_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events_created'", 'to': "orm['auth.User']"}),
'planning_pad_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'swag_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_swag_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['remozilla.Bug']"}),
'times_edited': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'events.eventcomment': {
'Meta': {'ordering': "['id']", 'object_name': 'EventComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'events.eventgoal': {
'Meta': {'ordering': "['name']", 'object_name': 'EventGoal'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '127', 'blank': 'True'})
},
'events.metric': {
'Meta': {'object_name': 'Metric'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metrics'", 'to': "orm['events.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'outcome': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
'remozilla.bug': {
'Meta': {'ordering': "['-bug_last_change_time']", 'object_name': 'Bug'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_assigned'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'bug_creation_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bug_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'bug_last_change_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'cc': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'bugs_cced'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'component': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'council_vote_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_created'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'first_comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resolution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'whiteboard': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'})
}
}
complete_apps = ['events']
|
chirilo/remo
|
remo/events/migrations/0012_auto__add_eventgoal.py
|
Python
|
bsd-3-clause
| 12,466
|
#!/usr/bin/python2
#
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# trigger.py:
# Helper script for triggering GPU tests on swarming.
import argparse
import json
import hashlib
import logging
import os
import re
import subprocess
import sys
# This is the same as the trybots.
DEFAULT_TASK_PRIORITY = 30
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(sys.argv[0]))
parser.add_argument('gn_path', help='path to GN. (e.g. out/Release)')
parser.add_argument('test', help='test name. (e.g. angle_end2end_tests)')
parser.add_argument('os_dim', help='OS dimension. (e.g. Windows-10)')
parser.add_argument('-s', '--shards', default=1, help='number of shards', type=int)
parser.add_argument('-p', '--pool', default='Chrome-GPU', help='swarming pool')
parser.add_argument('-g', '--gpu', help='GPU dimension. (e.g. intel-hd-630-win10-stable)')
parser.add_argument('-t', '--device-type', help='Android device type (e.g. bullhead)')
parser.add_argument('-o', '--device-os', help='Android OS.')
parser.add_argument('-v', '--verbose', action='store_true', help='Verbose logging.')
parser.add_argument(
'--priority',
help='Task priority. Default is %s. Use judiciously.' % DEFAULT_TASK_PRIORITY,
default=DEFAULT_TASK_PRIORITY)
return parser.parse_known_args()
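# Example invocation (values mirror the argument help strings above):
#   python trigger.py out/Release angle_end2end_tests Windows-10 \
#       -s 4 -g intel-hd-630-win10-stable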
def invoke_mb(args):
mb_script_path = os.path.join('tools', 'mb', 'mb.py')
mb_args = ['python', mb_script_path] + args
# Attempt to detect standalone vs chromium component build.
is_standalone = not os.path.isdir(os.path.join('third_party', 'angle'))
if is_standalone:
logging.info('Standalone mode detected.')
mb_args += ['-i', os.path.join('infra', 'gn_isolate_map.pyl')]
logging.info('Invoking mb: %s' % ' '.join(mb_args))
return subprocess.check_output(mb_args)
def main():
args, unknown = parse_args()
if args.verbose:
logging.basicConfig(level='INFO')
path = args.gn_path.replace('\\', '/')
out_gn_path = '//' + path
out_file_path = os.path.join(*path.split('/'))
get_command_output = invoke_mb(['get-swarming-command', out_gn_path, args.test, '--as-list'])
swarming_cmd = json.loads(get_command_output)
logging.info('Swarming command: %s' % ' '.join(swarming_cmd))
invoke_mb(['isolate', out_gn_path, args.test])
isolate_cmd_path = os.path.join('tools', 'luci-go', 'isolate')
isolate_file = os.path.join(out_file_path, '%s.isolate' % args.test)
archive_file = os.path.join(out_file_path, '%s.archive.json' % args.test)
isolate_args = [
isolate_cmd_path, 'archive', '-i', isolate_file, '-cas-instance', 'chromium-swarm',
'-dump-json', archive_file
]
logging.info('Invoking isolate: %s' % ' '.join(isolate_args))
subprocess.check_call(isolate_args)
with open(archive_file) as f:
digest = json.load(f).get(args.test)
    logging.info('Got a CAS digest %s' % digest)
swarming_script_path = os.path.join('tools', 'luci-go', 'swarming')
swarming_args = [
swarming_script_path, 'trigger', '-S', 'chromium-swarm.appspot.com', '-d',
'os=' + args.os_dim, '-d', 'pool=' + args.pool, '-digest', digest
]
# Set priority. Don't abuse this!
swarming_args += ['-priority', str(args.priority)]
# Define a user tag.
try:
whoami = subprocess.check_output(['whoami'])
# Strip extra stuff (e.g. on Windows we are 'hostname\username')
whoami = re.sub(r'\w+[^\w]', '', whoami.strip())
swarming_args += ['-user', whoami]
    except Exception:
        # best effort only; trigger without a user tag if whoami fails
        pass
if args.gpu:
swarming_args += ['-d', 'gpu=' + args.gpu]
if args.device_type:
swarming_args += ['-d', 'device_type=' + args.device_type]
if args.device_os:
swarming_args += ['-d', 'device_os=' + args.device_os]
cmd_args = ['-relative-cwd', args.gn_path, '-raw-cmd', '--'] + swarming_cmd
if unknown:
cmd_args += unknown
if args.shards > 1:
for i in range(args.shards):
shard_args = swarming_args[:]
shard_args.extend([
'--env',
'GTEST_TOTAL_SHARDS=%d' % args.shards,
'--env',
'GTEST_SHARD_INDEX=%d' % i,
])
shard_args += cmd_args
logging.info('Invoking swarming: %s' % ' '.join(shard_args))
subprocess.call(shard_args)
else:
swarming_args += cmd_args
logging.info('Invoking swarming: %s' % ' '.join(swarming_args))
subprocess.call(swarming_args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
ppy/angle
|
scripts/trigger.py
|
Python
|
bsd-3-clause
| 4,776
|
import math
from PySide import QtCore
from guide import Guide
PI2 = 2 * math.pi
class GuideCircle(Guide):
CW = 1
CCW = -1
def __init__(self, rect, startAngle=0.0, span=360.0, dir=CCW, follows=None):
super(GuideCircle, self).__init__(follows)
self.radiusX = rect.width() / 2.0
self.radiusY = rect.height() / 2.0
self.posX = rect.topLeft().x()
self.posY = rect.topLeft().y()
self.spanRad = span * PI2 / -360.0
if dir == GuideCircle.CCW:
self.startAngleRad = startAngle * PI2 / -360.0
self.endAngleRad = self.startAngleRad + self.spanRad
self.stepAngleRad = self.spanRad / self.length()
else:
self.startAngleRad = self.spanRad + (startAngle * PI2 / -360.0)
self.endAngleRad = startAngle * PI2 / -360.0
self.stepAngleRad = -self.spanRad / self.length()
def length(self):
return abs(self.radiusX * self.spanRad)
def startPos(self):
return QtCore.QPointF((self.posX + self.radiusX + self.radiusX * math.cos(self.startAngleRad)) * self.scaleX,
(self.posY + self.radiusY + self.radiusY * math.sin(self.startAngleRad)) * self.scaleY)
def endPos(self):
return QtCore.QPointF((self.posX + self.radiusX + self.radiusX * math.cos(self.endAngleRad)) * self.scaleX,
(self.posY + self.radiusY + self.radiusY * math.sin(self.endAngleRad)) * self.scaleY)
def guide(self, item, moveSpeed):
frame = item.guideFrame - self.startLength
end = QtCore.QPointF((self.posX + self.radiusX + self.radiusX * math.cos(self.startAngleRad + (frame * self.stepAngleRad))) * self.scaleX,
(self.posY + self.radiusY + self.radiusY * math.sin(self.startAngleRad + (frame * self.stepAngleRad))) * self.scaleY)
self.move(item, end, moveSpeed)
|
cherry-wb/SideTools
|
examples/demos/qtdemo/guidecircle.py
|
Python
|
apache-2.0
| 1,924
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# Copyright 2015-2016 Antoni Boucher (antoyo) <bouanto@zoho.com>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
#
# pylint complains when using .render() on jinja templates, so we make it shut
# up for this whole module.
"""Handler functions for file:... pages."""
import os
from qutebrowser.browser.webkit.network import schemehandler, networkreply
from qutebrowser.utils import jinja
def get_file_list(basedir, all_files, filterfunc):
"""Get a list of files filtered by a filter function and sorted by name.
Args:
basedir: The parent directory of all files.
all_files: The list of files to filter and sort.
filterfunc: The filter function.
Return:
A list of dicts. Each dict contains the name and absname keys.
"""
items = []
for filename in all_files:
absname = os.path.join(basedir, filename)
if filterfunc(absname):
items.append({'name': filename, 'absname': absname})
return sorted(items, key=lambda v: v['name'].lower())
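# For instance (a sketch), listing only the regular files in a directory:
#   entries = get_file_list('/tmp', os.listdir('/tmp'), os.path.isfile)
#   # -> [{'name': ..., 'absname': ...}, ...], sorted case-insensitively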
def is_root(directory):
"""Check if the directory is the root directory.
Args:
directory: The directory to check.
Return:
Whether the directory is a root directory or not.
"""
# If you're curious as why this works:
# dirname('/') = '/'
# dirname('/home') = '/'
# dirname('/home/') = '/home'
# dirname('/home/foo') = '/home'
# basically, for files (no trailing slash) it removes the file part, and
# for directories, it removes the trailing slash, so the only way for this
# to be equal is if the directory is the root directory.
return os.path.dirname(directory) == directory
def parent_dir(directory):
"""Return the parent directory for the given directory.
Args:
directory: The path to the directory.
Return:
The path to the parent directory.
"""
return os.path.normpath(os.path.join(directory, os.pardir))
def dirbrowser_html(path):
"""Get the directory browser web page.
Args:
path: The directory path.
Return:
The HTML of the web page.
"""
title = "Browse directory: {}".format(path)
if is_root(path):
parent = None
else:
parent = parent_dir(path)
try:
all_files = os.listdir(path)
except OSError as e:
html = jinja.render('error.html',
title="Error while reading directory",
url='file:///{}'.format(path), error=str(e),
icon='', qutescheme=False)
return html.encode('UTF-8', errors='xmlcharrefreplace')
files = get_file_list(path, all_files, os.path.isfile)
directories = get_file_list(path, all_files, os.path.isdir)
html = jinja.render('dirbrowser.html', title=title, url=path, icon='',
parent=parent, files=files, directories=directories)
return html.encode('UTF-8', errors='xmlcharrefreplace')
class FileSchemeHandler(schemehandler.SchemeHandler):
"""Scheme handler for file: URLs."""
def createRequest(self, _op, request, _outgoing_data):
"""Create a new request.
Args:
request: const QNetworkRequest & req
_op: Operation op
_outgoing_data: QIODevice * outgoingData
Return:
A QNetworkReply for directories, None for files.
"""
path = request.url().toLocalFile()
if os.path.isdir(path):
data = dirbrowser_html(path)
return networkreply.FixedDataNetworkReply(
request, data, 'text/html', self.parent())
|
EliteTK/qutebrowser
|
qutebrowser/browser/webkit/network/filescheme.py
|
Python
|
gpl-3.0
| 4,374
|
#!/usr/bin/python
import logging
import logging.handlers
def setupLogger(log_path, verbose):
logger = logging.getLogger('hive')
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.handlers.TimedRotatingFileHandler(log_path, when="midnight", backupCount=5)
fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    if verbose:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
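# Typical usage (a sketch; the log path is illustrative):
#   setupLogger('/var/log/hive/hive.log', verbose=False)
#   logging.getLogger('hive').info('logger configured')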
|
krcooke/hive-home
|
bin/utils/logger.py
|
Python
|
apache-2.0
| 803
|
"""
Implements an indirect method to solve the optimal control
problem of a varying mass spacecraft controlled by one
thruster capable of vectoring.
Dario Izzo 2016
"""
from __future__ import print_function  # the __main__ block uses print(..., end="")
from PyGMO.problem._base import base
from numpy.linalg import norm
from math import sqrt, sin, cos, atan2, pi
from scipy.integrate import odeint
from numpy import linspace, vstack, hstack
import numpy as np
from copy import deepcopy
import sys
class tv_landing(base):
def __init__(
self,
state0 = [0., 1000., 20., -5., 0., 0., 10000.],
statet = [0., 0., 0., 0., 0., 0., 9758.695805],
c1 = 44000.,
c2 = 311. * 9.81,
c3 = 300.,
g = 1.6229,
homotopy = 0.,
pinpoint = False
):
"""
        USAGE: tv_landing(state0, statet, c1, c2, c3, g, homotopy, pinpoint)
* state0: initial state [x, y, vx, vy, theta, omega, m] in m, m , m/s, m/s, rad, rad/s, kg
* statet: target state [x, y, vx, vy, theta, omega, m] in m, m, m/s, m/s, rad, rad/s, kg
* c1: maximum thrusts for the main thruster [N]
* c2: veff, Isp*g0 (m / s)
* c3: characteristic length (I / m / d) [m]
* g: planet gravity [m/s**2]
* homotopy: homotopy parameter, 0->QC, 1->MOC
* pinpoint: if True toggles the final constraint on the landing x
"""
super(tv_landing, self).__init__(8, 0, 1, 8, 0, 1e-4)
# We store the raw inputs for convenience
self.state0_input = state0
self.statet_input = statet
# We define the non dimensional units (will use these from here on)
self.R = 1000.
self.V = 100.
self.M = 10000.
self.A = (self.V * self.V) / self.R
self.T = self.R / self.V
self.F = self.M * self.A
# We store the parameters
self.c1 = c1 / self.F
self.c2 = c2 / self.V
self.c3 = c3 / self.R
self.g = g / self.A
# We compute the initial and final state in the new units
self.state0 = self._non_dim(self.state0_input)
self.statet = self._non_dim(self.statet_input)
# We set the bounds (these will only be used to initialize the population)
self.set_bounds([-1] * 7 + [1. / self.T], [1] * 7 + [200. / self.T])
# Activates a pinpoint landing
self.pinpoint = pinpoint
# Selects the homotopy parameter, 0->QC, 1->MOC
self.homotopy = homotopy
def _objfun_impl(self, x):
return(1.,) # constraint satisfaction, no objfun
def _compute_constraints_impl(self, x):
# Perform one forward shooting
xf, info = self._shoot(x)
# Assembling the equality constraint vector
ceq = list([0]*8)
# Final conditions
if self.pinpoint:
#Pinpoint landing x is fixed lx is free
ceq[0] = (xf[-1][0] - self.statet[0] ) * 1
else:
#Transversality condition: x is free lx is 0
ceq[0] = xf[-1][7] * 1
ceq[1] = (xf[-1][1] - self.statet[1] ) * 1
ceq[2] = (xf[-1][2] - self.statet[2] ) * 1
ceq[3] = (xf[-1][3] - self.statet[3] ) * 1
ceq[4] = (xf[-1][4] - self.statet[4] ) * 1
ceq[5] = (xf[-1][5] - self.statet[5] ) * 1
# Transversality condition on omega and mass (free)
# ceq[5] = xf[-1][12] * 1
ceq[6] = xf[-1][13] * 1
# Free time problem, Hamiltonian must be 0
ceq[7] = self._hamiltonian(xf[-1]) * 1
return ceq
def _hamiltonian(self, full_state):
state = full_state[:7]
costate = full_state[7:]
# Applying Pontryagin minimum principle
controls = self._pontryagin_minimum_principle(full_state)
# Computing the R.H.S. of the state eom
f_vett = self._eom_state(state, controls)
# Assembling the Hamiltonian
H = 0.
for l, f in zip(costate, f_vett):
H += l * f
# Adding the integral cost function (WHY -)
H += self._cost(state, controls)
return H
def _cost(self,state, controls):
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
retval = self.homotopy * c1 / c2 * u + (1 - self.homotopy) * c1**2 / c2 * u**2
return retval
def _eom_state(self, state, controls):
# Renaming variables
x,y,vx,vy,theta,omega,m = state
g = self.g
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
tdotit = ut[0] * cos(theta) - ut[1] * sin(theta)
# Equations for the state
dx = vx
dy = vy
dvx = c1 * u / m * ut[0]
dvy = c1 * u / m * ut[1] - g
dtheta = omega
domega = - c1 / c3 * u / m * tdotit
dm = - c1 / c2 * u
if m < 1e-4:
dm = 0
return [dx, dy, dvx, dvy, dtheta, domega, dm]
def _eom_costate(self, full_state, controls):
# Renaming variables
x,y,vx,vy,theta,omega,m,lx,ly,lvx,lvy,ltheta,lomega,lm = full_state
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
# Equations for the costate
tdotit = ut[0] * cos(theta) - ut[1] * sin(theta)
tdotitheta = ut[0] * sin(theta) + ut[1] * cos(theta)
lvdott = lvx * ut[0] + lvy * ut[1]
dlx = 0.
dly = 0.
dlvx = - lx
dlvy = - ly
dltheta = - lomega / c3 * c1 * u / m * tdotitheta
dlomega = - ltheta
dlm = c1 / m**2 * u * (lvdott - lomega / c3 * tdotit)
return [dlx, dly, dlvx, dlvy, dltheta, dlomega, dlm]
def _pontryagin_minimum_principle(self, full_state):
# Renaming variables
x,y,vx,vy,theta,omega,m,lx,ly,lvx,lvy,ltheta,lomega,lm = full_state
c1 = self.c1
c2 = self.c2
c3 = self.c3
lauxx = lvx - lomega / c3 * cos(theta)
lauxy = lvy + lomega / c3 * sin(theta)
laux = sqrt(lauxx**2 + lauxy**2)
# ut
ut = [0]*2
ut[0] = - lauxx / laux
ut[1] = - lauxy / laux
ang = np.arctan2(ut[0],ut[1])
ang = ang-theta
        lim = 10.0 / 360.0 * 2 * np.pi  # ~10 degree thrust-vectoring cone, explicit float division
t_t = min(max(-lim, ang),lim)
t_t += theta
# print(np.arctan2(ut[0],ut[1]),ang,t_t, lim)
ut[0] = sin(t_t)
ut[1] = cos(t_t)
# u
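        # With homotopy == 1 the cost is mass-optimal and the throttle is
        # bang-bang, decided by the sign of the switching function S below;
        # otherwise the smoothed quadratic control is used, saturated to [0, 1].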
if self.homotopy==1:
S = 1. - lm - laux * c2 / m
if S >= 0:
u=0.
if S < 0:
u=1.
else:
u = 1. / 2. / c1 / (1.-self.homotopy) * (lm + laux * c2 / m - self.homotopy)
u = min(u,1.) # NOTE: this can be increased to help convergence?
u = max(u,0.)
return u, ut
def _eom(self, full_state, t):
# Applying Pontryagin minimum principle
state = full_state[:7]
controls = self._pontryagin_minimum_principle(full_state)
# Equations for the state
dstate = self._eom_state(state, controls)
# Equations for the co-states
dcostate = self._eom_costate(full_state, controls)
return dstate + dcostate
def _shoot(self, x):
# Numerical Integration
xf, info = odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), linspace(0, x[-1],100), rtol=1e-13, atol=1e-13, full_output=1, mxstep=2000)
return xf, info
def _simulate(self, x, tspan):
# Numerical Integration
xf, info = odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), tspan, rtol=1e-12, atol=1e-12, full_output=1, mxstep=2000)
return xf, info
def _non_dim(self, state):
xnd = deepcopy(state)
xnd[0] /= self.R
xnd[1] /= self.R
xnd[2] /= self.V
xnd[3] /= self.V
xnd[4] /= 1.
xnd[5] *= self.T
xnd[6] /= self.M
return xnd
def _dim_back(self, state):
xd = deepcopy(state)
xd[0] *= self.R
xd[1] *= self.R
xd[2] *= self.V
xd[3] *= self.V
xd[4] *= 1.
xd[5] /= self.T
xd[6] *= self.M
return xd
def plot(self, x):
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
# Producing the data
tspan = linspace(0, x[-1], 300)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
ux = list(); uy=list()
for line in full_state:
res.append(self._dim_back(line[:7]))
controls.append(self._pontryagin_minimum_principle(line))
ux.append(controls[-1][0] * controls[-1][1][0])
uy.append(controls[-1][0] * controls[-1][1][1])
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
theta = list()
omega = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
theta.append(state[4])
omega.append(state[5])
m.append(state[6])
fig = plt.figure()
ax = fig.gca()
ax.plot(x, y, color='r', label='Trajectory')
ax.quiver(x, y, ux, uy, label='Thrust', pivot='tail', width=0.001)
ax.set_ylim(0,self.state0_input[1]+500)
f, axarr = plt.subplots(3, 2)
axarr[0,0].plot(x, y)
axarr[0,0].set_xlabel('x'); axarr[0,0].set_ylabel('y');
axarr[1,0].plot(vx, vy)
axarr[1,0].set_xlabel('vx'); axarr[1,0].set_ylabel('vy');
axarr[2,0].plot(tspan, theta)
axarr[2,0].set_xlabel('t'); axarr[2,0].set_ylabel('theta');
axarr[0,1].plot(tspan, [controls[ix][0] for ix in range(len(controls))],'r')
axarr[0,1].set_ylabel('u')
axarr[0,1].set_xlabel('t')
axarr[1,1].plot(tspan, [controls[ix][1][0] for ix in range(len(controls))],'k')
axarr[1,1].set_ylabel('sin(ut)')
axarr[1,1].set_xlabel('t')
axarr[2,1].plot(tspan, m)
axarr[2,1].set_xlabel('t'); axarr[2,1].set_ylabel('m');
plt.ion()
plt.show()
return axarr
def human_readable_extra(self):
s = "\n\tDimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0_input) + "\n"
s = s + "\tTarget state: " + str(self.statet_input) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1 * self.F) + "\n"
s = s + "\tIsp*g0: " + str(self.c2 * self.V) + ", gravity: " + str(self.g * self.A) + "\n"
s = s + "\n\tNon-dimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0) + "\n"
s = s + "\tTarget state: " + str(self.statet) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1) + "\n"
s = s + "\tIsp*g0: " + str(self.c2) + ", gravity: " + str(self.g) + "\n\n"
s = s + "\tHomotopy parameter: " + str(self.homotopy)
s = s + "\n\tPinpoint?: " + str(self.pinpoint)
return s
def produce_data(self, x, npoints):
# Producing the data
tspan = linspace(0, x[-1], npoints)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
u1 = list(); u2 = list()
for line in full_state:
res.append(self._dim_back(line[:7]))
controls.append(self._pontryagin_minimum_principle(line))
u1.append(controls[-1][0])
u2.append(controls[-1][1])
u1 = vstack(u1)
u2 = vstack(u2)
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
theta = list(); omega = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
theta.append(state[4])
omega.append(state[5])
m.append(state[6])
tspan = vstack(tspan)
x = vstack(x)
y = vstack(y)
vx = vstack(vx)
vy = vstack(vy)
theta =vstack(theta)
omega = vstack(omega)
m = vstack(m)
return (hstack((tspan, x, y, vx, vy, theta, omega, m)), hstack((u1, u2)))
if __name__ == "__main__":
from PyGMO import *
from random import random
algo = algorithm.snopt(200, opt_tol=1e-5, feas_tol=1e-5)
#algo = algorithm.scipy_slsqp(max_iter = 1000,acc = 1E-8,epsilon = 1.49e-08, screen_output = True)
algo.screen_output = False
# Define the starting area (x0 will be irrelevant if pinpoint is not True)
x0b = [-1, 1]
y0b = [500, 2000]
vx0b = [-1, 1]
vy0b = [5, -40]
m0b = [8000, 12000]
x0 = random() * (x0b[1] - x0b[0]) + x0b[0]
y0 = random() * (y0b[1] - y0b[0]) + y0b[0]
vx0 = random() * (vx0b[1] - vx0b[0]) + vx0b[0]
vy0 = random() * (vy0b[1] - vy0b[0]) + vy0b[0]
m0 = random() * (m0b[1] - m0b[0]) + m0b[0]
theta0 = 0.
omega0 = 0.
state0 = [x0, y0, vx0, vy0, theta0, omega0, m0]
# Problem definition
prob = tv_landing(state0 = state0, pinpoint=True, homotopy=0.)
print("IC: {}".format(state0))
# Attempting to solve the QC problem
n_attempts = 1
for i in range(1, n_attempts + 1):
# Start with attempts
print("Attempt # {}".format(i), end="")
pop = population(prob)
pop.push_back([0,0,0,-0.015,0,0,0,5])
#pop.push_back(x0)
pop = algo.evolve(pop)
        # Log constraints and chromosome
print("\nc: ",end="")
print(["{0:.2g}".format(it) for it in pop[0].cur_c])
print("x: ",end="")
print(["{0:.2g}".format(it) for it in pop[0].cur_x])
        # If successful, proceed
if (prob.feasibility_x(pop[0].cur_x)):
break
if not prob.feasibility_x(pop[0].cur_x):
print("No QC solution! Ending here :(")
sys.exit(0)
else:
print("Found QC solution!! Starting Homotopy")
x = pop[0].cur_x
print("state0 = {}".format(state0))
print("x = {}".format(x))
#sys.exit(0)
# We proceed to solve by homotopy the mass optimal control
# Minimum and maximum step for the continuation
h_min = 1e-4
h_max = 0.2
# Starting step
h = 0.2
trial_alpha = h
alpha = 0
x = pop[0].cur_x
algo = algorithm.scipy_slsqp(max_iter = 40,acc = 1E-8,epsilon = 1.49e-08, screen_output = True)
algo.screen_output = False
while True:
if trial_alpha > 1:
trial_alpha = 1.
print("{0:.5g}, \t {1:.5g} \t".format(alpha, trial_alpha), end="")
print("({0:.5g})\t".format(h), end="")
prob = tv_landing(state0 = state0, pinpoint=True, homotopy=trial_alpha)
pop = population(prob)
pop.push_back(x)
pop = algo.evolve(pop)
if (prob.feasibility_x(pop[0].cur_x)):
x = pop[0].cur_x
if trial_alpha == 1:
print(" Success")
break
print(" Success")
h = h * 2.
h = min(h, h_max)
alpha = trial_alpha
trial_alpha = trial_alpha + h
else:
print(" - Failed, ", end="")
print("norm c: {0:.4g}".format(norm(pop[0].cur_c)))
h = h * 0.5
if h < h_min:
print("\nContinuation step too small aborting :(")
sys.exit(0)
trial_alpha = alpha + h
|
darioizzo/optimal_landing
|
indirect_method/tv_landing.py
|
Python
|
lgpl-3.0
| 15,654
|
from time import time
import math
import numpy as np
import tensorflow as tf
'''Building the CIFAR-10 VGG-Net.

Summary of available functions:
 load_cifar10 -- load the CIFAR-10 batches and one-hot encode the labels
 conv_layer   -- 2-D convolution + bias + ReLU, with TensorBoard summaries
 fc_layer     -- fully connected layer with L2 weight decay
 inference    -- assemble the VGG-style network
 loss         -- cross entropy plus the collected weight-decay terms
 train        -- RMSProp step with exponentially decayed learning rate
 evaluate     -- classification accuracy
'''
BATCH_SIZE = 64
NUM_ITERATION = 60000
NUM_CLASS = 10
NUM_IMAGE_CHANNEL = 3
NUM_IMAGE_WIDTH = 32
NUM_IMAGE_HEIGHT = 32
# CACHE_DIR = '/home/ubuntu/notebook/tensorboard/vgg-cifar10'
CACHE_DIR = '/Users/Zhang/Research/Programming/Learning-Tensorflow-by-Models'
def load_cifar10(path=None):
if path is None:
DATASET_DIR = '/Users/Zhang/Research/Deep Learning Dataset/CIFAR/cifar-10-batches-py/'
# DATASET_DIR = '/home/ubuntu/datasets/cifar-10-batches-py/'
else:
DATASET_DIR = path
# TRAINSET_NAME = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
TRAINSET_NAME = ['data_batch_1']
TESTSET_NAME = 'test_batch'
_train_batch_files = []
for name in TRAINSET_NAME:
_train_batch_files.append('{}{}'.format(DATASET_DIR, name))
test_batch = '{}{}'.format(DATASET_DIR, TESTSET_NAME)
import cPickle
_train_batches = []
for batch_file in _train_batch_files:
with open(batch_file, 'rb') as f:
_train_batches.append(cPickle.load(f))
with open(test_batch, 'rb') as f:
test_batch = cPickle.load(f)
    train_images = np.vstack([batch['data'] for batch in _train_batches])
    train_ys = np.hstack([batch['labels'] for batch in _train_batches])
test_images = test_batch['data']
test_ys = np.array(test_batch['labels'])
train_labels = np.zeros(shape=(len(train_ys), 10), dtype=np.float32)
train_labels[np.arange(len(train_ys)), train_ys] = 1
test_labels = np.zeros(shape=(len(test_ys), 10), dtype=np.float32)
test_labels[np.arange(len(test_ys)), test_ys] = 1
return train_images.astype(np.float32), train_labels, test_images.astype(np.float32), test_labels
def conv_layer(input, kernel_shape, stride, data_format='NCHW', name='conv'):
'''2-D Convolution Layer Operation in TensorFlow.
:param input: A 4-D tensor. The dimension order is determined by the value of
`data_format`.
:param kernel_shape: A list of `ints`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`.
:param stride: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`.
:param data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `NCHW`.
Specify the data format of the input and output data. With the
default format "NCHW", the data is stored in the order of:
[batch, channels, height, width].
:param name: An optional `string` for the name of this operation.
:return:
A TensorFlow operation of 2-D Convolution Layer.
'''
with tf.variable_scope(name) as scope:
kernel = tf.get_variable('W', shape=kernel_shape,
initializer=tf.truncated_normal_initializer(
stddev=math.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])),
dtype=tf.float32),
dtype=tf.float32)
# kernel = tf.get_variable('W', shape=kernel_shape,
# initializer=tf.truncated_normal_initializer(
# stddev=0.05,
# dtype=tf.float32),
# dtype=tf.float32)
bias = tf.get_variable('b', kernel_shape[3], initializer=tf.constant_initializer(0.001))
conv = tf.nn.conv2d(input, kernel, stride, 'SAME', data_format=data_format)
pre_activation = tf.nn.bias_add(conv, bias, data_format=data_format)
conv = tf.nn.relu(pre_activation, name=scope.name)
tf.summary.histogram('Convolution_layers/{}_{}'.format(name, 'activation'), conv)
tf.summary.scalar('Convolution_layers/{}_{}'.format(name, 'sparsity'), tf.nn.zero_fraction(conv))
return conv
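# Example (a sketch): a 3x3 convolution from 3 to 64 channels with unit
# stride over an NHWC batch of 32x32 RGB images:
#   images = tf.placeholder(tf.float32, [None, 32, 32, 3])
#   feat = conv_layer(images, [3, 3, 3, 64], [1, 1, 1, 1],
#                     data_format='NHWC', name='conv_demo')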
def fc_layer(input, size, name='fc', final=False):
'''Full Connected Layer in TensorFlow.
:param input: A 2-D tensor of shape
`[-1, NUM_INPUT]`.
:param size: A list of `ints`.
A 2-D tensor of shape
`[NUM_INPUT, NUM_OUTPUT]`.
    :param final: An optional `bool`. Default is `False`; `True` is for the final layer.
:param name: An optional `string` for the name of this operation.
:return:
A TensorFlow operation of Full Connected Layer.
'''
with tf.variable_scope(name) as scope:
weights = tf.get_variable('W', shape=size,
initializer=tf.truncated_normal_initializer(
stddev=math.sqrt(1.0 / (size[0]+size[1])),
dtype=tf.float32),
dtype=tf.float32)
# weights = tf.get_variable('W', shape=size,
# initializer=tf.truncated_normal_initializer(
# stddev=0.01,
# dtype=tf.float32),
# dtype=tf.float32)
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.0005, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.get_variable('b', size[1], initializer=tf.constant_initializer(0.0001))
if final is True:
fc = tf.add(tf.matmul(input, weights), biases, name=scope.name)
else:
fc = tf.nn.relu(tf.matmul(input, weights) + biases, name=scope.name)
tf.summary.histogram('Fully_connected_layers/{}_{}'.format(name, 'activation'), fc)
tf.summary.scalar('Fully_connected_layers/{}_{}'.format(name, 'sparsity'), tf.nn.zero_fraction(fc))
return fc
def inference(raw, keep_prob):
    '''Build the VGG-style inference graph.

    :param raw: A 2-D tensor of flattened input images, shape
        `[-1, NUM_IMAGE_WIDTH * NUM_IMAGE_HEIGHT * NUM_IMAGE_CHANNEL]`.
    :param keep_prob: Dropout keep probability for the fully connected layers.
    :return: The un-normalized class scores (logits), shape `[-1, NUM_CLASS]`.
    '''
with tf.variable_scope('input') as scope:
# When running on GPU, transpose the data from channels_last (NHWC) to
# channels_first (NCHW) to improve performance.
# See https://www.tensorflow.org/performance/performance_guide#data_formats
x = tf.reshape(raw, shape=[-1, NUM_IMAGE_CHANNEL, NUM_IMAGE_WIDTH, NUM_IMAGE_HEIGHT], name='input_images')
data_format = 'NCHW'
        if not tf.test.is_built_with_cuda():
data_format = 'NHWC'
x = tf.transpose(x, [0, 2, 3, 1])
# convolution group 1, output - [16, 16, 64]
conv1 = conv_layer(x, [3, 3, 3, 64], [1, 1, 1, 1],
data_format=data_format, name='conv1')
norm1 = tf.nn.lrn(conv1, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
if data_format == 'NCHW':
pool1 = tf.nn.max_pool(norm1, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format=data_format, name='pool1')
else:
pool1 = tf.nn.max_pool(norm1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format, name='pool1')
# convolution group 2, output - [8, 8, 128]
conv2 = conv_layer(pool1, [3, 3, 64, 128], [1, 1, 1, 1],
data_format=data_format, name='conv2')
norm2 = tf.nn.lrn(conv2, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
if data_format == 'NCHW':
pool2 = tf.nn.max_pool(norm2, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format=data_format, name='pool2')
else:
pool2 = tf.nn.max_pool(norm2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format, name='pool2')
# convolution group 3, output - [4, 4, 256]
conv3_1 = conv_layer(pool2, [3, 3, 128, 256], [1, 1, 1, 1],
data_format=data_format, name='conv3_1')
norm3_1 = tf.nn.lrn(conv3_1, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm3_1')
conv3_2 = conv_layer(norm3_1, [3, 3, 256, 256], [1, 1, 1, 1],
data_format=data_format, name='conv3_2')
norm3_2 = tf.nn.lrn(conv3_2, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm3_2')
if data_format == 'NCHW':
pool3 = tf.nn.max_pool(norm3_2, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format=data_format,
name='pool3')
else:
pool3 = tf.nn.max_pool(norm3_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format,
name='pool3')
# convolution group 4, output - [2, 2, 512]
conv4_1 = conv_layer(pool3, [3, 3, 256, 512], [1, 1, 1, 1],
data_format=data_format, name='conv4_1')
norm4_1 = tf.nn.lrn(conv4_1, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm4_1')
conv4_2 = conv_layer(norm4_1, [3, 3, 512, 512], [1, 1, 1, 1],
data_format=data_format, name='conv4_2')
norm4_2 = tf.nn.lrn(conv4_2, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm4_2')
if data_format == 'NCHW':
pool4 = tf.nn.max_pool(norm4_2, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format=data_format,
name='pool4')
else:
pool4 = tf.nn.max_pool(norm4_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format,
name='pool4')
# convolution group 5, output - [1, 1, 512]
conv5_1 = conv_layer(pool4, [3, 3, 512, 512], [1, 1, 1, 1],
data_format=data_format, name='conv5_1')
norm5_1 = tf.nn.lrn(conv5_1, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm5_1')
conv5_2 = conv_layer(norm5_1, [3, 3, 512, 512], [1, 1, 1, 1],
data_format=data_format, name='conv5_2')
    norm5_2 = tf.nn.lrn(conv5_2, depth_radius=5, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm5_2')
    if data_format == 'NCHW':
        pool5 = tf.nn.max_pool(norm5_2, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format=data_format,
                               name='pool5')
    else:
        pool5 = tf.nn.max_pool(norm5_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format,
                               name='pool5')
pool5_flat = tf.reshape(pool5, [-1, 1 * 1 * 512], name='flatten')
fc1 = fc_layer(pool5_flat, [1 * 1 * 512, 128], name='fc1', final=False)
    dropout1 = tf.nn.dropout(fc1, keep_prob)
    fc2 = fc_layer(dropout1, [128, 64], name='fc2', final=False)
    dropout2 = tf.nn.dropout(fc2, keep_prob)
    softmax_linear = fc_layer(dropout2, [64, NUM_CLASS], name='fc3', final=True)
return softmax_linear
def loss(logits, labels):
    '''Compute the total loss.

    :param logits: Un-normalized class scores from `inference`.
    :param labels: One-hot encoded ground-truth labels.
    :return: Cross-entropy loss plus all collected weight-decay (L2) terms.
    '''
with tf.variable_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
return total_loss
def train(total_loss, global_step):
    # Decay the learning rate exponentially based on the number of steps (this schedule worked best).
lr = tf.train.exponential_decay(0.001,
global_step,
500,
0.9,
staircase=True)
# lr = tf.train.exponential_decay(0.001,
# global_step,
# 2500,
# 0.316,
# staircase=True) not good
# lr = 0.0005
tf.summary.scalar('learning_rate/lr', lr)
optimizer = tf.train.RMSPropOptimizer(lr)
grads = optimizer.compute_gradients(total_loss)
    apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
if var is not None:
tf.summary.histogram(var.op.name, var)
    return apply_gradient_op
def evaluate(logits, labels, name='Train'):
y_pred_cls = tf.argmax(logits, axis=1)
correct_prediction = tf.equal(y_pred_cls, tf.argmax(labels, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('Accuracy/{}'.format(name), accuracy)
return accuracy
if __name__ == '__main__':
# load data
train_images, train_labels, test_images, test_labels = load_cifar10()
train_images = (train_images - 128) / 128.0
test_images = (test_images - 128) / 128.0
# build variables for training procedure.
global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# build train operation and variables.
train_x = tf.placeholder(tf.float32, shape=[None, NUM_IMAGE_WIDTH * NUM_IMAGE_HEIGHT * NUM_IMAGE_CHANNEL],
name='train_images')
train_y = tf.placeholder(tf.float32, shape=[None, NUM_CLASS], name='train_label')
train_logits = inference(train_x, keep_prob)
loss_op = loss(train_logits, train_y)
train_op = train(loss_op, global_step)
    accuracy_op = evaluate(train_logits, train_y, name='Train')
with tf.Session() as session:
session.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
saver = tf.train.Saver()
tf_train_writer = tf.summary.FileWriter(CACHE_DIR, session.graph)
for iter in range(NUM_ITERATION):
randidx = np.random.randint(len(train_images), size=BATCH_SIZE)
batch_train_images = train_images[randidx]
batch_train_labels = train_labels[randidx]
start_time = time()
_global_step, _ = session.run([global_step, train_op],
feed_dict={train_x: batch_train_images,
train_y: batch_train_labels,
keep_prob: 0.5})
duration = time() - start_time
if (iter + 1) % 10 == 0:
                _loss, _train_accuracy = session.run([loss_op, accuracy_op],
feed_dict={train_x: batch_train_images,
train_y: batch_train_labels,
keep_prob: 1.0})
msg = "Global Step: {0:>6}, accuracy: {1:>6.1%}, loss = {2:.2f} ({3:.1f} examples/sec, {4:.2f} sec/batch)"
print(msg.format(_global_step, _train_accuracy, _loss, BATCH_SIZE / duration, duration))
if (iter + 1) % 100 == 0:
data_merged, global_step_iter = session.run([merged, global_step],
feed_dict={train_x: batch_train_images,
train_y: batch_train_labels,
keep_prob: 1.0}
)
                _eval_accuracy = session.run(accuracy_op,
feed_dict={train_x: test_images,
train_y: test_labels,
keep_prob: 1.0})
print("Accuracy on Test-Set: {0:.2f}%".format(_eval_accuracy * 100.0))
summary = tf.Summary(value=[
tf.Summary.Value(tag="Accuracy/Test", simple_value=_eval_accuracy),
])
tf_train_writer.add_summary(data_merged, global_step_iter)
tf_train_writer.add_summary(summary, global_step_iter)
saver.save(session, save_path=CACHE_DIR, global_step=global_step)
print("Saved checkpoint.")
|
zhangjunpeng9354/Learning-Tensorflow-by-Models
|
vgg_cifar10/vgg_cifar10_train.py
|
Python
|
mit
| 16,323
|
#
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
from DIMSEparameters import *
from DULparameters import *
from dicom.dataset import Dataset
import dsutils
from struct import pack, unpack
from dicom.UID import ImplicitVRLittleEndian
#
# pydicom's dictionary is missing the command-group tags. Add them.
#
from dicom._dicom_dict import DicomDictionary
import itertools
import logging
logger = logging.getLogger(__name__)
DicomDictionary.update({
0x00000000: ('UL', '1', "CommandGroupLength", ''),
0x00000002: ('UI', '1', "Affected SOP class", ''),
0x00000003: ('UI', '1', "RequestedSOPClassUID", ''),
0x00000100: ('US', '1', "CommandField", ''),
0x00000110: ('US', '1', "MessageID", ''),
0x00000120: ('US', '1', "MessageIDBeingRespondedTo", ''),
0x00000600: ('AE', '1', "MoveDestination", ''),
0x00000700: ('US', '1', "Priority", ''),
0x00000800: ('US', '1', "DataSetType", ''),
0x00000900: ('US', '1', "Status", ''),
0x00000901: ('AT', '1', "OffendingElement", ''),
0x00000902: ('LO', '1', "ErrorComment", ''),
0x00000903: ('US', '1', "ErrorID", ''),
0x00001000: ('UI', '1', " AffectedSOPInstanceUID", ''),
0x00001001: ('UI', '1', "RequestedSOPInstanceUID", ''),
0x00001002: ('US', '1', "EventTypeID", ''),
0x00001005: ('AT', '1', "AttributeIdentifierList", ''),
0x00001008: ('US', '1', "ActionTypeID", ''),
0x00001020: ('US', '1', "NumberOfRemainingSubOperations", ''),
0x00001021: ('US', '1', "NumberOfCompletedSubOperations", ''),
0x00001022: ('US', '1', "NumberOfFailedSubOperations", ''),
0x00001023: ('US', '1', "NumberOfWarningSubOperations", ''),
0x00001030: ('AE', '1', "MoveOriginatorApplicationEntityTitle", ''),
0x00001031: ('US', '1', "MoveOriginatorMessageID", ''),
})
"""
All DIMSE Message classes implement the following methods:
FromParams(DIMSEServiceParameter) : Builds a DIMSE message from a
DULServiceParameter
object. Used when receiving
primitives from the
DIMSEServiceUser.
ToParams() : Convert the Message into a
DIMSEServiceParameter object.
Used for sending primitives to
the DIMSEServiceUser.
    Encode()                  : Returns the encoded message as
                                one or several P-DATA parameter
                                structures.
    Decode(pdata)             : Constructs the message from one
                                or several P-DATA primitives.
FromParams Encode
|----------------------| -------> |----------| -------> |---------------|
| Service parameters | | DIMSE | | P-DATA |
| object | | message | | primitive(s) |
|______________________| <------- |__________| <------- |_______________|
ToParams Decode
"""
DEBUG = False
def fragment(maxpdulength, data):
    # Split `data` into chunks of at most maxpdulength - 6 bytes
    # (6 bytes are reserved for each PDV item header).
    s = data
    fragments = []
    maxsize = maxpdulength - 6
    while 1:
        fragments.append(s[:maxsize])
        s = s[maxsize:]
        if len(s) <= maxsize:
            if len(s) > 0:
                fragments.append(s)
            return fragments
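# For example, with maxpdulength=10 (so chunks of at most 4 bytes),
# fragment(10, 'abcdefghij') yields ['abcd', 'efgh', 'ij'].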
class DIMSEMessage:
def __init__(self):
self.CommandSet = None
self.EncodedDataSet = None
self.DataSet = None
self.encoded_command_set = ''
        self.ID = None  # set by Encode()/Decode()
self.ts = ImplicitVRLittleEndian # imposed by standard.
if self.__class__ != DIMSEMessage:
self.CommandSet = Dataset()
for ii in self.CommandFields:
self.CommandSet.add_new(ii[1], ii[2], '')
def Encode(self, id, maxpdulength):
"""Returns the encoded message as a series of P-DATA service
parameter objects"""
self.ID = id
pdatas = []
encoded_command_set = dsutils.encode(
self.CommandSet, self.ts.is_implicit_VR, self.ts.is_little_endian)
# fragment command set
pdvs = fragment(maxpdulength, encoded_command_set)
assert ''.join(pdvs) == encoded_command_set
for ii in pdvs[:-1]:
# send only one pdv per pdata primitive
pdata = P_DATA_ServiceParameters()
# not last command fragment
pdata.PresentationDataValueList = [[self.ID, pack('b', 1) + ii]]
pdatas.append(pdata)
# last command fragment
pdata = P_DATA_ServiceParameters()
# last command fragment
pdata.PresentationDataValueList = [[self.ID, pack('b', 3) + pdvs[-1]]]
pdatas.append(pdata)
# fragment data set
#if self.__dict__.has_key('DataSet') and self.DataSet:
if 'DataSet' in self.__dict__ and self.DataSet is not None:
pdvs = fragment(maxpdulength, self.DataSet)
assert ''.join(pdvs) == self.DataSet
for ii in pdvs[:-1]:
pdata = P_DATA_ServiceParameters()
# not last data fragment
pdata.PresentationDataValueList = [
[self.ID, pack('b', 0) + ii]]
pdatas.append(pdata)
pdata = P_DATA_ServiceParameters()
# last data fragment
pdata.PresentationDataValueList = [
[self.ID, pack('b', 2) + pdvs[-1]]]
pdatas.append(pdata)
return pdatas
def Decode(self, pdata):
"""Constructs itself receiving a series of P-DATA primitives.
Returns True when complete, False otherwise."""
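        # PDV message-control header byte (DICOM PS 3.8, Annex E):
        #   0 = data fragment, 2 = last data fragment,
        #   1 = command fragment, 3 = last command fragment.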
        if pdata is None:
            return False
        if pdata.__class__ != P_DATA_ServiceParameters:
            # not a pdata primitive
            return False
ii = pdata
for vv in ii.PresentationDataValueList:
# must be able to read P-DATA with several PDVs
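            # The first byte of each PDV is the message control header
            # (DICOM PS 3.8): bit 0 set -> command fragment (else data),
            # bit 1 set -> last fragment; hence 1/3 = command/last command
            # and 0/2 = data/last data below.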
self.ID = vv[0]
if unpack('b', vv[1][0])[0] in (1, 3):
logger.debug(" command fragment %s", self.ID)
self.encoded_command_set += vv[1][1:]
if unpack('b', vv[1][0])[0] == 3:
logger.debug(" last command fragment %s", self.ID)
self.CommandSet = dsutils.decode(
self.encoded_command_set, self.ts.is_implicit_VR,
self.ts.is_little_endian)
self.__class__ = MessageType[
self.CommandSet[(0x0000, 0x0100)].value]
if self.CommandSet[(0x0000, 0x0800)].value == 0x0101:
# response: no dataset
return True
elif unpack('b', vv[1][0])[0] in (0, 2):
if self.DataSet is None:
self.DataSet = ''
self.DataSet += vv[1][1:]
logger.debug(" data fragment %s", self.ID)
if unpack('b', vv[1][0])[0] == 2:
logger.debug(" last data fragment %s", self.ID)
return True
            else:
                raise RuntimeError('Unknown PDV message control header')
return False
    def SetLength(self):
        # compute the group length: the total length of all encoded command
        # elements that follow the Group Length element itself
        length = 0
        for ii in self.CommandSet.values()[1:]:
            length += len(dsutils.encode_element(ii,
                                                 self.ts.is_implicit_VR,
                                                 self.ts.is_little_endian))
        # the data set, if any, is not included in the command group length
        self.CommandSet[(0x0000, 0x0000)].value = length
def __repr__(self):
return str(self.CommandSet) + '\n'
class C_ECHO_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Data Set Type', (0x0000, 0x0800), 'US', 1)
]
DataField = None
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0030
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.DataSet = None
self.SetLength()
def ToParams(self):
tmp = C_ECHO_ServiceParameters()
tmp.MessageID = self.CommandSet[(0x0000, 0x0110)]
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
return tmp
class C_ECHO_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status', (0x0000, 0x0900), 'US', 1)
]
DataField = None
def FromParams(self, params):
if params.AffectedSOPClassUID:
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8030
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.SetLength()
def ToParams(self):
tmp = C_ECHO_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
tmp.Status = 0
return tmp
class C_STORE_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Affected SOP Instance UID',
(0x0000, 0x1000), 'UI', 1),
('Move Originator Application Entity Title',
(0x0000, 0x1030), 'AE', 1),
('Move Originator Message ID',
(0x0000, 0x1031), 'US', 1),
]
DataField = 'Data Set'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0001
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.CommandSet[(0x0000, 0x1000)].value = params.AffectedSOPInstanceUID
if params.MoveOriginatorApplicationEntityTitle:
self.CommandSet[(0x0000, 0x1030)].value = \
params.MoveOriginatorApplicationEntityTitle
else:
self.CommandSet[(0x0000, 0x1030)].value = ""
if params.MoveOriginatorMessageID:
self.CommandSet[(0x0000, 0x1031)
].value = params.MoveOriginatorMessageID
else:
self.CommandSet[(0x0000, 0x1031)].value = ""
self.DataSet = params.DataSet
self.SetLength()
def ToParams(self):
tmp = C_STORE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.AffectedSOPInstanceUID = self.CommandSet[(0x0000, 0x1000)]
tmp.Priority = self.CommandSet[(0x0000, 0x0700)]
tmp.DataSet = self.DataSet
tmp.MessageID = self.CommandSet[(0x0000, 0x0110)]
return tmp
class C_STORE_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Affected SOP Instance UID', (0x0000, 0x1000), 'UI', 1)
]
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID.value
self.CommandSet[(0x0000, 0x0100)].value = 0x8001
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo.value
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.CommandSet[(0x0000, 0x1000)
].value = params.AffectedSOPInstanceUID.value
self.DataSet = None
self.SetLength()
def ToParams(self):
tmp = C_STORE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
tmp.Status = self.CommandSet[(0x0000, 0x0900)]
tmp.AffectedSOPInstanceUID = self.CommandSet[(0x0000, 0x1000)]
tmp.DataSet = self.DataSet
return tmp
class C_FIND_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0020
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_FIND_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.Priority = self.CommandSet[(0x0000, 0x0700)]
tmp.Identifier = self.DataSet
tmp.MessageID = self.CommandSet[(0x0000, 0x0110)]
return tmp
class C_FIND_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID.value
self.CommandSet[(0x0000, 0x0100)].value = 0x8020
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo.value
if not params.Identifier:
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
else:
            # any value other than 0x0101 indicates a data set (Identifier) follows
            self.CommandSet[(0x0000, 0x0800)].value = 0x0000
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_FIND_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
tmp.Status = self.CommandSet[(0x0000, 0x0900)]
tmp.Identifier = self.DataSet
return tmp
class C_GET_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0010
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_GET_ServiceParameters()
tmp.MessageID = self.CommandSet[(0x0000, 0x0110)]
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.Priority = self.CommandSet[(0x0000, 0x0700)]
tmp.Identifier = self.DataSet
return tmp
class C_GET_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Number of Remaining Sub-operations',
(0x0000, 0x1020), 'US', 1),
('Number of Complete Sub-operations',
(0x0000, 0x1021), 'US', 1),
('Number of Failed Sub-operations', (0x0000, 0x1022), 'US', 1),
('Number of Warning Sub-operations',
(0x0000, 0x1023), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8010
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.CommandSet[(0x0000, 0x1020)
].value = params.NumberOfRemainingSubOperations
self.CommandSet[(0x0000, 0x1021)
].value = params.NumberOfCompletedSubOperations
self.CommandSet[(0x0000, 0x1022)
].value = params.NumberOfFailedSubOperations
self.CommandSet[(0x0000, 0x1023)
].value = params.NumberOfWarningSubOperations
self.SetLength()
def ToParams(self):
tmp = C_GET_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
tmp.Status = self.CommandSet[(0x0000, 0x0900)]
try:
tmp.NumberOfRemainingSubOperations = self.CommandSet[
(0x0000, 0x1020)]
        except KeyError:
            # the element is optional and may be absent from the response
            pass
tmp.NumberOfCompletedSubOperations = self.CommandSet[(0x0000, 0x1021)]
tmp.NumberOfFailedSubOperations = self.CommandSet[(0x0000, 0x1022)]
tmp.NumberOfWarningSubOperations = self.CommandSet[(0x0000, 0x1023)]
tmp.Identifier = self.DataSet
return tmp
class C_MOVE_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Move Destination',
(0x0000, 0x0600), 'AE', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0021
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.CommandSet[(0x0000, 0x0600)].value = params.MoveDestination
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_MOVE_ServiceParameters()
tmp.MessageID = self.CommandSet[(0x0000, 0x0110)]
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.Priority = self.CommandSet[(0x0000, 0x0700)]
tmp.MoveDestination = self.CommandSet[(0x0000, 0x0600)]
tmp.Identifier = self.DataSet
return tmp
class C_MOVE_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Number of Remaining Sub-operations',
(0x0000, 0x1020), 'US', 1),
('Number of Complete Sub-operations',
(0x0000, 0x1021), 'US', 1),
('Number of Failed Sub-operations', (0x0000, 0x1022), 'US', 1),
('Number of Warning Sub-operations',
(0x0000, 0x1023), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8021
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.CommandSet[(0x0000, 0x1020)
].value = params.NumberOfRemainingSubOperations
self.CommandSet[(0x0000, 0x1021)
].value = params.NumberOfCompletedSubOperations
self.CommandSet[(0x0000, 0x1022)
].value = params.NumberOfFailedSubOperations
self.CommandSet[(0x0000, 0x1023)
].value = params.NumberOfWarningSubOperations
self.SetLength()
def ToParams(self):
tmp = C_MOVE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet[(0x0000, 0x0002)]
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
tmp.Status = self.CommandSet[(0x0000, 0x0900)]
try:
tmp.NumberOfRemainingSubOperations = self.CommandSet[
(0x0000, 0x1020)]
        except KeyError:
            # the element is optional and may be absent from the response
            pass
tmp.NumberOfCompletedSubOperations = self.CommandSet[(0x0000, 0x1021)]
tmp.NumberOfFailedSubOperations = self.CommandSet[(0x0000, 0x1022)]
tmp.NumberOfWarningSubOperations = self.CommandSet[(0x0000, 0x1023)]
tmp.Identifier = self.DataSet
return tmp
class C_CANCEL_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0100)].value = 0x0FFF
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.SetLength()
class C_CANCEL_FIND_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_FIND_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
return tmp
class C_CANCEL_GET_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_GET_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
return tmp
class C_CANCEL_MOVE_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_MOVE_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet[(0x0000, 0x0120)]
return tmp
MessageType = {
0x0001: C_STORE_RQ_Message,
0x8001: C_STORE_RSP_Message,
0x0020: C_FIND_RQ_Message,
0x8020: C_FIND_RSP_Message,
0x0FFF: C_CANCEL_RQ_Message,
0x0010: C_GET_RQ_Message,
0x8010: C_GET_RSP_Message,
0x0021: C_MOVE_RQ_Message,
0x8021: C_MOVE_RSP_Message,
0x0030: C_ECHO_RQ_Message,
0x8030: C_ECHO_RSP_Message
}
if __name__ == '__main__':
c = C_ECHO_ServiceParameters()
c.MessageID = 0
c.AffectedSOPClassUID = '12.1232.23.123.231.'
C_ECHO_msg = C_ECHO_RQ_Message()
C_ECHO_msg.FromParams(c)
print C_ECHO_msg
print C_ECHO_msg.ToParams()
print C_ECHO_msg.Encode(1, 100)
|
cancan101/pynetdicom
|
netdicom/DIMSEmessages.py
|
Python
|
mit
| 25,889
|
from appconf import AppConf
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class ImageKitConf(AppConf):
CACHEFILE_NAMER = 'imagekit.cachefiles.namers.hash'
SPEC_CACHEFILE_NAMER = 'imagekit.cachefiles.namers.source_name_as_path'
CACHEFILE_DIR = 'CACHE/images'
DEFAULT_CACHEFILE_BACKEND = 'imagekit.cachefiles.backends.Simple'
DEFAULT_CACHEFILE_STRATEGY = 'imagekit.cachefiles.strategies.JustInTime'
DEFAULT_FILE_STORAGE = None
CACHE_BACKEND = None
CACHE_PREFIX = 'imagekit:'
CACHE_TIMEOUT = None
USE_MEMCACHED_SAFE_CACHE_KEY = True
def configure_cache_backend(self, value):
if value is None:
from django.core.cache import DEFAULT_CACHE_ALIAS
return DEFAULT_CACHE_ALIAS
if value not in settings.CACHES:
raise ImproperlyConfigured("{0} is not present in settings.CACHES".format(value))
return value
def configure_cache_timeout(self, value):
if value is None and settings.DEBUG:
            # If no timeout is configured and DEBUG is on, default to 5 minutes
            return 300
        # Otherwise leave the value as is; None means cached values never expire
return value
def configure_default_file_storage(self, value):
if value is None:
value = settings.DEFAULT_FILE_STORAGE
return value
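# Usage sketch (assumed consumer settings.py): django-appconf exposes each
# attribute above as an overridable, IMAGEKIT_-prefixed Django setting, e.g.:
#
#   IMAGEKIT_CACHEFILE_DIR = 'CACHE/thumbs'
#   IMAGEKIT_CACHE_TIMEOUT = 3600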
|
wuga214/Django-Wuga
|
env/lib/python2.7/site-packages/imagekit/conf.py
|
Python
|
apache-2.0
| 1,390
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
from bs4 import BeautifulSoup
from pymongo import MongoClient, DESCENDING
import avhandle
import common
reload(sys)
#print sys.getdefaultencoding()
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
class MyMongodb(object):
"""docstring for MyMongodb"""
def __init__(self, db_name,collection_name):
super( MyMongodb, self).__init__()
self.client=MongoClient('127.0.0.1',27017)
self.db=self.client[db_name]
self.collection=self.db[collection_name]
def jsonsave(self,json):
data = dict(json)
        # insert the data into the specified collection
self.collection.insert(data)
def find_dup_id(self):
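        # aggregation pipeline: group documents by "code" and keep only
        # the codes that occur more than once (i.e. duplicates)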
dup=self.collection.aggregate([{"$group": {"_id": "$code","count": {"$sum": 1 } } },{"$match": {"count": {"$gt": 1 } } }] )
dup_id=[]
for x in dup:
#print x
dup_id.append(x["_id"])
#print dup_id
return dup_id
def remove_dup(self):
dup=self.find_dup_id()
for x in self.collection.find({"code":{"$in":dup}}).sort([("name", 1)]):
print str(x["_id"])+"\t"+x["code"]
    #input: a dict of {code: filename} for local files
    #output: the duplicate files found in the collection
    #returns a list of tuples: (code, disk, name, local filename)
def find_dup(self,dict_files):
id_list=dict_files.keys()
#value_list= dict_files.values()
dups=[]
for x in self.collection.find({"code":{"$in":id_list}}).sort([("code", 1)]):
print x["disk"] +"\t"+x["name"]
dups.append((x["code"],x["disk"],x["name"],dict_files[x["code"]]))
return dups
#for d in dup:
# for x in db.jav.find({"code":d}):
# print x["disk"] +"\t"+x["name"]
def addtodb(self,slist):
for vid,vname,cast,vdate,score in slist:
try:
self.collection.insert({"code":vid,"name":vname,"cast":cast,"date":vdate,"score":score})
except Exception,e:
print e
def txtstore_addtodb(self,slist):
for vid,vname,disk in slist:
try:
self.collection.insert({"code":vid,"name":vname,"disk":disk})
except Exception,e:
print e
def update_one(self,id,fullname,url):
#db.jav.update({"code":id}, {"$inc":{"age":10}}, multi=True) # update users set age = age + 10
u1 = self.collection.find_one({"code":id})
u1['fullname'] = fullname
u1['url'] = url
self.collection.save(u1)
def update_multi(self,id,name,cast,vdate,score):
self.collection.update({"code":id}, {"$set":{"fullname":name,"cast":cast,"date":vdate,"score":score}},upsert=True, multi=True) # update jav set url ="new url1"
def query_like(self,val):
for x in self.collection.find({"fullname":{"$regex": val}}):
try:
print x["fullname"]
except Exception,e:
print e
'''
db.jav.aggregate([{ $group: {_id: "$code",count: { $sum: 1 } } }, { $match: { count: { $gt: 1 } } }] )
db.jav.find({"fullname":/本田/})
db.jav.find().count()
db.jav.find({"code":{"$in":["soe385","jux959", "soe386"]}})
db.jav.find( { "fullname": { $exists: true } } ).count()
'''
def findmv(self):
while True:
print "please input search keyword:\n"
val=raw_input()
#query_like(u"本田")
query_like(val.decode("gbk").encode("utf-8"))
def update_fullname(self,path):
files=[x for x in os.listdir(path) if all([os.path.splitext(x)[1]=='.txt', not os.path.isdir(path+"\\"+x)])]
# txtfile=[f for f in files if os.path.splitext(f)[1]=='.txt']
store=[]
for txtfile in files:
for line in open(path+"/"+txtfile):
info=line.split("\t")
vid=common.format_rule2(info[2].strip())
name=info[2].strip()
cast=info[3].strip()
vdate=info[4].strip()
if u"识别码搜寻结果" in name:
print name.encode("gbk")
else:
store.append((vid,name,cast,vdate))
print len(store)
for a,b,c,d in store:
self.collection.update({"code":a}, {"$set":{"fullname":b,"cast":c,"date":d}},upsert=True, multi=True) # update jav set url ="new url1"
    #files in the directory that already exist in mgdb
def find_path_dup_from_mgdb(self,path):
#src_files=[avhandle.format2(x):x for x in os.listdir(path) if not os.path.isdir(path+"\\"+x)]
        #build a {code: filename} dict
src_files=dict((common.format_rule2(x), x) for x in os.listdir(path.decode("utf-8")) if not os.path.isdir(path.decode("utf-8")+"\\"+x))
#print src_files
dups=self.find_dup(src_files)
savefile=path+"\\dup.txt"
with open(savefile,"w") as fs:
            #write out the duplicate file names
for code,disk,name,src_filename in dups:
fs.write('move "%s" c:\\tmp \n'%src_filename)
fs.write('rem %s \t %s \t %s \n'%(code,disk,name))
print "save found dup file done!",savefile
if __name__ == '__main__' :
TXT_STORE_PATH="d:\\avstore\\"
TXT_INFO_PATH="d:\\avinfo\\"
mm=MyMongodb("mv","jav")
    #add new titles to the database
des=avhandle.walk_txtstore_file(u"G:\\",only_code=False)
print len(des)
print des
mm.txtstore_addtodb(des)
#update_fullname(TXT_INFO_PATH)
#for a,b,c,d,e in des:
# update_multi(a,b,c,d,e)
#for a,b,c in des:
# update_multi(a,b,c)
#print "update done!"
    #find titles already present in this directory, comparing against the mgdb collection
#find_path_dup_from_mgdb("d:\\torrents")
#mm.find_path_dup_from_mgdb("G:\\acv")
    #find titles that already exist:
    #1. first fetch the full title list from javlib and save it to txt files
    #2. read the txt files to get the title names
    #3. pass the names to find_dup to query mongodb and get the disk location
#mvname=avhandle.get_mvname_from_txt(u"D:\\avstore\\cast_info\\奥田咲.txt")
#mvname=avhandle.get_mvname_from_txt(u"D:\\avstore\\cast_info\\三島奈津子.txt")
#mvname=avhandle.get_mvname_from_txt(u"D:\\avstore\\cast_info\\三好亚矢.txt")
#print len(mvname)
#find_dup(mvname)
#update_multi("pppd413")
#findmv()
#arrange_txt()
#remove_dup()
|
dannywxh/mypy
|
MyPys/mg.py
|
Python
|
apache-2.0
| 6,880
|
import os
import locale
from pprint import pprint
from django.core.management.base import BaseCommand
from simplecart.currencies.models import Locale
LANGINFO_PROPERTIES = [
'codeset',
'd_t_fmt',
'd_fmt',
't_fmt',
't_fmt_ampm',
'day_1',
'day_2',
'day_3',
'day_4',
'day_5',
'day_6',
'day_7',
'abday_1',
'abday_2',
'abday_3',
'abday_4',
'abday_5',
'abday_6',
'abday_7',
'mon_1',
'mon_2',
'mon_3',
'mon_4',
'mon_5',
'mon_6',
'mon_7',
'mon_8',
'mon_9',
'mon_10',
'mon_11',
'mon_12',
'abmon_1',
'abmon_2',
'abmon_3',
'abmon_4',
'abmon_5',
'abmon_6',
'abmon_7',
'abmon_8',
'abmon_9',
'abmon_10',
'abmon_11',
'abmon_12',
'radixchar',
'thousep',
'yesexpr',
'noexpr',
'crncystr',
'era',
'era_year',
'era_d_t_fmt',
'era_d_fmt',
'alt_digits',
]
def encode(val, encoding):
if not isinstance(val, (str, unicode)):
return val
if isinstance(val, str):
val = unicode(val, encoding)
else:
val = unicode(val)
return unicode(val.encode('utf8', 'ignore'), 'utf8')
class Command(BaseCommand):
help="""Scrapes the locale info from the current system (*nix only!)
"""
def execute(self, *args,**options):
for line in os.popen('locale -a').read().split('\n'):
line = line.strip()
if not line or '.' not in line:
continue
name, encoding = line.split('.')
try:
unicode(name, encoding)
except LookupError:
#not an encoding that is recognized
continue
locale.setlocale(locale.LC_ALL, line)
info = locale.localeconv()
try:
unicode(info['currency_symbol'], encoding)
except (UnicodeDecodeError, LookupError):
continue
print line
pprint(info)
try:
new_locale = Locale.objects.get(name=name)
except Locale.DoesNotExist:
new_locale = Locale(name=name)
for key, value in info.items():
if isinstance(value, list):
value = u','.join([unicode(val) for val in value])
else:
value = encode(value, encoding)
setattr(new_locale, key, value)
langinfo_data = dict()
for key in LANGINFO_PROPERTIES:
lang_key = key.upper()
if hasattr(locale, lang_key):
lang_key = getattr(locale, lang_key)
value = locale.nl_langinfo(lang_key)
try:
value = encode(value, encoding)
except UnicodeDecodeError:
print 'Error encoding:', key
continue
langinfo_data[key] = value
setattr(new_locale, key, value)
else:
print "lang_key not found:", lang_key
pprint(langinfo_data)
new_locale.save()
|
cuker/django-localedb
|
localedb/management/commands/loadlocale.py
|
Python
|
bsd-3-clause
| 3,242
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("RidgeClassifier" , "BinaryClass_100" , "oracle")
|
antoinecarme/sklearn2sql_heroku
|
tests/classification/BinaryClass_100/ws_BinaryClass_100_RidgeClassifier_oracle_code_gen.py
|
Python
|
bsd-3-clause
| 146
|
""" Form widget classes """
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.utils import flatatt
from django.forms.widgets import CheckboxInput
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
class TermsOfServiceCheckboxInput(CheckboxInput):
""" Renders a checkbox with a label linking to the terms of service. """
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
# Translators: link_start and link_end are HTML tags for a link to the terms of service.
# platform_name is the name of this Open edX installation.
label = _('I, and my company, accept the {link_start}{platform_name} API Terms of Service{link_end}.').format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_start='<a href="{url}" target="_blank">'.format(url=reverse('api_admin:api-tos')),
link_end='</a>',
)
html = u'<input{{}} /> <label class="tos-checkbox-label" for="{id}">{label}</label>'.format(
id=final_attrs['id'],
label=label
)
return format_html(html, flatatt(final_attrs))
|
TheMOOCAgency/edx-platform
|
openedx/core/djangoapps/api_admin/widgets.py
|
Python
|
agpl-3.0
| 1,705
|
import numpy as np
from ittk import mutual_information
def test_mutual_information():
x = np.array([7, 7, 7, 3])
y = np.array([0, 1, 2, 3])
mut_inf = mutual_information(x, y)
assert mut_inf == 0.8112781244591329
x2 = [1, 0, 1, 1, 0]
y2 = [1, 1, 1, 0, 0]
mut_inf_two = mutual_information(x2, y2)
assert mut_inf_two == 0.01997309402197492
|
MaxwellRebo/ittk
|
tests/mut_inf_tests.py
|
Python
|
mit
| 371
|
#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
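# The stream is raw Annex B H.264: access units are delimited by the byte
# sequence 00 00 01. A start code may straddle a chunk boundary, so before
# scanning newly appended bytes we back up over trailing zeros (see below).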
for chunk in response.iter_content(chunk_size=1024):
if chunk:
        starting_offset = len(data)
        if starting_offset >= 2:
            # indexing a bytes object yields ints in Python 3, so compare
            # trailing bytes against the integer 0
            if data[-1] == 0:
                print("last byte is zero, backing up one")
                starting_offset -= 1
            if data[-2] == 0:
                print("second to last byte is zero, backing up one more")
                starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
gizmo-cda/g2x-submarine-v2
|
scripts/video/get_stream.py
|
Python
|
bsd-3-clause
| 1,243
|
# -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2011 Ivo Nunes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gutils
import movie
import string
import re
import urllib
plugin_name = "IMDb-pt"
plugin_description = "Internet Movie Database Portuguese"
plugin_url = "www.imdb.pt"
plugin_language = _("Portuguese")
plugin_author = "Ivo Nunes"
plugin_author_email = "<netherblood@gmail.com>"
plugin_version = "0.1"
class Plugin(movie.Movie):
def __init__(self, id):
        self.encode = 'iso-8859-1'
self.movie_id = id
self.url = "http://www.imdb.pt/title/tt" + str(self.movie_id)
def initialize(self):
self.page = gutils.convert_entities(self.page)
self.cast_page = self.open_page(url=self.url + '/fullcredits')
self.cast_page = gutils.convert_entities(self.cast_page)
self.plot_page = self.open_page(url=self.url + '/plotsummary')
self.plot_page = gutils.convert_entities(self.plot_page)
def get_image(self):
self.image_url = gutils.trim(self.page, u'src="http://ia.media-imdb.com/images/', u'.jpg" /></a>')
self.image_url = "http://ia.media-imdb.com/images/" + self.image_url + ".jpg"
def get_title(self):
self.title = gutils.trim(self.page, u'<title>', u' (')
self.title = self.title.encode(self.encode)
def get_o_title(self):
self.o_title = gutils.trim(self.page, u'Conhecido Como:</h5><div class="info-content">"', u'"')
self.o_title = self.o_title.encode(self.encode)
def get_director(self):
self.director = gutils.trim(self.page, u'<h5>Diretor:</h5>', u'</a><br/>')
self.director = gutils.strip_tags(self.director)
def get_plot(self):
self.plot = gutils.trim(self.plot_page, u'<div id="swiki.2.1">', u'</div>')
self.plot = gutils.strip_tags(self.plot)
self.plot = self.plot.encode(self.encode)
def get_year(self):
self.year = gutils.trim(self.page, u' (', u')</title>')
def get_runtime(self):
self.runtime = gutils.trim(self.page, u'<h5>Duração:</h5><div class="info-content">', u' min')
self.runtime = self.runtime.encode(self.encode)
def get_genre(self):
self.genre = gutils.trim(self.page, u'<h5>Gênero:</h5>', u'</div>')
self.genre = gutils.strip_tags(self.genre)
self.genre = string.replace(self.genre, " | ", ", ")
self.genre = self.genre.encode(self.encode)
def get_cast(self):
self.cast = ''
self.cast = gutils.trim(self.cast_page, '<table class="cast">', '</table>')
if self.cast == '':
self.cast = gutils.trim(self.page, '<table class="cast">', '</table>')
self.cast = string.replace(self.cast, ' ... ', _(' como ').encode('utf8'))
self.cast = string.replace(self.cast, '...', _(' como ').encode('utf8'))
self.cast = string.replace(self.cast, '</tr><tr>', "\n")
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="even">', "\n", self.cast)
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="odd">', "\n", self.cast)
self.cast = self.__before_more(self.cast)
self.cast = re.sub('[ ]+', ' ', self.cast)
def get_classification(self):
self.classification = gutils.trim(self.page, u'<h5>Certificação:</h5><div class="info-content">', u'</div>')
self.classification = gutils.strip_tags(self.classification)
self.classification = string.replace(self.classification, " | ", ", ")
self.classification = self.classification.encode(self.encode)
def get_studio(self):
self.studio = gutils.trim(self.page, u'<h5>Companhia :</h5><div class="info-content">', u'Exibir mais</a>')
self.studio = gutils.strip_tags(self.studio)
self.studio = self.studio.encode(self.encode)
def get_o_site(self):
self.o_site = ""
def get_site(self):
self.site = self.url
def get_trailer(self):
self.trailer = "http://www.imdb.com/title/" + str(self.movie_id) + "/trailers"
def get_country(self):
self.country = gutils.trim(self.page, u'<h5>País:</h5><div class="info-content">', '</div>')
self.country = string.replace(self.country, " | ", ", ")
self.country = self.country.encode(self.encode)
def get_notes(self):
self.notes = ''
def get_rating(self):
self.rating = gutils.trim(self.page, u'<div class="starbar-meta">', '/10')
self.rating = gutils.strip_tags(self.rating)
self.rating = string.replace(self.rating, ",", ".")
if self.rating:
self.rating = float(self.rating)
self.rating = round(self.rating)
def get_screenplay(self):
self.screenplay = ''
parts = re.split('<a href=', gutils.trim(self.cast_page, u'>Créditos como roteirista<', '</table>'))
if len(parts) > 1:
for part in parts[1:]:
screenplay = gutils.trim(part, '>', '<')
if screenplay == 'WGA':
continue
screenplay = screenplay.replace(' (escrito por)', '')
screenplay = screenplay.replace(' and<', '<')
self.screenplay = self.screenplay + screenplay + ', '
if len(self.screenplay) > 2:
self.screenplay = self.screenplay[0:len(self.screenplay) - 2]
def get_cameraman(self):
self.cameraman = string.replace('<' + gutils.trim(self.cast_page, u'>Direção de Fotografia de<', '</table>'), u'(diretor de fotografia) ', '')
def __before_more(self, data):
for element in [u'>Exibir mais<', '>Full summary<', '>Full synopsis<']:
tmp = string.find(data, element)
if tmp>0:
data = data[:tmp] + '>'
return data
class SearchPlugin(movie.SearchMovie):
PATTERN = re.compile(r"""<a href=['"]/title/tt([0-9]+)/[^>]+[>](.*?)</td>""")
def __init__(self):
self.original_url_search = 'http://www.imdb.pt/find?s=tt&q='
self.translated_url_search = 'http://www.imdb.pt/find?s=tt&q='
self.encode = 'utf8'
def search(self, parent_window):
"""Perform the web search"""
if not self.open_search(parent_window):
return None
return self.page
def get_searches(self):
"""Try to find both id and film title for each search result"""
elements = string.split(self.page, '<tr')
if len(elements):
for element in elements[1:]:
match = self.PATTERN.findall(element)
if len(match) > 1:
tmp = re.sub('^[0-9]+[.]', '', gutils.clean(match[1][1]))
self.ids.append(match[1][0])
self.titles.append(tmp)
|
santiavenda2/griffith
|
lib/plugins/movie/PluginMovieIMDB-pt.py
|
Python
|
gpl-2.0
| 7,570
|
"""apple URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
tejasjadhav/django-scheduler
|
examples/basic/apple/urls.py
|
Python
|
gpl-3.0
| 747
|
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from jupyter_core import paths as jpaths
from IPython import paths as ipaths
from ipykernel.kernelspec import install
pjoin = os.path.join
tmp = None
patchers = []
def setup():
"""setup temporary env for tests"""
global tmp
tmp = tempfile.mkdtemp()
patchers[:] = [
patch.dict(os.environ, {
'HOME': tmp,
# Let tests work with --user install when HOME is changed:
'PYTHONPATH': os.pathsep.join(sys.path),
}),
]
for p in patchers:
p.start()
# install IPython in the temp home:
install(user=True)
def teardown():
for p in patchers:
p.stop()
try:
shutil.rmtree(tmp)
except (OSError, IOError):
# no such file
pass
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/ipykernel/tests/__init__.py
|
Python
|
bsd-2-clause
| 998
|
def is_animated(commands):
    return any(cmd[0] in {'frames', 'vary', 'basename'} for cmd in commands)
def num_frames(commands):
for cmd in commands:
if cmd[0] == 'frames':
return cmd[1]
else:
raise AttributeError('Please specify the number of frames using the following command: frames <number>')
def get_basename(commands):
for cmd in commands:
if cmd[0] == 'basename':
return cmd[1]
else:
raise AttributeError('Please specify the filename prefix using the following command: basename <prefix>')
def make_knobs(commands, frames):
# Truncated `vary` commands
vcmds = [cmd[1:] for cmd in commands if cmd[0] == 'vary']
# Dict of arrays of knob values
knobs = {knob: [float('nan')] * frames for knob in [t[0] for t in vcmds]}
# Set the knob values
for knob, t0, t1, x0, x1 in vcmds:
# We allow t1 to be the length so that some animations involving rotations can be done smoothly
if 0 <= t0 < t1 <= frames:
x = knobs[knob]
for t in range(t0, min(t1 + 1, frames)):
# Derived from point-slope form
x[t] = x0 + (float(x1) - x0) * (t - t0) / (t1 - t0)
elif t0 >= t1:
raise ValueError('You inserted the first and last frame numbers backwards!')
else:
            raise ValueError('First and last frame numbers out of bounds: %d, %d. Total number of frames: %d' % (t0, t1, frames))
# After looping
return knobs
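# Worked example (hypothetical command list): vary a knob named "spin" from
# 0.0 at frame 0 to 360.0 at frame 10 over a 10-frame animation:
#   knobs = make_knobs([('vary', 'spin', 0, 10, 0.0, 360.0)], 10)
# yields knobs['spin'][t] == 36.0 * t for t in range(10).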
|
aidan-fitz/line-eyes
|
animate.py
|
Python
|
bsd-3-clause
| 1,529
|
#!/usr/bin/env python
# mock_brew.py - Emulate brew using mock
#
import sys
import argparse
from koji_mock import KojiMock
def main():
brew_tag = 'ruby193-satellite-6.1.0-rhel-7-build'
args = parse_args()
mock = KojiMock(tag=brew_tag)
out = mock.rebuild(
src_rpm=args.srpm,
define="scl ruby193",
resultdir=args.resultdir,
)
print out
def parse_args():
"""Parse arguments passed to this program
:returns: The parsed arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser()
    parser.add_argument('srpm', help='Path to the .src.rpm file to build')
parser.add_argument('--resultdir', help='Where to place build results')
return parser.parse_args()
if __name__ == '__main__':
main()
|
kbidarkar/robottelo-ci
|
lib/python/mock_brew.py
|
Python
|
gpl-3.0
| 786
|
import os
import datetime
from flask.ext.script import Manager, prompt_bool
from flask.ext.migrate import Migrate, MigrateCommand
from formspree import create_app, app, settings
from formspree.app import redis_store
from formspree.forms.helpers import REDIS_COUNTER_KEY
from formspree.forms.models import Form
forms_app = create_app()
manager = Manager(forms_app)
# add flask-migrate commands
Migrate(forms_app, app.DB)
manager.add_command('db', MigrateCommand)
@manager.command
def run_debug(port=os.getenv('PORT', 5000)):
'''runs the app with debug flag set to true'''
forms_app.run(host='0.0.0.0', debug=True, port=int(port))
@manager.option('-H', '--host', dest='host', default=None, help='referer hostname')
@manager.option('-e', '--email', dest='email', default=None, help='form email')
def unsubscribe(email, host):
''' Unsubscribes an email by resetting the form to unconfirmed. User may get
one more confirmation email, but if she doesn't confirm that will be it.'''
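    # usage sketch (hypothetical values), via the flask-script manager:
    #   python manage.py unsubscribe -e user@example.com -H example.com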
form = None
if email and host:
form = Form.query.filter_by(email=email, host=host).first()
elif email and not host:
query = Form.query.filter_by(email=email)
if query.count() == 1:
form = query.first()
elif query.count() > 1:
for f in query.all():
print '-', f.host
print 'More than one result for this email, specify the host.'
elif host and not email:
query = Form.query.filter_by(host=host)
if query.count() == 1:
form = query.first()
elif query.count() > 1:
for f in query.all():
print '-', f.email
print 'More than one result for this host, specify the email.'
if form:
print 'unsubscribing the email %s from the form at %s' % (form.email, form.host)
if prompt_bool('are you sure?'):
form.confirmed = False
form.confirm_sent = False
app.DB.session.add(form)
app.DB.session.commit()
print 'success.'
@manager.option('-i', '--id', dest='id', default=None, help='form id')
@manager.option('-H', '--host', dest='host', default=None, help='referer hostname')
@manager.option('-e', '--email', dest='email', default=None, help='form email')
def monthly_counters(email=None, host=None, id=None, month=datetime.date.today().month):
if id:
query = [Form.query.get(id)]
elif email and host:
query = Form.query.filter_by(email=email, host=host)
elif email and not host:
query = Form.query.filter_by(email=email)
elif host and not email:
query = Form.query.filter_by(host=host)
else:
        print 'supply either --email or --host or both (or --id).'
return 1
for form in query:
nsubmissions = redis_store.get(REDIS_COUNTER_KEY(form_id=form.id, month=month)) or 0
print '%s submissions for %s' % (nsubmissions, form)
@manager.option('-t', '--testname', dest='testname', default=None, help='name of test')
def test(testname=None):
import unittest
test_loader = unittest.defaultTestLoader
if testname:
test_suite = test_loader.loadTestsFromName(testname)
else:
test_suite = test_loader.discover('.')
test_runner = unittest.TextTestRunner()
test_runner.run(test_suite)
if __name__ == "__main__":
manager.run()
|
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
|
mail/formspree-master/manage.py
|
Python
|
mit
| 3,388
|
import unittest
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
def _as_noncontiguous_array(array):
# TODO(niboshi): cupy + cudnn test fails in F.fixed_batch_normalization.
# Fix it and use testing.array._as_noncontiguous_array.
def as_noncontiguous_array(arr):
if arr is None:
return None
if isinstance(arr, (numpy.ndarray, cuda.ndarray)):
xp = chainer.backend.get_array_module(arr)
return xp.asfortranarray(arr)
return testing.array._as_noncontiguous_array(arr)
if isinstance(array, (list, tuple)):
return type(array)([as_noncontiguous_array(arr) for arr in array])
return as_noncontiguous_array(array)
def _batch_normalization(
inputs, running_mean=None, running_var=None, decay=None):
x, gamma, beta, mean, var, eps, expander = inputs
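    # reference computation: y = gamma * (x - mean) / sqrt(var + eps) + beta,
    # with running statistics updated as exponential moving averages; the
    # variance update applies the unbiased-estimation factor m / (m - 1)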
mean_expanded = mean[expander]
std = numpy.sqrt(var + eps)[expander]
y_expect = (gamma[expander] * (x - mean_expanded) / std + beta[expander])
if running_mean is not None or running_var is not None:
m = x.size // gamma.size
adjust = m / max(m - 1., 1.) # unbiased estimation
if running_mean is not None:
running_mean *= decay
running_mean += (1 - decay) * mean
if running_var is not None:
running_var *= decay
running_var += (1 - decay) * adjust * var
return y_expect
@testing.parameterize(*(testing.product_dict(
testing.product({
'param_shape': [(3,), (3, 4), (3, 2, 3)],
'ndim': [0, 1, 2],
}) + [
{'input_shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'input_shape': (5, 4), 'axis': 0},
{'input_shape': (5, 4, 3), 'axis': (0, 1)},
],
testing.product({
'xdtype': [numpy.float32, numpy.float64],
'dtype': [numpy.float32, numpy.float64],
'eps': [2e-5, 5e-1],
'c_contiguous': [True, False],
'running_statistics': [True, False],
}),
) + testing.product({
'param_shape': [(3,)],
'ndim': [1],
'eps': [2e-5, 5e-1],
'xdtype': [numpy.float16, numpy.float32, numpy.float64],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'c_contiguous': [True, False],
'running_statistics': [True, False],
})))
@backend.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cudnn_fast_batch_normalization': [True, False],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestBatchNormalization(unittest.TestCase):
def setUp(self):
dtype = self.dtype
xdtype = self.xdtype
if not hasattr(self, 'axis'):
param_shape = self.param_shape
ndim = self.ndim
shape = (5,) + param_shape + (2,) * ndim
else:
aggr_axes = self.axis
if isinstance(self.axis, int):
aggr_axes = self.axis,
param_shape = tuple(
s
for i, s in enumerate(self.input_shape)
if i not in aggr_axes
)
shape = self.input_shape
# x, ggx, gy must share the same data type
# gamma, beta, gggamma, ggbeta must share the same data type
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
x = numpy.random.uniform(-1, 1, shape).astype(xdtype)
gy = numpy.random.uniform(-1, 1, shape).astype(xdtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(xdtype)
gggamma = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggbeta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
if self.running_statistics:
self.running_mean = numpy.random.uniform(
-1, 1, param_shape).astype(dtype)
self.running_var = numpy.random.uniform(
-1, 1, param_shape).astype(dtype)
else:
self.running_mean = None
self.running_var = None
if not hasattr(self, 'axis'):
head_ndim = gamma.ndim + 1
aggr_axes = (0,) + tuple(six.moves.range(head_ndim, x.ndim))
self.expander = (None, Ellipsis) + (None,) * ndim
else:
self.expander = tuple(
None if i in aggr_axes else slice(None)
for i in range(x.ndim)
)
mean = x.mean(axis=aggr_axes)
var = x.var(axis=aggr_axes)
self.decay = 0.9
self.mean = mean
self.var = var
self.inputs = [x, gamma, beta]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx, gggamma, ggbeta]
self.bn_options = {
'decay': self.decay,
'eps': self.eps,
}
if hasattr(self, 'axis'):
self.bn_options['axis'] = self.axis
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
if self.xdtype == numpy.float16 or self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
def forward_cpu(self, inputs, running_mean, running_var):
y_expect = _batch_normalization(
inputs + [self.mean, self.var, self.eps, self.expander],
running_mean, running_var, self.decay)
return y_expect,
def check_forward(self, inputs, backend_config):
if self.running_statistics:
running_mean_expected = self.running_mean.copy()
running_var_expected = self.running_var.copy()
else:
running_mean_expected = None
running_var_expected = None
y_expected, = self.forward_cpu(
inputs, running_mean_expected, running_var_expected)
inputs = backend_config.get_array(inputs)
running_mean = backend_config.get_array(self.running_mean)
running_var = backend_config.get_array(self.running_var)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
running_mean = _as_noncontiguous_array(running_mean)
running_var = _as_noncontiguous_array(running_var)
with backend_config:
y = functions.batch_normalization(
*inputs, running_mean=running_mean,
running_var=running_var, **self.bn_options)
assert y.data.dtype == self.xdtype
testing.assert_allclose(
y_expected, y.data, **self.check_forward_options)
if self.running_statistics:
testing.assert_allclose(
running_mean_expected, running_mean,
**self.check_forward_options)
testing.assert_allclose(
running_var_expected, running_var,
**self.check_forward_options)
def test_forward(self, backend_config):
self.check_forward(self.inputs, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
def f(*inputs):
y = functions.batch_normalization(
*inputs, **self.bn_options)
return y,
with backend_config:
gradient_check.check_backward(
f, inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
grad_grad_inputs = _as_noncontiguous_array(grad_grad_inputs)
def f(*inputs):
return functions.batch_normalization(
*inputs, **self.bn_options)
with backend_config:
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
**self.check_double_backward_options)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
@testing.parameterize(*(testing.product({
'param_shape': [(3,), (3, 4), (3, 2, 3)],
'ndim': [0, 1, 2],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float32],
'c_contiguous': [True, False],
}) + testing.product({
'param_shape': [(3,)],
'ndim': [1],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'c_contiguous': [True, False],
})))
@backend.inject_backend_tests(
None,
# CPU tests
[{'use_cuda': False}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cudnn_fast_batch_normalization': [True, False],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestFixedBatchNormalization(unittest.TestCase):
def setUp(self):
param_shape = self.param_shape
dtype = self.dtype
ndim = self.ndim
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (5,) + param_shape + (2,) * ndim
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
mean = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
var = numpy.random.uniform(0.5, 1, param_shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
gggamma = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggbeta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggmean = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggvar = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
self.decay = 0.0
self.expander = (None, Ellipsis) + (None,) * ndim
self.inputs = [x, gamma, beta, mean, var]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx, gggamma, ggbeta, ggmean, ggvar]
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'dtype': numpy.float64}
self.check_double_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
def forward_cpu(self, inputs):
y_expect = _batch_normalization(inputs + [self.eps, self.expander])
return y_expect,
def check_forward(self, inputs, enable_backprop, backend_config):
y_expected, = self.forward_cpu(inputs)
inputs = backend_config.get_array(inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
with chainer.using_config('enable_backprop', enable_backprop):
with backend_config:
y = functions.fixed_batch_normalization(*inputs, eps=self.eps)
assert y.data.dtype == self.dtype
testing.assert_allclose(
y_expected, y.data, **self.check_forward_options)
def test_forward(self, backend_config):
self.check_forward(self.inputs, False, backend_config)
def test_forward_with_enable_backprop(self, backend_config):
self.check_forward(self.inputs, True, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
def f(*inputs):
y = functions.fixed_batch_normalization(*inputs, eps=self.eps)
return y,
with backend_config:
gradient_check.check_backward(
f, inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
grad_grad_inputs = _as_noncontiguous_array(grad_grad_inputs)
def f(*inputs):
return functions.fixed_batch_normalization(*inputs, eps=self.eps)
with backend_config:
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
**self.check_double_backward_options)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'eps': [2e-5, 5e-1],
# TODO(bkvogel): Check float16 support again in next cuDNN version.
'dtype': [numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestBatchNormalizationCudnnCall(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
self.gamma = cuda.cupy.random.uniform(.5, 1,
param_shape).astype(self.dtype)
self.beta = cuda.cupy.random.uniform(-1, 1,
param_shape).astype(self.dtype)
shape = (7,) + param_shape + (2,) * ndim
self.x = cuda.cupy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, shape).astype(self.dtype)
self.args = [self.x, self.gamma, self.beta]
head_ndim = self.gamma.ndim + 1
self.aggr_axes = (0,) + tuple(six.moves.range(head_ndim, self.x.ndim))
self.mean = self.x.mean(axis=self.aggr_axes)
self.var = self.x.var(axis=self.aggr_axes) + self.eps
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto', 5000)
def forward(self):
return functions.batch_normalization(
*[chainer.Variable(i) for i in self.args], eps=self.eps,
running_mean=self.mean, running_var=self.var)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch(
'cupy.cudnn.batch_normalization_forward_training_ex'
) as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch(
'cupy.cudnn.batch_normalization_backward'
) as func:
y.backward()
self.assertEqual(func.called, self.expect)
@attr.cudnn
class TestBatchNormalizationCudnnEps(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
dtype = numpy.float32
gamma = cuda.cupy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (7,) + param_shape + (2,) * ndim
x = cuda.cupy.random.uniform(-1, 1, shape).astype(dtype)
self.args = [x, gamma, beta]
def test_valid(self):
functions.batch_normalization(*self.args, eps=1e-5)
def test_invalid(self):
eps = -0.1
if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
eps = 2e-6
with self.assertRaises(RuntimeError):
functions.batch_normalization(*self.args, eps=eps)
@attr.cudnn
class TestFixedBatchNormalizationCudnnEps(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
dtype = numpy.float32
gamma = cuda.cupy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
mean = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
var = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (7,) + param_shape + (2,) * ndim
x = cuda.cupy.random.uniform(-1, 1, shape).astype(dtype)
self.args = [x, gamma, beta, mean, var]
def test_valid(self):
functions.fixed_batch_normalization(*self.args, eps=1e-5)
def test_invalid(self):
eps = -0.1
if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
eps = 2e-6
with self.assertRaises(RuntimeError):
functions.fixed_batch_normalization(*self.args, eps=eps)
class TestBatchNormalizationWarning(unittest.TestCase):
def setUp(self):
pass
def create_batch(self, param_shape, x_shape):
dtype = numpy.float32
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
args = [x, gamma, beta]
return args
def test_invalid_batch(self):
args = self.create_batch((3,), (1, 3))
with testing.assert_warns(UserWarning):
functions.batch_normalization(*args)
def test_invalid_batch_no_batch_axis(self):
args = self.create_batch((1, 3,), (1, 3, 1))
with testing.assert_warns(UserWarning):
functions.batch_normalization(*args, axis=2)
def test_valid_batch(self):
args = self.create_batch((3,), (1, 3, 2, 2))
with warnings.catch_warnings(record=True) as w:
functions.batch_normalization(*args)
assert len(w) == 0
def test_valid_batch_no_batch_axis(self):
args = self.create_batch((1, 3,), (1, 3, 2))
with warnings.catch_warnings(record=True) as w:
functions.batch_normalization(*args, axis=2)
assert len(w) == 0
testing.run_module(__name__, __file__)
|
pfnet/chainer
|
tests/chainer_tests/functions_tests/normalization_tests/test_batch_normalization.py
|
Python
|
mit
| 20,347
|
# -*- coding: utf-8 -*-
import unittest
from wechatpy.replies import TextReply, create_reply
class CreateReplyTestCase(unittest.TestCase):
def test_create_reply_with_text_not_render(self):
text = "test"
reply = create_reply(text, render=False)
self.assertEqual("text", reply.type)
self.assertEqual(text, reply.content)
reply.render()
def test_create_reply_with_text_render(self):
text = "test"
reply = create_reply(text, render=True)
self.assertTrue(isinstance(reply, str))
def test_create_reply_with_message(self):
from wechatpy.messages import TextMessage
msg = TextMessage(
{
"FromUserName": "user1",
"ToUserName": "user2",
}
)
reply = create_reply("test", msg, render=False)
self.assertEqual("user1", reply.target)
self.assertEqual("user2", reply.source)
reply.render()
def test_create_reply_with_reply(self):
_reply = TextReply(content="test")
reply = create_reply(_reply, render=False)
self.assertEqual(_reply, reply)
reply.render()
def test_create_reply_with_articles(self):
articles = [
{
"title": "test 1",
"description": "test 1",
"image": "http://www.qq.com/1.png",
"url": "http://www.qq.com/1",
},
{
"title": "test 2",
"description": "test 2",
"image": "http://www.qq.com/2.png",
"url": "http://www.qq.com/2",
},
{
"title": "test 3",
"description": "test 3",
"image": "http://www.qq.com/3.png",
"url": "http://www.qq.com/3",
},
]
reply = create_reply(articles, render=False)
self.assertEqual("news", reply.type)
reply.render()
def test_create_reply_with_more_than_ten_articles(self):
articles = [
{
"title": "test 1",
"description": "test 1",
"image": "http://www.qq.com/1.png",
"url": "http://www.qq.com/1",
},
{
"title": "test 2",
"description": "test 2",
"image": "http://www.qq.com/2.png",
"url": "http://www.qq.com/2",
},
{
"title": "test 3",
"description": "test 3",
"image": "http://www.qq.com/3.png",
"url": "http://www.qq.com/3",
},
{
"title": "test 4",
"description": "test 4",
"image": "http://www.qq.com/4.png",
"url": "http://www.qq.com/4",
},
{
"title": "test 5",
"description": "test 5",
"image": "http://www.qq.com/5.png",
"url": "http://www.qq.com/5",
},
{
"title": "test 6",
"description": "test 6",
"image": "http://www.qq.com/6.png",
"url": "http://www.qq.com/6",
},
{
"title": "test 7",
"description": "test 7",
"image": "http://www.qq.com/7.png",
"url": "http://www.qq.com/7",
},
{
"title": "test 8",
"description": "test 8",
"image": "http://www.qq.com/8.png",
"url": "http://www.qq.com/8",
},
{
"title": "test 9",
"description": "test 9",
"image": "http://www.qq.com/9.png",
"url": "http://www.qq.com/9",
},
{
"title": "test 10",
"description": "test 10",
"image": "http://www.qq.com/10.png",
"url": "http://www.qq.com/10",
},
{
"title": "test 11",
"description": "test 11",
"image": "http://www.qq.com/11.png",
"url": "http://www.qq.com/11",
},
]
self.assertRaises(AttributeError, create_reply, articles)
def test_create_empty_reply(self):
from wechatpy.replies import EmptyReply
reply = create_reply("")
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(None)
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(False)
self.assertTrue(isinstance(reply, EmptyReply))
|
jxtech/wechatpy
|
tests/test_create_reply.py
|
Python
|
mit
| 4,741
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import estimator
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import gan
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import kfac
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import model_pruning
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import predictor
from tensorflow.contrib import quantization
from tensorflow.contrib import quantize
from tensorflow.contrib import reduce_slice_ops
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.eager.python import tfe as eager
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.contrib.summary import summary
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg",
globals(), "tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
|
dyoung418/tensorflow
|
tensorflow/contrib/__init__.py
|
Python
|
apache-2.0
| 3,815
|
#!/usr/bin/env python
########################################################################
# File : dirac-compile-externals
# Author : Adria Casajus
########################################################################
"""
Compile DIRAC externals (does not require DIRAC code)
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import tempfile
import os
import getopt
import sys
import stat
import imp
import shutil
gitRepo = "https://github.com/DIRACGrid/Externals.git"
executablePerms = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
DIRACRoot = False
def downloadExternals( destPath, version = False ):
destPath = os.path.join( destPath, "Externals" )
if os.system("git clone %s %s" % (gitRepo, destPath)) != 0:
print("Cannot clone git repo")
return False
if version and os.system("cd %s; git checkout %s; git checkout -b comp-%s " % (destPath, version, version)) != 0:
print("Cannot find version %s" % version)
return False
return True
def copyFromDIRAC( filePath, destPath, isExecutable = False, filterLines = None ):
if filterLines is None:
filterLines = []
global DIRACRoot
if not DIRACRoot:
basePath = os.path.dirname( os.path.realpath( __file__ ) )
DIRACRoot = findDIRACRoot( basePath )
try:
with open( os.path.join( DIRACRoot, filePath ), "r" ) as fd:
data = fd.readlines()
except IOError as e:
print("Could not open %s: %s" % (filePath, e))
sys.exit( 1 )
destFilePath = os.path.join( destPath, os.path.basename( filePath ) )
try:
with open( destFilePath, "w" ) as fd:
for line in data:
found = False
for fstr in filterLines:
if line.find( fstr ) > -1:
found = True
break
if not found:
fd.write( line )
except IOError as e:
print("Could not write into %s: %s" % (destFilePath, e))
sys.exit( 1 )
if isExecutable:
os.chmod( destFilePath, executablePerms )
def findDIRACRoot( path ):
dirContents = os.listdir( path )
if 'DIRAC' in dirContents and os.path.isdir( os.path.join( path, 'DIRAC' ) ):
return path
parentPath = os.path.dirname( path )
if parentPath == path or len( parentPath ) == 1:
return False
return findDIRACRoot( os.path.dirname( path ) )
def resolvePackagesToBuild( compType, buildCFG, alreadyExplored = [] ):
explored = list( alreadyExplored )
packagesToBuild = []
if compType not in buildCFG.listSections():
return []
typeCFG = buildCFG[ compType ]
for tc in typeCFG.getOption( 'require', [] ):
if tc in explored:
continue
explored.append( tc )
newPackages = resolvePackagesToBuild( tc, buildCFG, explored )
for pkg in newPackages:
if pkg not in packagesToBuild:
packagesToBuild.append( pkg )
for pkg in typeCFG.getOption( 'buildOrder', [] ):
if pkg not in packagesToBuild:
packagesToBuild.append( pkg )
return packagesToBuild
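# Illustrative resolution (hypothetical builds.cfg): if [client] has
# require = core and buildOrder = pyext, while [core] has
# buildOrder = zlib, python, then resolvePackagesToBuild('client', cfg)
# returns ['zlib', 'python', 'pyext']: required sections are expanded
# depth-first before the section's own buildOrder is appended.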
def fixAbsoluteLinks( path ):
for entry in os.listdir( path ):
entryPath = os.path.join( path, entry )
if os.path.islink( entryPath ):
destPath = os.readlink( entryPath )
if os.path.isabs( destPath ):
absLinkDirSplit = [ d for d in os.path.abspath( path ).split( "/" ) if d.strip() ]
absDestDirSplit = [ d for d in destPath.split( "/" ) if d.strip() ]
common = -1
for i in range( min( len( absLinkDirSplit ), len( absDestDirSplit ) ) ):
if absLinkDirSplit[ i ] == absDestDirSplit[ i ]:
common = i
else:
break
absLinkDirSplit = absLinkDirSplit[ common + 1: ]
absDestDirSplit = absDestDirSplit[ common + 1: ]
finalDestination = [ ".." for d in absLinkDirSplit ]
finalDestination.extend( absDestDirSplit )
finalDestination = os.path.join( *finalDestination )
print("Relinking %s" % entryPath)
print(" %s -> %s" % (destPath, finalDestination))
os.unlink( entryPath )
os.symlink( finalDestination, entryPath )
elif os.path.isdir( entryPath ):
fixAbsoluteLinks( entryPath )
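# Example of the relinking above (hypothetical paths): a symlink
# /opt/dirac/Linux_x86_64/lib/libz.so -> /opt/dirac/Externals/zlib/lib/libz.so
# shares the /opt/dirac prefix with its own directory, so it is rewritten to
# the relative target ../../Externals/zlib/lib/libz.so.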
if __name__ == "__main__":
cmdOpts = ( ( 'D:', 'destination=', 'Destination where to build the externals' ),
( 't:', 'type=', 'Type of compilation (default: client)' ),
( 'e:', 'externalsPath=', 'Path to the externals sources' ),
( 'v:', 'version=', 'Version of the externals to compile (default will be the latest commit)' ),
( 'h', 'help', 'Show this help' ),
( 'i:', 'pythonVersion=', 'Python version to compile (default 2.7)' ),
( 'f', 'fixLinksOnly', 'Only fix absolute soft links' ),
( 'j:', 'makeJobs=', 'Number of make jobs, by default is 1' )
)
compExtVersion = False
compType = 'client'
compDest = False
compExtSource = False
onlyFixLinks = False
makeArgs = []
compVersionDict = { 'PYTHONVERSION' : '2.7' }
optList, args = getopt.getopt( sys.argv[1:],
"".join( [ opt[0] for opt in cmdOpts ] ),
[ opt[1] for opt in cmdOpts ] )
for o, v in optList:
if o in ( '-h', '--help' ):
print(__doc__.split('\n')[1])
print("\nUsage:\n\n %s [options]..." % sys.argv[0])
print("\nOptions:\n")
for cmdOpt in cmdOpts:
print(" -%s --%s : %s" % (cmdOpt[0].ljust(3), cmdOpt[1].ljust(15), cmdOpt[2]))
sys.exit( 1 )
elif o in ( '-t', '--type' ):
compType = v.lower()
elif o in ( '-e', '--externalsPath' ):
compExtSource = v
elif o in ( '-D', '--destination' ):
compDest = v
elif o in ( '-v', '--version' ):
compExtVersion = v
    elif o in ( '-i', '--pythonVersion' ):
compVersionDict[ 'PYTHONVERSION' ] = ".".join( [ c for c in v if c in "0123456789" ] )
elif o in ( '-f', '--fixLinksOnly' ):
onlyFixLinks = True
elif o in ( '-j', '--makeJobs' ):
try:
v = int( v )
      except ValueError:
        print("Value for makeJobs is not an integer (%s)" % v)
sys.exit( 1 )
if v < 1:
print("Value for makeJobs mas to be greater than 0 (%s)" % v)
sys.exit( 1 )
makeArgs.append( "-j %d" % int( v ) )
#Find platform
basePath = os.path.dirname( os.path.realpath( __file__ ) )
DIRACRoot = findDIRACRoot( basePath )
if DIRACRoot:
platformPath = os.path.join( DIRACRoot, "DIRAC", "Core", "Utilities", "Platform.py" )
with open( platformPath, "r" ) as platFD:
Platform = imp.load_module( "Platform", platFD, platformPath, ( "", "r", imp.PY_SOURCE ) )
platform = Platform.getPlatformString()
if not compDest:
if not DIRACRoot:
print("Error: Could not find DIRAC root")
sys.exit( 1 )
print("Using platform %s" % platform)
if not platform or platform == "ERROR":
print("Can not determine local platform", file=sys.stderr)
sys.exit( -1 )
compDest = os.path.join( DIRACRoot, platform )
if onlyFixLinks:
print("Fixing absolute links")
fixAbsoluteLinks( compDest )
sys.exit( 0 )
if compDest:
if os.path.isdir( compDest ):
oldCompDest = compDest + '.old'
print("Warning: %s already exists! Backing it up to %s" % (compDest, oldCompDest))
if os.path.exists( oldCompDest ):
shutil.rmtree( oldCompDest )
os.rename( compDest, oldCompDest )
if not compExtSource:
workDir = tempfile.mkdtemp( prefix = "ExtDIRAC" )
print("Creating temporary work dir at %s" % workDir)
downOK = False
if not downloadExternals( workDir, compExtVersion ):
print("Oops! Could not download Externals!")
sys.exit( 1 )
externalsDir = os.path.join( workDir, "Externals" )
else:
externalsDir = compExtSource
copyFromDIRAC( "DIRAC/Core/scripts/dirac-platform.py", externalsDir, True )
copyFromDIRAC( "DIRAC/Core/Utilities/CFG.py", externalsDir, False, [ '@gCFGSynchro' ] )
#Load CFG
cfgPath = os.path.join( externalsDir, "CFG.py" )
with open( cfgPath, "r" ) as cfgFD:
CFG = imp.load_module( "CFG", cfgFD, cfgPath, ( "", "r", imp.PY_SOURCE ) )
buildCFG = CFG.CFG().loadFromFile( os.path.join( externalsDir, "builds.cfg" ) )
if compType not in buildCFG.listSections():
print("Invalid compilation type %s" % compType)
print(" Valid ones are: %s" % ", ".join(buildCFG.listSections()))
sys.exit( 1 )
packagesToBuild = resolvePackagesToBuild( compType, buildCFG )
if compDest:
makeArgs.append( "-p '%s'" % os.path.realpath( compDest ) )
#Substitution of versions
finalPackages = []
for prog in packagesToBuild:
for k in compVersionDict:
finalPackages.append( prog.replace( "$%s$" % k, compVersionDict[k] ) )
print("Trying to get a raw environment")
patDet = os.path.join( DIRACRoot, platform )
for envVar in ( 'LD_LIBRARY_PATH', 'PATH' ):
if envVar not in os.environ:
continue
envValue = os.environ[ envVar ]
    valList = [ val.strip() for val in envValue.split( ":" ) if val.strip() ]
fixedValList = []
for value in valList:
if value.find( patDet ) != 0:
fixedValList.append( value )
os.environ[ envVar ] = ":".join( fixedValList )
makeArgs = " ".join( makeArgs )
print("Building %s" % ", ".join(finalPackages))
for prog in finalPackages:
print("== BUILDING %s == " % prog)
progDir = os.path.join( externalsDir, prog )
makePath = os.path.join( progDir, "dirac-make.py" )
buildOutPath = os.path.join( progDir, "build.out" )
os.chmod( makePath, executablePerms )
instCmd = "'%s' %s" % ( makePath, makeArgs )
print(" - Executing %s" % instCmd)
ret = os.system( "%s > '%s' 2>&1" % ( instCmd, buildOutPath ) )
if ret:
print("Oops! Error while compiling %s" % prog)
print("Take a look at %s for more info" % buildOutPath)
sys.exit( 1 )
print("Fixing absolute links")
fixAbsoluteLinks( compDest )
|
petricm/DIRAC
|
Core/scripts/dirac-compile-externals.py
|
Python
|
gpl-3.0
| 9,938
|
from cad import CAD
from sympy import And, Or, Not, Min, Max, Add, Mul, Pow, Lt, Le, Gt, Ge, \
Eq, Ne, S, Integer, Rational, Symbol, Expr as SymPyExpr
from sympy.logic.boolalg import BooleanFunction, BooleanAtom, \
BooleanTrue, BooleanFalse
import operator
def is_min_max(s):
return isinstance(s, Min) or isinstance(s, Max)
def get_min_max(s, invert=False):
if isinstance(s, Min):
return (Min if not invert else Max)
if isinstance(s, Max):
return (Max if not invert else Min)
return None
def apply_and_simplify(s, e, op, invert_on_negative=False):
s_cons = get_min_max(s)
s_cons_inv = get_min_max(s, invert=True)
e_cons = get_min_max(e)
e_cons_inv = get_min_max(e, invert=True)
def without_invert_on_negative():
# Min/Max op Min/Max
if s_cons != None and e_cons != None:
args = []
# Min op Min or Max op Max
if s_cons == e_cons:
for sa in s.args:
for ea in e.args:
args.append(op(sa, ea))
# Min op Max
else:
for sa in s.args:
for ea in e.args:
args.append(op(sa, -ea))
return s_cons(*args)
# Min/Max op Sym
if s_cons != None and isinstance(e, Symbol):
return s_cons(*map(lambda a: op(a, e), s.args))
# Sym op Min/Max
if e_cons != None and isinstance(s, Symbol):
return e_cons(*map(lambda a: op(s, a), e.args))
# Min/Max op Int
if s_cons != None and isinstance(e, Integer):
return s_cons(*map(lambda a: op(a, e), s.args))
# Int op Min/Max
if e_cons != None and isinstance(s, Integer):
return e_cons(*map(lambda a: op(s, a), e.args))
return op(s, e)
def with_invert_on_negative():
# Min/Max op Min/Max
if s_cons != None and e_cons != None:
args = []
# Min op Min or Max op Max
if s_cons == e_cons:
for sa in s.args:
for ea in e.args:
args.append(op(sa, ea))
for sa in s.args:
for ea in e.args:
args.append(op(-sa, ea))
# Min op Max
else:
for sa in s.args:
for ea in e.args:
args.append(op(sa, -ea))
for sa in s.args:
for ea in e.args:
args.append(op(-sa, -ea))
return s_cons(*args)
# Min/Max op Sym
if s_cons != None and isinstance(e, Symbol):
args = map(lambda a: op(a, e), s.args) \
+ map(lambda a: op(a, -e), s.args)
return s_cons(*args)
# Sym op Min/Max
if e_cons != None and isinstance(s, Symbol):
args = map(lambda a: op(s, a), e.args) \
+ map(lambda a: op(-s, a), e.args)
return e_cons(*args)
# Min/Max op Int
if s_cons != None and isinstance(e, Integer):
cons = s_cons_inv if e < 0 else s_cons
return cons(*map(lambda a: op(a, e), s.args))
# Int op Min/Max
if e_cons != None and isinstance(s, Integer):
cons = e_cons_inv if s < 0 else e_cons
return cons(*map(lambda a: op(s, a), e.args))
return op(s, e)
if invert_on_negative:
return with_invert_on_negative()
return without_invert_on_negative()
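# A few identities apply_and_simplify implements (sketch, assuming sympy
# Min/Max semantics):
#   Min(a, b) + c   ->  Min(a + c, b + c)     (c a Symbol or Integer)
#   Min(a, b) * 2   ->  Min(2*a, 2*b)         (non-negative Integer factor)
#   Min(a, b) * -2  ->  Max(-2*a, -2*b)       (invert_on_negative flips Min/Max)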
class Expr(object):
initialized = False
# TODO: these caches should probably be fixed-sized LRU structures.
min_cache = {}
max_cache = {}
@staticmethod
def init():
# FIXME: Are (+-)oo correctly handled?
Expr.__add__ = \
lambda s, e: Expr(apply_and_simplify(s.expr, e.expr, operator.add))
Expr.__sub__ = \
lambda s, e: Expr(apply_and_simplify(s.expr, e.expr, operator.sub))
Expr.__mul__ = \
lambda s, e: Expr(apply_and_simplify(s.expr, e.expr, operator.mul, \
invert_on_negative=True))
Expr.__div__ = \
lambda s, e: Expr(apply_and_simplify(s.expr, e.expr, operator.div, \
invert_on_negative=True))
Expr.__pow__ = lambda s, e: Expr(s.expr ** e.expr)
Expr.__neg__ = lambda s: Expr(-s.expr)
Expr.__eq__ = lambda s, e: Expr(Eq(s.expr, e.expr))
Expr.__ne__ = lambda s, e: Expr(Ne(s.expr, e.expr))
Expr.__lt__ = lambda s, e: Expr(Lt(s.expr, e.expr))
Expr.__le__ = lambda s, e: Expr(Le(s.expr, e.expr))
Expr.__gt__ = lambda s, e: Expr(Gt(s.expr, e.expr))
Expr.__ge__ = lambda s, e: Expr(Ge(s.expr, e.expr))
Expr.__and__ = lambda s, e: Expr(And(s.expr, e.expr))
Expr.__or__ = lambda s, e: Expr(Or(s.expr, e.expr))
Expr.__invert__ = lambda s: Expr(Not(s.expr))
Expr.is_eq = lambda s, e: s.expr == e.expr
Expr.is_ne = lambda s, e: s.expr != e.expr
Expr.is_empty = lambda s: s.is_eq(Expr.empty)
Expr.is_inf = lambda s: s.expr == S.Infinity or s.expr == -S.Infinity
Expr.is_plus_inf = lambda s: s.expr == S.Infinity
Expr.is_minus_inf = lambda s: s.expr == -S.Infinity
Expr.is_constant = lambda s: isinstance(s.expr, Integer)
Expr.is_integer = lambda s: isinstance(s.expr, Integer)
Expr.is_rational = lambda s: isinstance(s.expr, Rational)
Expr.is_symbol = lambda s: isinstance(s.expr, Symbol)
Expr.is_min = lambda s: isinstance(s.expr, Min)
Expr.is_max = lambda s: isinstance(s.expr, Max)
Expr.is_add = lambda s: isinstance(s.expr, Add)
Expr.is_mul = lambda s: isinstance(s.expr, Mul)
Expr.is_pow = lambda s: isinstance(s.expr, Pow)
Expr.get_integer = lambda s: s.expr.p
Expr.get_numer = lambda s: s.expr.p
Expr.get_denom = lambda s: s.expr.q
Expr.get_name = lambda s: s.expr.name
Expr.compare = lambda s, e: s.compare(e)
# Empty. When min/max is invalid.
Expr.empty = Expr("EMPTY")
@staticmethod
def get_nan():
return Expr(S.NaN)
@staticmethod
def get_plus_inf():
return Expr(S.Infinity)
@staticmethod
def get_minus_inf():
return Expr(-S.Infinity)
@staticmethod
def get_true():
return Expr(True)
@staticmethod
def get_false():
return Expr(False)
def __init__(self, val):
if not Expr.initialized:
Expr.initialized = True
Expr.init()
if isinstance(val, int) or isinstance(val, long):
self.expr = Integer(val)
elif isinstance(val, basestring):
self.expr = Symbol(val)
elif isinstance(val, bool) or isinstance(val, BooleanAtom):
self.expr = S.One if val else S.Zero
else:
assert isinstance(val, SymPyExpr) or isinstance(val, BooleanFunction) \
or (val == S.Infinity) or (val == -S.Infinity)
self.expr = val
def __str__(self):
return self.expr.__str__()
def __repr__(self):
r = self.__str__()
return r
def subs(self, expr_from, expr_to):
return Expr(self.expr.subs([(expr_from.expr, expr_to.expr)]))
def args(self):
return map(Expr, self.expr.args)
def sympy(self):
return self.expr
@staticmethod
def reduce_conditional(expr):
if isinstance(expr, And):
# And(x, y)
x, y = expr.args
# And(Or(a, b), y)
if isinstance(x, Or):
a, b = x.args
# And(Or(a, b), Or(c, d))
if isinstance(y, Or):
c, d = y.args
return [[a, c], [a, d], [b, c], [b, d]]
# And(Or(a, b), y)
            red_args = map(Expr.reduce_conditional, expr.args)
            red_args = sum(red_args, [])
            return red_args
        elif isinstance(expr, Or):
            red_args = map(Expr.reduce_conditional, expr.args)
            return red_args
return expr
@staticmethod
def minmax_args(expr, ty):
if isinstance(expr, ty):
return list(expr.args)
return [expr]
def reduce_min(self, args, assumptions):
m = Min(*args)
if not isinstance(m, Min):
# Expression was simplified.
return [m]
# Use the reduced argument list.
args = m.args
del_args = [False] * len(args)
for i in xrange(len(args)):
if del_args[i]: continue
for j in xrange(i + 1, len(args)):
if del_args[j]: continue
key = (args[i], args[j], assumptions)
if Expr.min_cache.has_key(key):
rest, resf, resi = Expr.min_cache[key]
else:
rest = CAD.implies(assumptions, args[i] <= args[j])
resf = CAD.implies(assumptions, args[i] >= args[j])
resi = CAD.implies(assumptions, args[i] > args[j])
Expr.min_cache[key] = (rest, resf, resi)
if not (CAD.is_unknown(rest) or CAD.is_unknown(resi)) \
and CAD.is_true(rest) and CAD.is_true(resi):
del_args[i] = True
del_args[j] = True
elif not (CAD.is_unknown(rest) or CAD.is_unknown(resf)) \
and (CAD.is_true(rest) or CAD.is_false(resf)):
del_args[j] = True
elif not (CAD.is_unknown(rest) or CAD.is_unknown(resf)) \
and (CAD.is_false(rest) or CAD.is_true(resf)):
del_args[i] = True
res_args = [args[i] for i in xrange(len(args)) if not del_args[i]]
return res_args
def reduce_max(self, args, assumptions):
m = Max(*args)
if not isinstance(m, Max):
# Expression was simplified.
return [m]
# Use the reduced argument list.
args = m.args
del_args = [False] * len(args)
for i in xrange(len(args)):
if del_args[i]: continue
for j in xrange(i + 1, len(args)):
if del_args[j]: continue
key = (args[i], args[j], assumptions)
if Expr.max_cache.has_key(key):
rest, resf, resi = Expr.max_cache[key]
else:
rest = CAD.implies(assumptions, args[i] >= args[j])
resf = CAD.implies(assumptions, args[i] <= args[j])
resi = CAD.implies(assumptions, args[i] < args[j])
Expr.max_cache[key] = (rest, resf, resi)
if not (CAD.is_unknown(rest) or CAD.is_unknown(resi)) \
and CAD.is_true(rest) and CAD.is_true(resi):
del_args[i] = True
del_args[j] = True
elif not (CAD.is_unknown(rest) or CAD.is_unknown(resf)) \
and (CAD.is_true(rest) or CAD.is_false(resf)):
del_args[j] = True
elif not (CAD.is_unknown(rest) or CAD.is_unknown(resf)) \
and (CAD.is_false(rest) or CAD.is_true(resf)):
del_args[i] = True
res_args = [args[i] for i in xrange(len(args)) if not del_args[i]]
return res_args
def min_or_max(self, other, op, assumptions):
try:
assert op == Min or op == Max
args = Expr.minmax_args(self.expr, op) + \
Expr.minmax_args(other.expr, op)
inv_op = Min if op == Max else Max
if isinstance(other.expr, inv_op):
if self.expr in other.expr.args:
return self
if isinstance(self.expr, inv_op) and other.expr in self.expr.args:
return other
if (self.expr == S.Infinity):
return other
elif (other.expr == S.Infinity):
return self
if (self.expr == -S.Infinity):
return self
elif (other.expr == -S.Infinity):
return other
# TODO: handle min/max parameters.
if any(map(lambda e: e.has(Min) or e.has(Max), args)):
return Expr(op(self.expr, other.expr))
if op == Min:
res_args = \
self.reduce_min(args, assumptions.expr if assumptions else False)
else:
res_args = \
self.reduce_max(args, assumptions.expr if assumptions else False)
if not res_args:
return Expr.empty
return Expr(op(*res_args))
except BaseException as b:
print "Exception triggered: min_or_max", self, other
print b
raise
def min(self, other, assumptions=None):
return self.min_or_max(other, Min, assumptions)
def max(self, other, assumptions=None):
return self.min_or_max(other, Max, assumptions)
def size(self):
return len(self.expr.args)
|
henry-nazare/llvm-sage
|
Runtime/llvmsage/expr.py
|
Python
|
bsd-3-clause
| 11,686
|
import tkinter as tk
import cv2
import matplotlib
matplotlib.use("TkAgg")  # select the Tk backend before importing backend modules
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
class Histogram:
def __init__(self, parent: tk.Frame):
self.image = None
self.hist_frame = parent
self.histCanvas = None
self.toolbar = None
self.fig = Figure(tight_layout=True)
self.fig_subplot = self.fig.add_subplot(111)
self.hist_pos_label = tk.Label(master=self.hist_frame)
self.hist_pos_label.pack(side=tk.TOP)
def _on_plot_hover(self, event):
if event.xdata is not None:
            if event.xdata < 256:
x = int(event.xdata)
y = int(self.calculate_hist()[int(event.xdata)])
self.hist_pos_label.config(text="{}:{}".format(x, y))
def __call__(self, image, *args, **kwargs):
self.image = image
self.fig_subplot.clear()
self.fig_subplot.bar(range(0, 256), self.calculate_hist(), width=1)
self.fig_subplot.set_xlim([-1, 256])
if self.histCanvas is None:
self.histCanvas = FigureCanvasTkAgg(self.fig, self.hist_frame)
self.histCanvas.show()
if self.toolbar is None:
self.toolbar = NavigationToolbar2TkAgg(self.histCanvas, self.hist_frame)
self.toolbar.update()
self.histCanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.fig.canvas.mpl_connect('motion_notify_event', self._on_plot_hover)
def calculate_hist(self):
return cv2.calcHist([self.image], [0], None, [256], [0, 256])
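# Minimal usage sketch (hypothetical file name; assumes a grayscale image):
#     root = tk.Tk()
#     frame = tk.Frame(root)
#     frame.pack(fill=tk.BOTH, expand=True)
#     hist = Histogram(frame)
#     hist(cv2.imread("input.png", cv2.IMREAD_GRAYSCALE))
#     root.mainloop()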
|
hawwestin/MSR.APO
|
gui/histogram.py
|
Python
|
apache-2.0
| 1,681
|
"""Backward compatible behaviour with primary key 'Id' and upper-case field names"""
import datetime
from salesforce import models
from salesforce.models import SalesforceModel
class User(SalesforceModel):
Username = models.CharField(max_length=80)
Email = models.CharField(max_length=100)
class Lead(SalesforceModel):
Company = models.CharField(max_length=255)
LastName = models.CharField(max_length=80)
Owner = models.ForeignKey(User, on_delete=models.DO_NOTHING,
default=models.DEFAULTED_ON_CREATE, db_column='OwnerId')
# models for unit tests used without a connection only
class A(SalesforceModel):
email = models.EmailField(custom=True)
class Meta:
db_table = 'A__c'
class B(SalesforceModel):
class Meta:
db_table = 'B__c'
class AtoB(SalesforceModel):
a = models.ForeignKey(A, models.DO_NOTHING, custom=True)
b = models.ForeignKey(B, models.DO_NOTHING, custom=True)
class Meta:
db_table = 'AtoB__c'
class TryDefaults(SalesforceModel):
# this model doesn't exist in Salesforce, but it should be valid
# it is only for coverage of code by tests
example_str = models.CharField(max_length=50, default=models.DefaultedOnCreate('client'))
example_datetime = models.DateTimeField(default=models.DefaultedOnCreate(datetime.datetime(2021, 3, 31, 23, 59)))
# example_date = models.DateTimeField(default=models.DefaultedOnCreate(datetime.date(2021, 3, 31)))
example_time = models.DateTimeField(default=models.DefaultedOnCreate(datetime.time(23, 59)))
example_foreign_key = models.ForeignKey(User, on_delete=models.DO_NOTHING, default=models.DefaultedOnCreate())
# ,default=models.DefaultedOnCreate(User(pk='000000000000000')))
example_bool = models.BooleanField(default=models.DefaultedOnCreate(True))
example_bool_2 = models.BooleanField(default=models.DefaultedOnCreate(False))
|
django-salesforce/django-salesforce
|
tests/test_compatibility/models.py
|
Python
|
mit
| 1,930
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# client_vis.py
# Game client for 2015 ETD Winter retreat
# Client-side Visualization
# https://github.com/lmccalman/spacerace
#
# Created by Louis Tiao on 28/07/2015.
#
# For Mac OS X
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import logging
import pprint
import string
import random
import zmq
from client import Client
from argparse import ArgumentParser
DEFAULTS = {
'hostname': '10.0.0.144',
'state_port': 5556,
'control_port': 5557,
'lobby_port': 5558,
}
# Setup basic logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level = logging.INFO,
datefmt = '%I:%M:%S %p',
format = '%(asctime)s [%(levelname)s]: %(message)s'
)
# Helper functions
make_random_name = lambda length: ''.join(random.choice(string.ascii_letters) \
for _ in range(length))
class MPLController:
def __init__(self, ship_name, team_name, fig, ax, client=None, *args, **kwargs):
if client is None:
client = Client(*args, **kwargs)
self.client = client
self.keys = set()
fig.canvas.mpl_connect('key_press_event', self.press)
fig.canvas.mpl_connect('key_release_event', self.release)
self.text = ax.text(0.25, 0.25, 'Hello!')
        # Keep a reference on self so the animation is not garbage-collected.
        self._anim = animation.FuncAnimation(fig, self.anim, frames=self.state_gen(),
                                             init_func=self.init_anim, interval=25,
                                             blit=True, repeat=False)
response = self.client.lobby.register(ship_name, team_name)
self.game = response.game
self.ship = response.name
self.secret = response.secret
self.client.state.subscribe(self.game)
        # NOTE: the original assignment here was truncated ("self.map =");
        # the map payload is not part of this snippet, so default to None.
        self.map = None
    def update_control(self):
        linear = int('up' in self.keys)
        rotation = 0
        rotation += int('left' in self.keys)
        rotation -= int('right' in self.keys)
        self.client.control.send(self.secret, linear, rotation)
    def press(self, event):
        self.keys.add(event.key)
        self.update_control()
    def release(self, event):
        self.keys.discard(event.key)
        self.update_control()
def init_anim(self):
self.text.set_text('')
return self.text,
def anim(self, state):
self.text.set_text(pprint.pformat(state))
return self.text,
def state_gen(self):
return self.client.state.state_gen()
if __name__ == '__main__':
parser = ArgumentParser(
description='Spacerace: Manned Spacecraft'
)
parser.add_argument('--hostname', type=str, help='Server hostname', default=DEFAULTS['hostname'])
parser.add_argument('--state_port', type=int, help='State port', default=DEFAULTS['state_port'])
parser.add_argument('--control_port', type=int, help='Control port', default=DEFAULTS['control_port'])
parser.add_argument('--lobby_port', type=int, help='Lobby port', default=DEFAULTS['lobby_port'])
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('--ship_name', '-n', type=str,
default=make_random_name(10), help='Ship Name')
parser.add_argument('--team_name', '-t', type=str,
default=make_random_name(10), help='Ship Name')
args = parser.parse_args()
logger.debug(args)
fig, ax = plt.subplots()
client = Client(args.hostname, args.lobby_port, args.control_port, args.state_port)
c = MPLController(args.ship_name, args.team_name, fig, ax, client)
plt.show()
|
rebeccad/space_race_etd_2015
|
clients/python/client_vis.py
|
Python
|
mit
| 3,804
|
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
@task(1)
def login(self):
self.client.post("/login", {"email":"mablibsking@hotmail.com", "password":"CPEN321_ryangroup"})
@task(1)
def search(self):
self.client.get("/item_searched?item=samsung")
@task(2)
def search_2(self):
self.client.get("/item_searched?item=apple")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 9000
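# With @task weights 1:1:2, search_2 is scheduled roughly twice as often as
# login or search, and each simulated user waits 5000-9000 ms between tasks.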
|
BenjaminLang/cpen_321
|
Server/test/stress_test.py
|
Python
|
mit
| 605
|
import re
class PyTestErrorAnalyzer(object):
def __call__(self, message):
failed_tests = []
sections = message.split("\r\n\r\n")
for i, section in enumerate(sections):
section = section.strip()
if re.search(r':([0-9]+):', section):
lines = section.split("\n")
failfilename = sections[i-1].split("\n")[-1].split(" ")[0]
filename, linenumber, _ = lines[1].strip().split(':')
classname, method = re.findall(r'_* *([^ ]+) *_*', lines[0])[0].split('.')
failed_tests.append({
'filename': failfilename,
'fail_filename': filename,
'fail_line': linenumber,
'class': classname,
'method': method
})
return failed_tests
@staticmethod
def check(data):
return data['command'].split(' ')[0] == 'py.test'
@staticmethod
def run_command(data):
return (
data['command'].split(' ') +
list(set([x['filename'] for x in data['failed_tests']])) +
['-k'] +
[' or '.join(
['%(class)s and %(method)s' % x for x in data['failed_tests']]
)]
)
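# Illustrative example (hypothetical failure data): with
#   data = {'command': 'py.test tests',
#           'failed_tests': [{'filename': 'tests/test_a.py',
#                             'class': 'TestA', 'method': 'test_x'}]}
# run_command(data) returns
#   ['py.test', 'tests', 'tests/test_a.py', '-k', 'TestA and test_x'].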
|
fastmonkeys/cicli
|
cicli/analyzers.py
|
Python
|
mit
| 1,290
|
#!/usr/bin/env python3
xs = []
for i in range(256):
xs.append(str(i) + ' 0 0')
tam, zpatky = ' '.join(xs), ' '.join(xs[::-1])
with open('04-output.pgm', 'w') as f:
f.write('P3\n512 400\n255\n')
for i in range(400):
f.write(tam + ' ')
f.write(zpatky + ' ')
f.write('\n')
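# The loops above emit a 512x400 plain-text image (P3, i.e. ASCII PPM data,
# despite the .pgm name): each row ramps the red channel from 0 to 255 across
# the left half and back down to 0 across the right half.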
|
HoBi/BI-PYT
|
seminars/04/04-rainbow.py
|
Python
|
mit
| 309
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups api."""
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder.api import xmlutil
from cinder import backup as backupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
def make_backup(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('container')
elem.set('parent_id')
elem.set('volume_id')
elem.set('object_count')
elem.set('availability_zone')
elem.set('created_at')
elem.set('name')
elem.set('description')
elem.set('fail_reason')
def make_backup_restore(elem):
elem.set('backup_id')
elem.set('volume_id')
elem.set('volume_name')
def make_backup_export_import_record(elem):
elem.set('backup_service')
elem.set('backup_url')
class BackupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('backup', selector='backup')
make_backup(root)
alias = Backups.alias
namespace = Backups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class BackupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('backups')
elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups')
make_backup(elem)
alias = Backups.alias
namespace = Backups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class BackupRestoreTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('restore', selector='restore')
make_backup_restore(root)
alias = Backups.alias
namespace = Backups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class BackupExportImportTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('backup-record',
selector='backup-record')
make_backup_export_import_record(root)
alias = Backups.alias
namespace = Backups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
backup = self._extract_backup(dom)
return {'body': {'backup': backup}}
def _extract_backup(self, node):
backup = {}
backup_node = self.find_first_child_named(node, 'backup')
attributes = ['container', 'display_name',
'display_description', 'volume_id',
'parent_id']
for attr in attributes:
if backup_node.getAttribute(attr):
backup[attr] = backup_node.getAttribute(attr)
return backup
class RestoreDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
restore = self._extract_restore(dom)
return {'body': {'restore': restore}}
def _extract_restore(self, node):
restore = {}
restore_node = self.find_first_child_named(node, 'restore')
if restore_node.getAttribute('volume_id'):
restore['volume_id'] = restore_node.getAttribute('volume_id')
return restore
class BackupImportDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
backup = self._extract_backup(dom)
retval = {'body': {'backup-record': backup}}
return retval
def _extract_backup(self, node):
backup = {}
backup_node = self.find_first_child_named(node, 'backup-record')
attributes = ['backup_service', 'backup_url']
for attr in attributes:
if backup_node.getAttribute(attr):
backup[attr] = backup_node.getAttribute(attr)
return backup
class BackupsController(wsgi.Controller):
"""The Backups API controller for the OpenStack API."""
_view_builder_class = backup_views.ViewBuilder
def __init__(self):
self.backup_api = backupAPI.API()
super(BackupsController, self).__init__()
@wsgi.serializers(xml=BackupTemplate)
def show(self, req, id):
"""Return data about the given backup."""
LOG.debug('show called for member %s', id)
context = req.environ['cinder.context']
try:
backup = self.backup_api.get(context, backup_id=id)
req.cache_db_backup(backup)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return self._view_builder.detail(req, backup)
def delete(self, req, id):
"""Delete a backup."""
LOG.debug('Delete called for member %s.', id)
context = req.environ['cinder.context']
LOG.info(_LI('Delete backup with id: %s'), id, context=context)
try:
backup = self.backup_api.get(context, id)
self.backup_api.delete(context, backup)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.InvalidBackup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=BackupsTemplate)
def index(self, req):
"""Returns a summary list of backups."""
return self._get_backups(req, is_detail=False)
@wsgi.serializers(xml=BackupsTemplate)
def detail(self, req):
"""Returns a detailed list of backups."""
return self._get_backups(req, is_detail=True)
@staticmethod
def _get_backup_filter_options():
"""Return volume search options allowed by non-admin."""
return ('name', 'status', 'volume_id')
def _get_backups(self, req, is_detail):
"""Returns a list of backups, transformed through view builder."""
context = req.environ['cinder.context']
filters = req.params.copy()
marker, limit, offset = common.get_pagination_params(filters)
sort_keys, sort_dirs = common.get_sort_params(filters)
utils.remove_invalid_filter_options(context,
filters,
self._get_backup_filter_options())
if 'name' in filters:
filters['display_name'] = filters['name']
del filters['name']
backups = self.backup_api.get_all(context, search_opts=filters,
marker=marker,
limit=limit,
offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
)
req.cache_db_backups(backups.objects)
if is_detail:
backups = self._view_builder.detail_list(req, backups.objects)
else:
backups = self._view_builder.summary_list(req, backups.objects)
return backups
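    # Illustrative request (hypothetical values): for
    # GET .../backups/detail?status=available&name=nightly&limit=10
    # the filters are reduced to _get_backup_filter_options(), 'name' is
    # mapped to the 'display_name' field, and marker/limit/offset page the
    # result.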
# TODO(frankm): Add some checks here including
# - whether requested volume_id exists so we can return some errors
# immediately
# - maybe also do validation of swift container name
@wsgi.response(202)
@wsgi.serializers(xml=BackupTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Create a new backup."""
LOG.debug('Creating new backup %s', body)
self.assert_valid_body(body, 'backup')
context = req.environ['cinder.context']
backup = body['backup']
try:
volume_id = backup['volume_id']
except KeyError:
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
container = backup.get('container', None)
self.validate_name_and_description(backup)
name = backup.get('name', None)
description = backup.get('description', None)
incremental = backup.get('incremental', False)
force = backup.get('force', False)
LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
" %(container)s"),
{'volume_id': volume_id, 'container': container},
context=context)
try:
new_backup = self.backup_api.create(context, name, description,
volume_id, container,
incremental, None, force)
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.ServiceNotFound as error:
raise exc.HTTPInternalServerError(explanation=error.msg)
retval = self._view_builder.summary(req, dict(new_backup))
return retval
@wsgi.response(202)
@wsgi.serializers(xml=BackupRestoreTemplate)
@wsgi.deserializers(xml=RestoreDeserializer)
def restore(self, req, id, body):
"""Restore an existing backup to a volume."""
LOG.debug('Restoring backup %(backup_id)s (%(body)s)',
{'backup_id': id, 'body': body})
self.assert_valid_body(body, 'restore')
context = req.environ['cinder.context']
restore = body['restore']
volume_id = restore.get('volume_id', None)
name = restore.get('name', None)
LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
{'backup_id': id, 'volume_id': volume_id},
context=context)
try:
new_restore = self.backup_api.restore(context,
backup_id=id,
volume_id=volume_id,
name=name)
except exception.InvalidInput as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidBackup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.VolumeSizeExceedsAvailableQuota as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.msg, headers={'Retry-After': '0'})
except exception.VolumeLimitExceeded as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.msg, headers={'Retry-After': '0'})
retval = self._view_builder.restore_summary(
req, dict(new_restore))
return retval
@wsgi.response(200)
@wsgi.serializers(xml=BackupExportImportTemplate)
def export_record(self, req, id):
"""Export a backup."""
LOG.debug('export record called for member %s.', id)
context = req.environ['cinder.context']
try:
backup_info = self.backup_api.export_record(context, id)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.InvalidBackup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
retval = self._view_builder.export_summary(
req, dict(backup_info))
LOG.debug('export record output: %s.', retval)
return retval
@wsgi.response(201)
@wsgi.serializers(xml=BackupTemplate)
@wsgi.deserializers(xml=BackupImportDeserializer)
def import_record(self, req, body):
"""Import a backup."""
LOG.debug('Importing record from %s.', body)
self.assert_valid_body(body, 'backup-record')
context = req.environ['cinder.context']
import_data = body['backup-record']
# Verify that body elements are provided
try:
backup_service = import_data['backup_service']
backup_url = import_data['backup_url']
except KeyError:
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.debug('Importing backup using %(service)s and url %(url)s.',
{'service': backup_service, 'url': backup_url})
try:
new_backup = self.backup_api.import_record(context,
backup_service,
backup_url)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.InvalidBackup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.ServiceNotFound as error:
raise exc.HTTPInternalServerError(explanation=error.msg)
retval = self._view_builder.summary(req, dict(new_backup))
LOG.debug('import record output: %s.', retval)
return retval
class Backups(extensions.ExtensionDescriptor):
"""Backups support."""
name = 'Backups'
alias = 'backups'
namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1'
updated = '2012-12-12T00:00:00+00:00'
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
Backups.alias, BackupsController(),
collection_actions={'detail': 'GET', 'import_record': 'POST'},
member_actions={'restore': 'POST', 'export_record': 'GET',
'action': 'POST'})
resources.append(res)
return resources
|
tobegit3hub/cinder_docker
|
cinder/api/contrib/backups.py
|
Python
|
apache-2.0
| 14,953
|
#!/usr/bin/env python
# Vendored code from pypng https://github.com/drj11/pypng
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The ``png`` module can read and write PNG files.
Installation and Overview
-------------------------
``pip install pypng``
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer` classes.
Coverage of PNG formats is fairly complete;
all allowable bit depths (1/2/4/8/16/24/32/48/64 bits per pixel) and
colour combinations are supported:
- greyscale (1/2/4/8/16 bit);
- RGB, RGBA, LA (greyscale with alpha) with 8/16 bits per channel;
- colour mapped images (1/2/4/8 bit).
Interlaced images,
which support a progressive display when downloading,
are supported for both reading and writing.
A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
The ``sBIT`` chunk can be used to specify precision for
non-native bit depths.
Requires Python 3.5 or higher.
Installation is trivial,
but see the ``README.txt`` file (with the source distribution) for details.
Full use of all features will need some reading of the PNG specification
http://www.w3.org/TR/2003/REC-PNG-20031110/.
The package also comes with command line utilities.
- ``pripamtopng`` converts
`Netpbm <http://netpbm.sourceforge.net/>`_ PAM/PNM files to PNG;
- ``pripngtopam`` converts PNG files to PAM/PNM.
There are a few more for simple PNG manipulations.
Spelling and Terminology
------------------------
Generally British English spelling is used in the documentation.
So that's "greyscale" and "colour".
This not only matches the author's native language,
it's also used by the PNG specification.
Colour Models
-------------
The major colour models supported by PNG (and hence by PyPNG) are:
- greyscale;
- greyscale--alpha;
- RGB;
- RGB--alpha.
Also referred to using the abbreviations: L, LA, RGB, RGBA.
Each letter codes a single channel:
*L* is for Luminance or Luma or Lightness (greyscale images);
*A* stands for Alpha, the opacity channel
(used for transparency effects, but higher values are more opaque,
so it makes sense to call it opacity);
*R*, *G*, *B* stand for Red, Green, Blue (colour image).
Lists, arrays, sequences, and so on
-----------------------------------
When getting pixel data out of this module (reading) and
presenting data to this module (writing) there are
a number of ways the data could be represented as a Python value.
The preferred format is a sequence of *rows*,
with each row being a sequence of *values*.
In this format, the values are in pixel order,
with all the values from all the pixels in a row
being concatenated into a single sequence for that row.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Sequence of rows::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list,
but the pixels are flattened so that three values for one pixel
simply follow the three values for the previous pixel.
This is the preferred format because
it provides a good compromise between space and convenience.
PyPNG regards itself as at liberty to replace any sequence type with
any sufficiently compatible other sequence type;
in practice each row is an array (``bytearray`` or ``array.array``).
To allow streaming the outer list is sometimes
an iterator rather than an explicit list.
An alternative format is a single array holding all the values.
Array of values::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
The top row comes first,
and within each row the pixels are ordered from left-to-right.
Within a pixel the values appear in the order R-G-B-A
(or L-A for greyscale--alpha).
There is another format, which should only be used with caution.
It is mentioned because it is used internally,
is close to what lies inside a PNG file itself,
and has some support from the public API.
This format is called *packed*.
When packed, each row is a sequence of bytes (integers from 0 to 255),
just as it is before PNG scanline filtering is applied.
When the bit depth is 8 this is the same as a sequence of rows;
when the bit depth is less than 8 (1, 2 and 4),
several pixels are packed into each byte;
when the bit depth is 16 each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer).
This format is used by the :meth:`Writer.write_packed` method.
It isn't usually a convenient format,
but may be just right if the source data for
the PNG image comes from something that uses a similar format
(for example, 1-bit BMPs, or another PNG file).
"""
__version__ = "0.0.20"
import collections
import io # For io.BytesIO
import itertools
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import re
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
__all__ = ["Image", "Reader", "Writer", "write_chunks", "from_array"]
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
signature = struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)
# The xstart, ystart, xstep, ystep for the Adam7 interlace passes.
adam7 = (
(0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2),
)
def adam7_generate(width, height):
"""
Generate the coordinates for the reduced scanlines
of an Adam7 interlaced image
of size `width` by `height` pixels.
Yields a generator for each pass,
and each pass generator yields a series of (x, y, xstep) triples,
each one identifying a reduced scanline consisting of
pixels starting at (x, y) and taking every xstep pixel to the right.
"""
for xstart, ystart, xstep, ystep in adam7:
if xstart >= width:
continue
yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
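# For example, with width == height == 8 the first pass yields a single triple
# (0, 0, 8): one reduced scanline containing just the pixel at (0, 0).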
# Models the 'pHYs' chunk (used by the Reader)
Resolution = collections.namedtuple("_Resolution", "x y unit_is_meter")
def group(s, n):
return list(zip(*[iter(s)] * n))
def isarray(x):
return isinstance(x, array)
def check_palette(palette):
"""
Check a palette argument (to the :class:`Writer` class) for validity.
Returns the palette as a list if okay;
raises an exception otherwise.
"""
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ProtocolError(
"a palette must have between 1 and 256 entries,"
" see https://www.w3.org/TR/PNG/#11PLTE"
)
seen_triple = False
for i, t in enumerate(p):
if len(t) not in (3, 4):
raise ProtocolError("palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ProtocolError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i
)
for x in t:
if int(x) != x or not (0 <= x <= 255):
raise ProtocolError(
"palette entry %d: " "values must be integer: 0 <= x <= 255" % i
)
return p
def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ProtocolError("size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ProtocolError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width)
)
if height is not None and height != size[1]:
raise ProtocolError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height)
)
return size
def check_color(c, greyscale, which):
"""
Checks that a colour argument for transparent or background options
is the right form.
Returns the colour
(which, if it's a bare integer, is "corrected" to a 1-tuple).
"""
if c is None:
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ProtocolError("%s for greyscale must be 1-tuple" % which)
if not is_natural(c[0]):
raise ProtocolError("%s colour for greyscale must be integer" % which)
else:
if not (
len(c) == 3 and is_natural(c[0]) and is_natural(c[1]) and is_natural(c[2])
):
raise ProtocolError("%s colour must be a triple of integers" % which)
return c
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ": " + " ".join(self.args)
class FormatError(Error):
"""
Problem with input file format.
In other words, PNG file does not conform to
the specification in some way and is invalid.
"""
class ProtocolError(Error):
"""
Problem with the way the programming interface has been used,
or the data presented to it.
"""
class ChunkError(FormatError):
pass
class Default:
"""The default for the greyscale paramter."""
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(
self,
width=None,
height=None,
size=None,
greyscale=Default,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
planes=None,
colormap=None,
maxval=None,
chunk_limit=2 ** 20,
x_pixels_per_unit=None,
y_pixels_per_unit=None,
unit_is_meter=False,
):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Pixels are greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16 (for each channel).
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
x_pixels_per_unit
Number of pixels a unit along the x axis (write a
`pHYs` chunk).
y_pixels_per_unit
Number of pixels a unit along the y axis (write a
`pHYs` chunk). Along with `x_pixel_unit`, this gives
the pixel size ratio.
unit_is_meter
`True` to indicate that the unit (for the `pHYs`
chunk) is metre.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument.
If `size` is used it should be a pair (*width*, *height*).
The `greyscale` argument indicates whether input pixels
are greyscale (when true), or colour (when false).
The default is true unless `palette=` is used.
The `alpha` argument (a boolean) specifies
whether input pixels have an alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each channel may have a different bit depth.
Each source pixel must have values that are
an integer between 0 and ``2**bitdepth-1``, where
`bitdepth` is the bit depth for the corresponding channel.
For example, 8-bit images have values between 0 and 255.
PNG only stores images with bit depths of
1,2,4,8, or 16 (the same for all channels).
When `bitdepth` is not one of these values or where
channels have different bit depths,
the next highest valid bit depth is selected,
and an ``sBIT`` (significant bits) chunk is generated
that specifies the original precision of the source image.
In this case the supplied pixel values will be rescaled to
fit the range of the selected bit depth.
The PNG file format supports many bit depth / colour model
combinations, but not all.
The details are somewhat arcane
(refer to the PNG specification for full details).
Briefly:
Bit depths < 8 (1,2,4) are only allowed with greyscale and
colour mapped images;
colour mapped images cannot have bit depth 16.
For colour mapped images
(in other words, when the `palette` argument is specified)
the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8.
(It is valid to have a PNG image with a palette and
an ``sBIT`` chunk, but the meaning is slightly different;
it would be awkward to use the `bitdepth` argument for this.)
The `palette` option, when specified,
causes a colour mapped image to be created:
the PNG colour type is set to 3;
`greyscale` must not be true; `alpha` must not be true;
`transparent` must not be set.
The bit depth must be 1,2,4, or 8.
When a colour mapped image is created,
the pixel values are palette indexes and
the `bitdepth` argument specifies the size of these indexes
(not the size of the colour values in the palette).
The palette argument value should be a sequence of 3- or
4-tuples.
3-tuples specify RGB palette entries;
4-tuples specify RGBA palette entries.
All the 4-tuples (if present) must come before all the 3-tuples.
A ``PLTE`` chunk is created;
if there are 4-tuples then a ``tRNS`` chunk is created as well.
The ``PLTE`` chunk will contain all the RGB triples in the same
sequence;
the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence.
Palette entries are always 8-bit.
If specified, the `transparent` and `background` parameters must be
a tuple with one element for each channel in the image.
Either a 3-tuple of integer (RGB) values for a colour image, or
a 1-tuple of a single integer for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a `float`).
A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file,
they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module.
Values from 1 to 9 (highest) specify compression.
0 means no compression.
-1 and ``None`` both mean that the ``zlib`` module uses
        the default level of compression (which is generally acceptable).
        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).
This does not affect how the pixels should be passed in,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be
partially decoded by the browser to give
a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image.
In order to avoid using large amounts of memory,
multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if not is_natural(width) or not is_natural(height):
raise ProtocolError("width and height must be integers")
if width <= 0 or height <= 0:
raise ProtocolError("width and height must be greater than zero")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2 ** 31 - 1 or height > 2 ** 31 - 1:
raise ProtocolError("width and height cannot exceed 2**31-1")
if alpha and transparent is not None:
raise ProtocolError("transparent colour not allowed with alpha channel")
# bitdepth is either single integer, or tuple of integers.
# Convert to tuple.
try:
len(bitdepth)
except TypeError:
bitdepth = (bitdepth,)
for b in bitdepth:
valid = is_natural(b) and 1 <= b <= 16
if not valid:
raise ProtocolError(
"each bitdepth %r must be a positive integer <= 16" % (bitdepth,)
)
# Calculate channels, and
# expand bitdepth to be one element per channel.
palette = check_palette(palette)
alpha = bool(alpha)
colormap = bool(palette)
if greyscale is Default and palette:
greyscale = False
greyscale = bool(greyscale)
if colormap:
color_planes = 1
planes = 1
else:
color_planes = (3, 1)[greyscale]
planes = color_planes + alpha
if len(bitdepth) == 1:
bitdepth *= planes
bitdepth, self.rescale = check_bitdepth_rescale(
palette, bitdepth, transparent, alpha, greyscale
)
# These are assertions, because above logic should have
# corrected or raised all problematic cases.
if bitdepth < 8:
assert greyscale or palette
assert not alpha
if bitdepth > 8:
assert not palette
transparent = check_color(transparent, greyscale, "transparent")
background = check_color(background, greyscale, "background")
# It's important that the true boolean values
# (greyscale, alpha, colormap, interlace) are converted
# to bool because Iverson's convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = greyscale
self.alpha = alpha
self.colormap = colormap
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = 4 * self.alpha + 2 * (not greyscale) + 1 * self.colormap
assert self.color_type in (0, 2, 3, 4, 6)
self.color_planes = color_planes
self.planes = planes
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth / 8) * self.planes
def write(self, outfile, rows):
"""
Write a PNG image to the output file.
`rows` should be an iterable that yields each row
(each row is a sequence of values).
The rows should be the rows of the original image,
so there should be ``self.height`` rows of
``self.width * self.planes`` values.
If `interlace` is specified (when creating the instance),
then an interlaced PNG file will be written.
Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing requires the entire image to be in working memory.
"""
# Values per row
vpr = self.width * self.planes
def check_rows(rows):
"""
Yield each row in rows,
but check each row first (for correct width).
"""
for i, row in enumerate(rows):
try:
wrong_length = len(row) != vpr
except TypeError:
# When using an itertools.ichain object or
# other generator not supporting __len__,
# we set this to False to skip the check.
wrong_length = False
if wrong_length:
# Note: row numbers start at 0.
raise ProtocolError(
"Expected %d values but got %d values, in row %d"
% (vpr, len(row), i)
)
yield row
if self.interlace:
fmt = "BH"[self.bitdepth > 8]
a = array(fmt, itertools.chain(*check_rows(rows)))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, check_rows(rows))
if nrows != self.height:
raise ProtocolError(
"rows supplied (%d) does not match height (%d)" % (nrows, self.height)
)
def write_passes(self, outfile, rows):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file.
For straightlaced images, this is the usual top to bottom ordering.
For interlaced images the rows should have been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row
(each row being a sequence of values).
"""
# Ensure rows are scaled (to 4-/8-/16-bit),
# and packed into bytes.
if self.rescale:
rows = rescale_rows(rows, self.rescale)
if self.bitdepth < 8:
rows = pack_rows(rows, self.bitdepth)
elif self.bitdepth == 16:
rows = unpack_rows(rows)
return self.write_packed(outfile, rows)
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`.
`rows` should be an iterator that yields each packed row;
a packed row being a sequence of packed bytes.
The rows have a filter byte prefixed and
are then compressed into one or more IDAT chunks.
They are not processed any further,
so if bitdepth is other than 1, 2, 4, 8, 16,
the pixel values should have been scaled
before passing them to this method.
This method does work for interlaced images but it is best avoided.
For interlaced images, the rows should be
presented in the order that they appear in the file.
"""
self.write_preamble(outfile)
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# data accumulates bytes to be compressed for the IDAT chunk;
# it's compressed when sufficiently large.
data = bytearray()
for i, row in enumerate(rows):
# Add "None" filter type.
# Currently, it's essential that this filter type be used
# for every scanline as
# we do not mark the first row of a reduced pass image;
# that means we could accidentally compute
# the wrong filtered scanline if we used
# "up", "average", or "paeth" on such a line.
data.append(0)
data.extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(data)
if len(compressed):
write_chunk(outfile, b"IDAT", compressed)
data = bytearray()
compressed = compressor.compress(bytes(data))
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b"IDAT", compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b"IEND")
return i + 1
def write_preamble(self, outfile):
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(
outfile,
b"IHDR",
struct.pack(
"!2I5B",
self.width,
self.height,
self.bitdepth,
self.color_type,
0,
0,
self.interlace,
),
)
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(
outfile, b"gAMA", struct.pack("!L", int(round(self.gamma * 1e5)))
)
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(
outfile,
b"sBIT",
struct.pack("%dB" % self.planes, *[s[0] for s in self.rescale]),
)
# :chunk:order: Without a palette (PLTE chunk),
# ordering is relatively relaxed.
# With one, gAMA chunk must precede PLTE chunk
# which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p, t = make_palette_chunks(self.palette)
write_chunk(outfile, b"PLTE", p)
if t:
# tRNS chunk is optional;
# Only needed if palette entries have alpha.
write_chunk(outfile, b"tRNS", t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b"tRNS", struct.pack(fmt, *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b"bKGD", struct.pack(fmt, *self.background))
# http://www.w3.org/TR/PNG/#11pHYs
if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
tup = (
self.x_pixels_per_unit,
self.y_pixels_per_unit,
int(self.unit_is_meter),
)
write_chunk(outfile, b"pHYs", struct.pack("!LLB", *tup))
def write_array(self, outfile, pixels):
"""
Write an array that holds all the image values
as a PNG file on the output file.
See also :meth:`write` method.
"""
if self.interlace:
            if not isinstance(pixels, array):
# Coerce to array type
fmt = "BH"[self.bitdepth > 8]
pixels = array(fmt, pixels)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def array_scanlines(self, pixels):
"""
Generates rows (each a sequence of values) from
a single array of values.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array.
`pixels` is the full source image as a single array of values.
The generator yields each scanline of the reduced passes in turn,
each scanline being a sequence of values.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = "BH"[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
# Each iteration generates a scanline starting at (x, y)
# and consisting of every xstep pixels.
for lines in adam7_generate(self.width, self.height):
for x, y, xstep in lines:
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Values per row (of reduced image)
reduced_row_len = ppr * self.planes
if xstep == 1:
# Easy case: line is a simple slice.
offset = y * vpr
yield pixels[offset : offset + vpr]
continue
# We have to step by xstep,
# which we can do one plane at a time
# using the step in Python slices.
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:reduced_row_len])
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i :: self.planes] = pixels[offset + i : end_offset : skip]
yield row
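# Example (a minimal usage sketch, not part of the original module): encode a
# 2x2 8-bit greyscale checkerboard into an in-memory buffer.
#
#     import io
#     w = Writer(width=2, height=2, greyscale=True, bitdepth=8)
#     buf = io.BytesIO()
#     w.write(buf, [[0, 255], [255, 0]])
#     buf.getvalue()[:8] == signature  # True: file starts with the PNG magic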
def write_chunk(outfile, tag, data=b""):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
data = bytes(data)
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2 ** 32 - 1
outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(signature)
for chunk in chunks:
write_chunk(out, *chunk)
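# Example (illustrative, not part of the original module): a complete 1x1
# greyscale PNG built from hand-made chunks. The IDAT payload is one filter
# byte (0, "None") followed by one 8-bit pixel.
#
#     import io
#     ihdr = struct.pack("!2I5B", 1, 1, 8, 0, 0, 0, 0)
#     idat = zlib.compress(bytes([0, 0]))
#     buf = io.BytesIO()
#     write_chunks(buf, [(b"IHDR", ihdr), (b"IDAT", idat), (b"IEND", b"")])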
def rescale_rows(rows, rescale):
"""
Take each row in rows (an iterator) and yield
a fresh row with the pixels scaled according to
the rescale parameters in the list `rescale`.
Each element of `rescale` is a tuple of
(source_bitdepth, target_bitdepth),
with one element per channel.
"""
# One factor for each channel
fs = [float(2 ** s[1] - 1) / float(2 ** s[0] - 1) for s in rescale]
# Assume all target_bitdepths are the same
target_bitdepths = set(s[1] for s in rescale)
assert len(target_bitdepths) == 1
(target_bitdepth,) = target_bitdepths
typecode = "BH"[target_bitdepth > 8]
# Number of channels
n_chans = len(rescale)
for row in rows:
rescaled_row = array(typecode, iter(row))
for i in range(n_chans):
channel = array(typecode, (int(round(fs[i] * x)) for x in row[i::n_chans]))
rescaled_row[i::n_chans] = channel
yield rescaled_row
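# Example (illustrative, not part of the original module): rescaling one
# single-channel row from 2-bit to 8-bit; the factor is 255/3 = 85.
#
#     rows = rescale_rows([[0, 1, 2, 3]], [(2, 8)])
#     list(next(rows))  # [0, 85, 170, 255]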
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks)
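# Example (illustrative, not part of the original module): eight 1-bit samples
# pack into a single byte, most significant bit first.
#
#     packed = next(pack_rows([[1, 0, 1, 1, 0, 0, 1, 0]], 1))
#     packed[0]  # 178 == 0b10110010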
def unpack_rows(rows):
"""Unpack each row from being 16-bits per value,
to being a sequence of bytes.
"""
for row in rows:
fmt = "!%dH" % len(row)
yield bytearray(struct.pack(fmt, *row))
def make_palette_chunks(palette):
"""
Create the byte sequences for a ``PLTE`` and
if necessary a ``tRNS`` chunk.
Returned as a pair (*p*, *t*).
*t* will be ``None`` if no ``tRNS`` chunk is necessary.
"""
p = bytearray()
t = bytearray()
for x in palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
if t:
return p, t
return p, None
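# Example (illustrative, not part of the original module): an all-opaque RGB
# palette needs no tRNS data, so the second element is None.
#
#     p, t = make_palette_chunks([(255, 0, 0), (0, 0, 255)])
#     # p == bytearray(b'\xff\x00\x00\x00\x00\xff'), t is None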
def check_bitdepth_rescale(palette, bitdepth, transparent, alpha, greyscale):
"""
Returns (bitdepth, rescale) pair.
"""
if palette:
if len(bitdepth) != 1:
raise ProtocolError("with palette, only a single bitdepth may be used")
(bitdepth,) = bitdepth
if bitdepth not in (1, 2, 4, 8):
raise ProtocolError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ProtocolError("transparent and palette not compatible")
if alpha:
raise ProtocolError("alpha and palette not compatible")
if greyscale:
raise ProtocolError("greyscale and palette not compatible")
return bitdepth, None
# No palette, check for sBIT chunk generation.
if greyscale and not alpha:
# Single channel, L.
(bitdepth,) = bitdepth
if bitdepth in (1, 2, 4, 8, 16):
return bitdepth, None
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5, 6, 7)
targetbitdepth = 8
return targetbitdepth, [(bitdepth, targetbitdepth)]
assert alpha or not greyscale
depth_set = tuple(set(bitdepth))
if depth_set in [(8,), (16,)]:
# No sBIT required.
(bitdepth,) = depth_set
return bitdepth, None
targetbitdepth = (8, 16)[max(bitdepth) > 8]
return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
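# Example (illustrative, not part of the original module): a 5-bit greyscale
# source is not a valid PNG depth, so it is rescaled to 8 bits and an sBIT
# entry records the original precision.
#
#     check_bitdepth_rescale(None, (5,), None, False, True)
#     # -> (8, [(5, 8)])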
# Regex for decoding mode string
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
"""
Create a PNG :class:`Image` object from a 2-dimensional array.
One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
Unless they are specified using the *info* parameter,
the PNG's height and width are taken from the array size.
The first axis is the height; the second axis is the
ravelled width and channel index.
The array is treated is a sequence of rows,
each row being a sequence of values (``width*channels`` in number).
So an RGB image that is 16 pixels high and 8 wide will
occupy a 2-dimensional array that is 16x24
(each row will be 8*3 = 24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth
(overriding how this function normally derives the bit depth,
see below).
Appending ``';16'`` to the mode will cause the PNG to be
16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array,
but it can be any suitable Python sequence.
For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
The exact rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension.
It's slightly more complicated than that because
an iterator of rows can be used, and it all still works.
Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from
the array element's datatype
(but if *mode* specifies a bitdepth then that is used instead).
The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects.
A 1 byte datatype will give a bit depth of 8,
a 2 byte datatype will give a bit depth of 16.
If the datatype does not have an implicit size,
like the above example where it is a plain Python list of lists,
then a default of 8 is used.
The *info* parameter is a dictionary that can
be used to specify metadata (in the same style as
the arguments to the :class:`png.Writer` class).
For this function the keys that are useful are:
height
overrides the height derived from the array dimensions and
allows *a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype
(but must match *mode* if that also specifies a bit depth).
Generally anything specified in the *info* dictionary will
override any implicit choices that this function would otherwise make,
but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and
false when mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
match = RegexModeDecode.match(mode)
if not match:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode, bitdepth = match.groups()
if bitdepth:
bitdepth = int(bitdepth)
# Colour format.
if "greyscale" in info:
if bool(info["greyscale"]) != ("L" in mode):
raise ProtocolError("info['greyscale'] should match mode.")
info["greyscale"] = "L" in mode
alpha = "A" in mode
if "alpha" in info:
if bool(info["alpha"]) != alpha:
raise ProtocolError("info['alpha'] should match mode.")
info["alpha"] = alpha
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get("bitdepth") and bitdepth != info["bitdepth"]:
raise ProtocolError(
"bitdepth (%d) should match bitdepth of info (%d)."
% (bitdepth, info["bitdepth"])
)
info["bitdepth"] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
width, height = check_sizes(info.get("size"), info.get("width"), info.get("height"))
if width:
info["width"] = width
if height:
info["height"] = height
if "height" not in info:
try:
info["height"] = len(a)
except TypeError:
raise ProtocolError("len(a) does not work, supply info['height'] instead.")
planes = len(mode)
if "planes" in info:
if info["planes"] != planes:
raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a, t = itertools.tee(a)
row = next(t)
del t
testelement = row
if "width" not in info:
width = len(row) // planes
info["width"] = width
if "bitdepth" not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's datatype,
# use a default of 8.
bitdepth = 8
else:
# If we got here without exception,
# we now assume that the array is a numpy array.
if dtype.kind == "b":
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info["bitdepth"] = bitdepth
for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
assert thing in info
return Image(a, info)
# So that refugees from PIL feel more at home. Not documented.
fromarray = from_array
class Image:
"""A PNG image. You can create an :class:`Image` object from
an array of pixels by calling :meth:`png.from_array`. It can be
saved to disk with the :meth:`save` method.
"""
def __init__(self, rows, info):
"""
.. note ::
The constructor is not public. Please do not call it.
"""
self.rows = rows
self.info = info
def save(self, file):
"""Save the image to the named *file*.
See `.write()` if you already have an open file object.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
with open(file, "wb") as fd:
w.write(fd, self.rows)
def write(self, file):
"""Write the image to the open file object.
See `.save()` if you have a filename.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
w.write(file, self.rows)
class Reader:
"""
    PNG decoder in pure Python.
"""
def __init__(self, _guess=None, filename=None, file=None, bytes=None):
"""
The constructor expects exactly one keyword argument.
If you supply a positional argument instead,
it will guess the input type.
Choose from the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``bytes`` or ``bytearray`` with PNG data.
"""
keywords_supplied = (
(_guess is not None)
+ (filename is not None)
+ (file is not None)
+ (bytes is not None)
)
if keywords_supplied != 1:
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type).
# See preamble method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
bytes = _guess
elif isinstance(_guess, str):
filename = _guess
elif hasattr(_guess, "read"):
file = _guess
if bytes is not None:
self.file = io.BytesIO(bytes)
elif filename is not None:
self.file = open(filename, "rb")
elif file is not None:
self.file = file
else:
raise ProtocolError("expecting filename, file or bytes array")
def chunk(self, lenient=False):
"""
Read the next PNG chunk from the input file;
returns a (*type*, *data*) tuple.
*type* is the chunk's type as a byte string
(all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if not self.atchunk:
raise ChunkError("No more chunks.")
length, type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError(
"Chunk %s too short for required %i octets." % (type, length)
)
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError("Chunk %s too short for checksum." % type)
verify = zlib.crc32(type)
verify = zlib.crc32(data, verify)
verify = struct.pack("!I", verify)
if checksum != verify:
(a,) = struct.unpack("!I", checksum)
(b,) = struct.unpack("!I", verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (
type.decode("ascii"),
a,
b,
)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t, v = self.chunk()
yield t, v
if t == b"IEND":
break
def undo_filter(self, filter_type, scanline, previous):
"""
Undo the filter for a scanline.
`scanline` is a sequence of bytes that
does not include the initial filter type byte.
`previous` is decoded previous scanline
(for straightlaced images this is the previous pixel row,
but for interlaced images, it is
the previous scanline in the reduced image,
which in general is not the previous pixel row in the final image).
When there is no previous scanline
(the first row of a straightlaced image,
or the first row in one of the passes in an interlaced image),
then this argument should be ``None``.
The scanline will have the effects of filtering removed;
the result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
result = scanline
if filter_type == 0:
return result
if filter_type not in (1, 2, 3, 4):
raise FormatError(
"Invalid PNG Filter Type. "
"See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
)
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = bytearray([0] * len(scanline))
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
fn = (
None,
undo_filter_sub,
undo_filter_up,
undo_filter_average,
undo_filter_paeth,
)[filter_type]
fn(fu, scanline, previous, result)
return result
def _deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Values per image
vpi = vpr * self.height
# Interleaving writes to the output array randomly
# (well, not quite), so the entire output array must be in memory.
# Make a result array, and make it big enough.
if self.bitdepth > 8:
a = array("H", [0] * vpi)
else:
a = bytearray([0] * vpi)
source_offset = 0
for lines in adam7_generate(self.width, self.height):
# The previous (reconstructed) scanline.
# `None` at the beginning of a pass
# to indicate that there is no previous line.
recon = None
for x, y, xstep in lines:
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset : source_offset + row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self._bytes_to_values(recon, width=ppr)
if xstep == 1:
assert x == 0
offset = y * vpr
a[offset : offset + vpr] = flat
else:
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset + i : end_offset : skip] = flat[i :: self.planes]
return a
def _iter_bytes_to_values(self, byte_rows):
"""
Iterator that yields each scanline;
each scanline being a sequence of values.
`byte_rows` should be an iterator that yields
the bytes of each row in turn.
"""
for row in byte_rows:
yield self._bytes_to_values(row)
def _bytes_to_values(self, bs, width=None):
"""Convert a packed row of bytes into a row of values.
Result will be a freshly allocated object,
not shared with the argument.
"""
if self.bitdepth == 8:
return bytearray(bs)
if self.bitdepth == 16:
return array("H", struct.unpack("!%dH" % (len(bs) // 2), bs))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8 // self.bitdepth
out = bytearray()
mask = 2 ** self.bitdepth - 1
shifts = [self.bitdepth * i for i in reversed(list(range(spb)))]
for o in bs:
out.extend([mask & (o >> i) for i in shifts])
return out[:width]
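# Example (illustrative, not part of the original module): with bitdepth 2,
# the byte 0b11010010 (210) unpacks into four 2-bit samples, high bits first:
# [3, 1, 0, 2], which is then truncated to `width` samples.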
def _iter_straight_packed(self, byte_blocks):
"""Iterator that undoes the effect of filtering;
yields each row as a sequence of packed bytes.
Assumes input is straightlaced.
`byte_blocks` should be an iterable that yields the raw bytes
in blocks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = bytearray()
# The previous (reconstructed) scanline.
# None indicates first line of image.
recon = None
for some_bytes in byte_blocks:
a.extend(some_bytes)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1 : rb + 1]
del a[: rb + 1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError("Wrong size for decompressed IDAT chunk.")
assert len(a) == 0
def validate_signature(self):
"""
If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading
the initial part of the PNG file up to
the start of the ``IDAT`` chunk.
All the chunks that precede the ``IDAT`` chunk are
read and either processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if self.atchunk is None:
raise FormatError("This PNG file has no IDAT chunks.")
if self.atchunk[1] == b"IDAT":
return
self.process_chunk(lenient=lenient)
def _chunk_len_type(self):
"""
Reads just enough of the input to
determine the next chunk's length and type;
return a (*length*, *type*) pair where *type* is a byte sequence.
If there are no more chunks, ``None`` is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError("End of file whilst reading chunk length and type.")
length, type = struct.unpack("!I4s", x)
if length > 2 ** 31 - 1:
raise FormatError("Chunk %s is too large: %d." % (type, length))
# Check that all bytes are in valid ASCII range.
# https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout
type_bytes = set(bytearray(type))
if not (type_bytes <= set(range(65, 91)) | set(range(97, 123))):
raise FormatError("Chunk %r has invalid Chunk Type." % list(type))
return length, type
def process_chunk(self, lenient=False):
"""
Process the next chunk and its data.
This only processes the following chunk types:
``IHDR``, ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
All other chunk types are ignored.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = "_process_" + type.decode("ascii")
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError("IHDR chunk has incorrect length.")
(
self.width,
self.height,
self.bitdepth,
self.color_type,
self.compression,
self.filter,
self.interlace,
) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise FormatError("Unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError(
"Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter
)
if self.interlace not in (0, 1):
raise FormatError(
"Unknown interlace method %d, see "
"http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods"
" ." % self.interlace
)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3, 1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth) / float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sBIT chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError("PLTE chunk's length should be a multiple of 3.")
if len(data) > (2 ** self.bitdepth) * 3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack("B", data)
else:
self.background = struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte) / 3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." % self.color_type
)
try:
self.transparent = struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (
self.colormap
and len(data) != 3
or not self.colormap
and len(data) != self.planes
):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt, data)
self.unit_is_meter = bool(unit)
def read(self, lenient=False):
"""
Read the PNG file and decode it.
Returns (`width`, `height`, `rows`, `info`).
May use excessive memory.
`rows` is a sequence of rows;
each row is a sequence of values.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
def iteridat():
"""Iterator that yields all the ``IDAT`` chunks as strings."""
while True:
type, data = self.chunk(lenient=lenient)
if type == b"IEND":
# http://www.w3.org/TR/PNG/#11IEND
break
if type != b"IDAT":
continue
# type == b'IDAT'
# http://www.w3.org/TR/PNG/#11IDAT
if self.colormap and not self.plte:
warnings.warn("PLTE chunk is required before IDAT chunk")
yield data
self.preamble(lenient=lenient)
raw = decompress(iteridat())
if self.interlace:
def rows_from_interlace():
"""Yield each row from an interlaced PNG."""
# It's important that this iterator doesn't read
# IDAT chunks until it yields the first row.
bs = bytearray(itertools.chain(*raw))
arraycode = "BH"[self.bitdepth > 8]
# Like :meth:`group` but
# producing an array.array object for each row.
values = self._deinterlace(bs)
vpr = self.width * self.planes
for i in range(0, len(values), vpr):
row = array(arraycode, values[i : i + vpr])
yield row
rows = rows_from_interlace()
else:
rows = self._iter_bytes_to_values(self._iter_straight_packed(raw))
info = dict()
for attr in "greyscale alpha planes bitdepth interlace".split():
info[attr] = getattr(self, attr)
info["size"] = (self.width, self.height)
for attr in "gamma transparent background".split():
a = getattr(self, attr, None)
if a is not None:
info[attr] = a
if getattr(self, "x_pixels_per_unit", None):
info["physical"] = Resolution(
self.x_pixels_per_unit, self.y_pixels_per_unit, self.unit_is_meter
)
if self.plte:
info["palette"] = self.palette()
return self.width, self.height, rows, info
def read_flat(self):
"""
Read a PNG file and decode it into a single array of values.
Returns (*width*, *height*, *values*, *info*).
May use excessive memory.
`values` is a single array.
The :meth:`read` method is more stream-friendly than this,
because it returns a sequence of rows.
"""
x, y, pixel, info = self.read()
arraycode = "BH"[info["bitdepth"] > 8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, info
def palette(self, alpha="natural"):
"""
Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks.
These chunks should have already been processed (for example,
by calling the :meth:`preamble` method).
All the tuples are the same size:
3-tuples if there is no ``tRNS`` chunk,
4-tuples when there is a ``tRNS`` chunk.
Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError("Required PLTE chunk is missing in colour type 3 image.")
plte = group(array("B", self.plte), 3)
if self.trns or alpha == "force":
trns = array("B", self.trns or [])
trns.extend([255] * (len(plte) - len(trns)))
plte = list(map(operator.add, plte, group(trns, 1)))
return plte
def asDirect(self):
"""
Returns the image data as a direct representation of
an ``x * y * planes`` array.
This removes the need for callers to deal with
palettes and transparency themselves.
Images with a palette (colour type 3) are converted to RGB or RGBA;
images with transparency (a ``tRNS`` chunk) are converted to
LA or RGBA as appropriate.
When returned in this format the pixel values represent
the colour value directly without needing to refer
to palettes or transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *rows*, *info*)
This method normally returns pixel values with
the bit depth they have in the source image, but
when the source PNG has an ``sBIT`` chunk it is inspected and
can reduce the bit depth of the result pixels;
pixel values will be reduced according to the bit depth
specified in the ``sBIT`` chunk.
PNG nerds should note a single result bit depth is
used for all channels:
the maximum of the ones specified in the ``sBIT`` chunk.
An RGB565 image will be rescaled to 6-bit RGB666.
The *info* dictionary that is returned reflects
the `direct` format and not the original source image.
For example, an RGB source image with a ``tRNS`` chunk
to represent a transparent colour,
will start with ``planes=3`` and ``alpha=False`` for the
source image,
but the *info* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because
an alpha channel is synthesized and added.
*rows* is a sequence of rows;
each row being a sequence of values
(like the :meth:`read` method).
All the other aspects of the image data are not changed.
"""
self.preamble()
# Simple case, no conversion necessary.
if not self.colormap and not self.trns and not self.sbit:
return self.read()
x, y, pixels, info = self.read()
if self.colormap:
info["colormap"] = False
info["alpha"] = bool(self.trns)
info["bitdepth"] = 8
info["planes"] = 3 + bool(self.trns)
plte = self.palette()
def iterpal(pixels):
for row in pixels:
row = [plte[x] for x in row]
yield array("B", itertools.chain(*row))
pixels = iterpal(pixels)
elif self.trns:
# It would be nice if there was some reasonable way
# of doing this without generating a whole load of
# intermediate tuples. But tuples does seem like the
# easiest way, with no other way clearly much simpler or
# much faster. (Actually, the L to LA conversion could
# perhaps go faster (all those 1-tuples!), but I still
# wonder whether the code proliferation is worth it)
it = self.transparent
maxval = 2 ** info["bitdepth"] - 1
planes = info["planes"]
info["alpha"] = True
info["planes"] += 1
typecode = "BH"[info["bitdepth"] > 8]
def itertrns(pixels):
for row in pixels:
# For each row we group it into pixels, then form a
# characterisation vector that says whether each
# pixel is opaque or not. Then we convert
# True/False to 0/maxval (by multiplication),
# and add it as the extra channel.
row = group(row, planes)
opa = map(it.__ne__, row)
opa = map(maxval.__mul__, opa)
opa = list(zip(opa)) # convert to 1-tuples
yield array(typecode, itertools.chain(*map(operator.add, row, opa)))
pixels = itertrns(pixels)
targetbitdepth = None
if self.sbit:
sbit = struct.unpack("%dB" % len(self.sbit), self.sbit)
targetbitdepth = max(sbit)
if targetbitdepth > info["bitdepth"]:
raise Error("sBIT chunk %r exceeds bitdepth %d" % (sbit, self.bitdepth))
if min(sbit) <= 0:
raise Error("sBIT chunk %r has a 0-entry" % sbit)
if targetbitdepth:
shift = info["bitdepth"] - targetbitdepth
info["bitdepth"] = targetbitdepth
def itershift(pixels):
for row in pixels:
yield [p >> shift for p in row]
pixels = itershift(pixels)
return x, y, pixels, info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width, height, pixels, info = get()
maxval = 2 ** info["bitdepth"] - 1
targetmaxval = 2 ** targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
info["bitdepth"] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x * factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, info
else:
return width, height, iterscale(), info
def asRGB8(self):
"""
Return the image data as an RGB pixels with 8-bits per sample.
This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that
they are all between 0 and 255 (8-bit).
In the case where the source image has a bit depth < 8
the transformation preserves all the information;
where the source image has bit depth > 8, then
rescaling to 8-bit values loses precision.
No dithering is performed.
Like :meth:`asRGB`,
an alpha channel in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *rows*, *info*).
*width*, *height*, *info* are as per the :meth:`read` method.
*rows* is the pixel data as a sequence of rows.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""
Return the image data as RGBA pixels with 8-bits per sample.
This method is similar to :meth:`asRGB8` and :meth:`asRGBA`:
The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255.
The alpha channel is synthesized if necessary
(with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""
Return image as RGB pixels.
RGB colour images are passed through unchanged;
greyscales are expanded into RGB triplets
(there is a small speed overhead for doing this).
An alpha channel in the source image will raise an exception.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular,
for this method ``info['greyscale']`` will be ``False``.
"""
width, height, pixels, info = self.asDirect()
if info["alpha"]:
raise Error("will not convert image with alpha channel to RGB")
if not info["greyscale"]:
return width, height, pixels, info
info["greyscale"] = False
info["planes"] = 3
if info["bitdepth"] > 8:
def newarray():
return array("H", [0])
else:
def newarray():
return bytearray([0])
def iterrgb():
for row in pixels:
a = newarray() * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width, height, iterrgb(), info
def asRGBA(self):
"""
Return image as RGBA pixels.
Greyscales are expanded into RGB triplets;
an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular, for this method
``info['greyscale']`` will be ``False``, and
``info['alpha']`` will be ``True``.
"""
width, height, pixels, info = self.asDirect()
if info["alpha"] and not info["greyscale"]:
return width, height, pixels, info
typecode = "BH"[info["bitdepth"] > 8]
maxval = 2 ** info["bitdepth"] - 1
maxbuffer = struct.pack("=" + typecode, maxval) * 4 * width
if info["bitdepth"] > 8:
def newarray():
return array("H", maxbuffer)
else:
def newarray():
return bytearray(maxbuffer)
if info["alpha"] and info["greyscale"]:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
convert_la_to_rgba(row, a)
yield a
elif info["greyscale"]:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
convert_l_to_rgba(row, a)
yield a
else:
assert not info["alpha"] and not info["greyscale"]
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
convert_rgb_to_rgba(row, a)
yield a
info["alpha"] = True
info["greyscale"] = False
info["planes"] = 4
return width, height, convert(), info
def decompress(data_blocks):
"""
`data_blocks` should be an iterable that
yields the compressed data (from the ``IDAT`` chunks).
This yields decompressed byte strings.
"""
# Currently, with no max_length parameter to decompress,
# this routine will do one yield per IDAT chunk: Not very
# incremental.
d = zlib.decompressobj()
# Each IDAT chunk is passed to the decompressor, then any
# remaining state is decompressed out.
for data in data_blocks:
# :todo: add a max_length argument here to limit output size.
yield bytearray(d.decompress(data))
yield bytearray(d.flush())
def check_bitdepth_colortype(bitdepth, colortype):
"""
Check that `bitdepth` and `colortype` are both valid,
and specified in a valid combination.
Returns (None) if valid, raise an Exception if not valid.
"""
if bitdepth not in (1, 2, 4, 8, 16):
raise FormatError("invalid bit depth %d" % bitdepth)
if colortype not in (0, 2, 3, 4, 6):
raise FormatError("invalid colour type %d" % colortype)
# Check indexed (palettized) images have 8 or fewer bits
# per pixel; check only indexed or greyscale images have
# fewer than 8 bits per pixel.
if colortype & 1 and bitdepth > 8:
raise FormatError(
"Indexed images (colour type %d) cannot"
" have bitdepth > 8 (bit depth %d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth)
)
if bitdepth < 8 and colortype not in (0, 3):
raise FormatError(
"Illegal combination of bit depth (%d)"
" and colour type (%d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
% (bitdepth, colortype)
)
def is_natural(x):
"""A non-negative integer."""
try:
is_integer = int(x) == x
except (TypeError, ValueError):
return False
return is_integer and x >= 0
def undo_filter_sub(filter_unit, scanline, previous, result):
"""Undo sub filter."""
ai = 0
# Loops starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(filter_unit, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xFF
ai += 1
def undo_filter_up(filter_unit, scanline, previous, result):
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xFF
def undo_filter_average(filter_unit, scanline, previous, result):
"""Undo up filter."""
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xFF
ai += 1
def undo_filter_paeth(filter_unit, scanline, previous, result):
"""Undo Paeth filter."""
# Also used for ci.
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xFF
ai += 1
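# Worked example (illustrative, not part of the original module): with
# a = 10 (left), b = 20 (above) and c = 0 (upper-left), p = a + b - c = 30,
# so pa = 20, pb = 10, pc = 30. pb is smallest, the predictor is b = 20,
# and a raw byte x reconstructs to (x + 20) & 0xFF.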
def convert_la_to_rgba(row, result):
for i in range(3):
result[i::4] = row[0::2]
result[3::4] = row[1::2]
def convert_l_to_rgba(row, result):
"""
Convert a grayscale image to RGBA.
This method assumes the alpha channel in result is
already correctly initialized.
"""
for i in range(3):
result[i::4] = row
def convert_rgb_to_rgba(row, result):
"""
Convert an RGB image to RGBA.
This method assumes the alpha channel in result is
already correctly initialized.
"""
for i in range(3):
result[i::4] = row[i::3]
# Only reason to include this in this module is that
# several utilities need it, and it is small.
def binary_stdin():
"""
A sys.stdin that returns bytes.
"""
return sys.stdin.buffer
def binary_stdout():
"""
A sys.stdout that accepts bytes.
"""
stdout = sys.stdout.buffer
# On Windows the C runtime file orientation needs changing.
if sys.platform == "win32":
import msvcrt
import os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return stdout
def cli_open(path):
if path == "-":
return binary_stdin()
return open(path, "rb")
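# Example (a minimal round-trip sketch, not part of the original module):
# write a 2x1 greyscale image with Writer, then decode it back with Reader.
#
#     import io
#     w = Writer(width=2, height=1, greyscale=True, bitdepth=8)
#     buf = io.BytesIO()
#     w.write(buf, [[0, 255]])
#     buf.seek(0)
#     width, height, rows, info = Reader(file=buf).read()
#     [list(r) for r in rows]  # [[0, 255]]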
| plotly/plotly.py | packages/python/plotly/_plotly_utils/png.py | Python | mit | 80,815 |
import os
import shutil
from abc import ABCMeta, abstractmethod
from hcsvlab_robochef import configmanager
from hcsvlab_robochef.utils.manifester import *
class IngestBase(object):
__metaclass__ = ABCMeta
'''
    This abstract class is a representation of an ingest. It is used in lieu of an interface.
'''
configmanager.configinit()
@abstractmethod
    def setMetaData(self, srcdir):
        ''' Loads the metadata for use during ingest '''
        return None
@abstractmethod
    def ingestCorpus(self, srcdir, outdir):
'''
The ingest entry point where an input and output directory is specified
'''
return None
@abstractmethod
    def ingestDocument(self, sourcepath):
        '''
        Ingest a specific source document, from which metadata annotations and raw data are produced
        '''
return None
def identify_documents(self, documents):
'''
Identifies the indexable and display documents from the given documents according to the collection rule
'''
return (None, None)
def clear_output_dir(self, outdir):
''' Clears the output directory '''
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
def copy_collection_metadata(self, srcdir, outdir, filename, savename):
''' Copies the collection level metadata file to output directory '''
print " copying collection level metadata file..."
metadata_file = os.path.join(srcdir, filename)
if os.path.exists(metadata_file) and os.path.exists(outdir):
shutil.copyfile(metadata_file, os.path.join(outdir,savename))
def create_collection_manifest(self, srcdir, format):
        ''' Create the manifest file and put it in the output directory '''
print " creating collection manifest file for " + srcdir
create_manifest(srcdir, format)
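# Example (a hypothetical sketch, not part of the original module): a concrete
# corpus ingest subclasses IngestBase and implements the abstract methods;
# the class name and method bodies below are placeholders, not real robochef
# code.
#
#     class ExampleIngest(IngestBase):
#         def setMetaData(self, srcdir):
#             self.metadata = {}
#         def ingestCorpus(self, srcdir, outdir):
#             self.clear_output_dir(outdir)
#         def ingestDocument(self, sourcepath):
#             return None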
| IntersectAustralia/hcsvlab_robochef | hcsvlab_robochef/ingest_base.py | Python | gpl-3.0 | 1,944 |
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, too-many-lines
# pylint: disable=import-error, no-name-in-module
"""Symbolic configuration API of MXNet."""
from __future__ import absolute_import as _abs
import ctypes
import warnings
from numbers import Number
import os as _os
import sys as _sys
import numpy as _numpy
from .base import _LIB, numeric_types
from .base import c_array, c_str, mx_uint, py_str, string_types, mx_real_t
from .base import NDArrayHandle, ExecutorHandle, SymbolHandle
from .base import check_call, MXNetError
from .context import Context, cpu
from .ndarray import NDArray, zeros as _nd_zeros, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .executor import Executor
from . import _symbol_internal as _internal
from .attribute import AttrScope
# Use different version of SymbolBase
# When possible, use cython to speedup part of computation.
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from ._ctypes.symbol import SymbolBase, _init_symbol_module
elif _sys.version_info >= (3, 0):
from ._cy3.symbol import SymbolBase, _init_symbol_module
else:
from ._cy2.symbol import SymbolBase, _init_symbol_module
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from ._ctypes.symbol import SymbolBase, _init_symbol_module
_GRAD_REQ_MAP = {'null': 0, 'write': 1, 'add': 3}
class Symbol(SymbolBase):
"""Symbol is symbolic graph of the mxnet."""
# disable dictionary storage, also do not have parent type.
# pylint: disable=no-member
__slots__ = []
def __repr__(self):
"""Get a string representation of the symbol."""
name = self.name
        if name is None:
            name = ', '.join([i.name for i in self])
            return '<%s group [%s]>' % (self.__class__.__name__, name)
        else:
            return '<%s %s>' % (self.__class__.__name__, name)
def __iter__(self):
"""Returns a generator object of symbol.
One can loop through the returned object list to get outputs.
Example usage:
----------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a+b
>>> d = mx.sym.Variable('d')
>>> e = d+c
>>> out = e.get_children()
>>> out
<Symbol Grouped>
>>> for i in out:
... i
...
<Symbol d>
<Symbol _plus0>
"""
return (self[i] for i in self.list_outputs())
def __add__(self, other):
"""x.__add__(y) <=> x+y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_add` instead. """
if isinstance(other, Symbol):
return _internal._Plus(self, other)
if isinstance(other, Number):
return _internal._PlusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_sub` instead. """
if isinstance(other, Symbol):
return _internal._Minus(self, other)
if isinstance(other, Number):
return _internal._MinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x
Only `NDArray` is supported for now.
Example usage:
----------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rsub__(y).asnumpy()
array([[-2., -2., -2.],
[-2., -2., -2.]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RMinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __mul__(self, other):
"""x.__mul__(y) <=> x*y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_mul` instead. """
if isinstance(other, Symbol):
return _internal._Mul(self, other)
if isinstance(other, Number):
return _internal._MulScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_div` instead. """
if isinstance(other, Symbol):
return _internal._Div(self, other)
if isinstance(other, Number):
return _internal._DivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x
Only `NDArray` is supported for now.
Example usage:
----------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rdiv__(y).asnumpy()
array([[ 0.33333334, 0.33333334, 0.33333334],
[ 0.33333334, 0.33333334, 0.33333334]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RDivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __truediv__(self, other):
return self.__div__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __pow__(self, other):
"""x.__pow__(y) <=> x**y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_pow` instead. """
if isinstance(other, Symbol):
return _internal._Power(self, other)
if isinstance(other, Number):
return _internal._PowerScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __neg__(self):
"""x.__neg__() <=> -x
Numerical negative, element-wise.
Example usage:
----------
>>> a = mx.sym.Variable('a')
>>> a
<Symbol a>
>>> -a
<Symbol _mulscalar0>
>>> a_neg = a.__neg__()
        >>> b = mx.sym.Variable('b')
        >>> c = a_neg*b
>>> ex = c.eval(ctx=mx.cpu(), a=mx.nd.ones([2,3]), b=mx.nd.ones([2,3]))
>>> ex[0].asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
return self.__mul__(-1.0)
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
"""Returns a deep copy of the input object.
This function returns a deep copy of the input object including the current state
of all its parameters such as weights, biases, etc.
Any changes made to the deep copy do not reflect in the original object.
Example usage:
----------
>>> import copy
>>> data = mx.sym.Variable('data')
>>> data_1 = copy.deepcopy(data)
>>> data_1 = 2*data
>>> data_1.tojson()
>>> data_1 is data # Data got modified
False
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolCopy(self.handle,
ctypes.byref(handle)))
return Symbol(handle)
def __eq__(self, other):
"""x.__eq__(y) <=> x==y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_equal` instead. """
if isinstance(other, Symbol):
return _internal._equal(self, other)
if isinstance(other, numeric_types):
return _internal._equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ne__(self, other):
"""x.__ne__(y) <=> x!=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_not_equal` instead. """
if isinstance(other, Symbol):
return _internal._not_equal(self, other)
if isinstance(other, numeric_types):
return _internal._not_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __gt__(self, other):
"""x.__gt__(y) <=> x>y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater` instead. """
if isinstance(other, Symbol):
return _internal._greater(self, other)
if isinstance(other, numeric_types):
return _internal._greater_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ge__(self, other):
"""x.__ge__(y) <=> x>=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater_equal` instead. """
if isinstance(other, Symbol):
return _internal._greater_equal(self, other)
if isinstance(other, numeric_types):
return _internal._greater_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __lt__(self, other):
"""x.__lt__(y) <=> x<y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser` instead. """
if isinstance(other, Symbol):
return _internal._lesser(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __le__(self, other):
"""x.__le__(y) <=> x<=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser_equal` instead. """
if isinstance(other, Symbol):
return _internal._lesser_equal(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __getstate__(self):
handle = self.handle
if handle is not None:
return {'handle': self.tojson()}
else:
return {'handle': None}
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
handle = state['handle']
if handle is not None:
json_str = handle
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
self.handle = handle
else:
self.handle = None
def __call__(self, *args, **kwargs):
"""Composes symbol using inputs.
x.__call__(y, z) <=> x(y,z)
This function internally calls `_compose` to compose the symbol and
returns the composed symbol.
Example usage:
----------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> composed = net2(fc3_data=net1, name='composed')
>>> composed
<Symbol composed>
>>> called = net2.__call__(fc3_data=net1, name='composed')
>>> called
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
s = self.__copy__()
s._compose(*args, **kwargs)
return s
def _compose(self, *args, **kwargs):
"""Composes symbol using inputs.
x._compose(y, z) <=> x(y,z)
This function mutates the current symbol.
Example usage:
----------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> net2
<Symbol fc3>
>>> net2._compose(fc3_data=net1, name='composed')
>>> net2
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for arg in args:
if not isinstance(arg, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
for val in kwargs.values():
if not isinstance(val, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
num_args = len(args) + len(kwargs)
if len(kwargs) != 0:
keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()])
args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
else:
keys = None
args = c_array(SymbolHandle, [s.handle for s in args])
check_call(_LIB.MXSymbolCompose(
self.handle, name, num_args, keys, args))
def __getitem__(self, index):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of the input symbol.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> a.__getitem__(0)
<Symbol a>
>>> a[0]
<Symbol a>
Parameters
----------
index : int or str
Indexing key
"""
if isinstance(index, string_types):
idx = None
for i, name in enumerate(self.list_outputs()):
if name == index:
if idx is not None:
raise ValueError('There are multiple outputs with name \"%s\"' % index)
idx = i
if idx is None:
raise ValueError('Cannot find output that matches name \"%s\"' % index)
index = idx
if not isinstance(index, int):
raise TypeError('Symbol only support integer index to fetch i-th output')
if index >= (len(self.list_outputs())):
# Important, python determines the end by this exception
raise IndexError
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetOutput(
self.handle, mx_uint(index), ctypes.byref(handle)))
return Symbol(handle=handle)
@property
def name(self):
"""Get name string from the symbol, this function only works for non-grouped symbol.
Returns
-------
value : str
The name of this symbol, returns ``None`` for grouped symbol.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetName(
self.handle, ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def attr(self, key):
"""Returns the attribute string for corresponding input key from the symbol.
This function only works for non-grouped symbols.
Example usage:
----------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.attr('mood')
'angry'
Parameters
----------
key : str
The key corresponding to the desired attribute.
Returns
-------
value : str
The desired attribute value, returns ``None`` if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def list_attr(self, recursive=False):
"""Gets all attributes from the symbol.
Example usage:
----------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.list_attr()
{'mood': 'angry'}
Returns
-------
ret : Dict of str to str
A dictionary mapping attribute keys to values.
"""
if recursive:
raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
"Please use attr_dict instead.")
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttrShallow
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
return {py_str(pairs[i*2]): py_str(pairs[i*2+1]) for i in range(size.value)}
def attr_dict(self):
"""Recursively gets all attributes from the symbol and its children.
Example usage:
----------
>>> a = mx.sym.Variable('a', attr={'a1':'a2'})
>>> b = mx.sym.Variable('b', attr={'b1':'b2'})
>>> c = a+b
>>> c.attr_dict()
{'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}
Returns
-------
ret : Dict of str to dict
There is a key in the returned dict for every child with non-empty attribute set.
For each symbol, the name of the symbol is its key in the dict
and the correspond value is that symbol's attribute list (itself a dictionary).
"""
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttr
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
ret = {}
for i in range(size.value):
name, key = py_str(pairs[i*2]).split('$')
val = py_str(pairs[i*2+1])
if name not in ret:
ret[name] = {}
ret[name][key] = val
return ret
def _set_attr(self, **kwargs):
"""Sets an attribute of the symbol.
        For example, ``A._set_attr(foo="bar")`` adds the mapping ``"{foo: bar}"``
        to the symbol's attribute dictionary.
Parameters
----------
**kwargs
The attributes to set
"""
for key, value in kwargs.items():
if not isinstance(value, string_types):
raise ValueError("Set Attr only accepts string values")
check_call(_LIB.MXSymbolSetAttr(
self.handle, c_str(key), c_str(str(value))))
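    # Editor's sketch (illustrative, not part of the original file), mirroring
    # the ``attr`` example above:
    #   >>> a = mx.sym.Variable('a')
    #   >>> a._set_attr(mood='angry')
    #   >>> a.attr('mood')
    #   'angry'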
def get_internals(self):
"""Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of
outputs of all of the internal nodes.
Consider the following code:
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> d = c.get_internals()
>>> d
<Symbol Grouped>
>>> d.list_outputs()
['a', 'b', '_plus4_output']
Returns
-------
sgroup : Symbol
A symbol group containing all internal and leaf nodes of the computation graph
used to compute the symbol.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetInternals(
self.handle, ctypes.byref(handle)))
return Symbol(handle=handle)
def get_children(self):
"""Gets a new grouped symbol whose output contains
inputs to output nodes of the original symbol.
Example usage:
----------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.Variable('z')
>>> a = y+z
>>> b = x+a
>>> b.get_children()
<Symbol Grouped>
>>> b.get_children().list_outputs()
['x', '_plus10_output']
>>> b.get_children().get_children().list_outputs()
['y', 'z']
Returns
-------
sgroup : Symbol or None
The children of the head node. If the symbol has no
inputs then ``None`` will be returned.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetChildren(
self.handle, ctypes.byref(handle)))
ret = Symbol(handle=handle)
if len(ret.list_outputs()) == 0:
return None
return ret
def list_arguments(self):
"""Lists all the arguments in the symbol.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
        >>> c.list_arguments()
['a', 'b']
Returns
-------
args : list of string
List containing the names of all the arguments required to compute the symbol.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListArguments(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_outputs(self):
"""Lists all the outputs in the symbol.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_outputs()
['_plus12_output']
Returns
-------
list of str
List of all the outputs.
For most symbols, this list contains only the name of this symbol.
For symbol groups, this is a list with the names of all symbols
in the group.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListOutputs(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of string
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def infer_type(self, *args, **kwargs):
"""Infers the type of all arguments and all outputs, given the known types
for some arguments.
This function takes the known types of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing types.
Inconsistencies in the known types will cause an error to be raised.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_types, out_types, aux_types = c.infer_type(a='float32')
>>> arg_types
[<type 'numpy.float32'>, <type 'numpy.float32'>]
>>> out_types
[<type 'numpy.float32'>]
>>> aux_types
[]
Parameters
----------
*args :
Type of known arguments in a positional way.
Unknown type can be marked as None.
**kwargs :
Keyword arguments of known types.
Returns
-------
arg_types : list of numpy.dtype or None
List of argument types.
The order is same as the order of list_arguments().
out_types : list of numpy.dtype or None
List of output types.
The order is same as the order of list_outputs().
aux_types : list of numpy.dtype or None
List of auxiliary state types.
The order is same as the order of list_auxiliary_states().
"""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
types either by positional or kwargs way.')
sdata = []
if len(args) != 0:
keys = None
for s in args:
if s is not None:
s = _numpy.dtype(s).type
if s not in _DTYPE_NP_TO_MX:
raise TypeError('Argument need to be one of '+str(_DTYPE_NP_TO_MX))
sdata.append(_DTYPE_NP_TO_MX[s])
else:
sdata.append(-1)
else:
keys = []
for k, v in kwargs.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
keys.append(c_str(k))
sdata.append(_DTYPE_NP_TO_MX[v])
arg_type_size = mx_uint()
arg_type_data = ctypes.POINTER(ctypes.c_int)()
out_type_size = mx_uint()
out_type_data = ctypes.POINTER(ctypes.c_int)()
aux_type_size = mx_uint()
aux_type_data = ctypes.POINTER(ctypes.c_int)()
complete = ctypes.c_int()
check_call(_LIB.MXSymbolInferType(
self.handle,
mx_uint(len(sdata)),
c_array(ctypes.c_char_p, keys),
c_array(ctypes.c_int, sdata),
ctypes.byref(arg_type_size),
ctypes.byref(arg_type_data),
ctypes.byref(out_type_size),
ctypes.byref(out_type_data),
ctypes.byref(aux_type_size),
ctypes.byref(aux_type_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_types = [
_DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)]
out_types = [
_DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)]
aux_types = [
_DTYPE_MX_TO_NP[aux_type_data[i]] for i in range(aux_type_size.value)]
return (arg_types, out_types, aux_types)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def infer_shape(self, *args, **kwargs):
"""Infers the shapes of all arguments and all outputs given the known shapes of
some arguments.
This function takes the known shapes of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing shapes.
Example usage:
----------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
>>> arg_shapes
[(3L, 3L), (3L, 3L)]
>>> out_shapes
[(3L, 3L)]
>>> aux_shapes
[]
>>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
(None, None, None)
Inconsistencies in the known shapes will cause an error to be raised.
See the following example:
>>> data = mx.sym.Variable('data')
>>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
>>> out = mx.sym.Activation(data=out, act_type='relu')
>>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
>>> weight_shape= (1, 100)
>>> data_shape = (100, 100)
>>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None.
**kwargs :
Keyword arguments of the known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
try:
res = self._infer_shape_impl(False, *args, **kwargs)
if res[1] is None:
arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, shape in zip(arg_names, arg_shapes):
if not shape or not _numpy.prod(shape):
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s'%(name, str(shape)))
warnings.warn(
"Cannot decide shape for the following arguments " +
"(0s in shape means unknown dimensions). " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_shape error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def infer_shape_partial(self, *args, **kwargs):
"""Infers the shape partially.
        This function works the same way as `infer_shape`,
except that this function can return partial results.
In the following example, information about fc2 is not available. So, `infer_shape`
will return a tuple of `None` values but `infer_shape_partial` will return partial values.
Example usage:
----------
>>> data = mx.sym.Variable('data')
>>> prev = mx.sym.Variable('prev')
>>> fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
>>> fc2 = mx.sym.FullyConnected(data=prev, name='fc2', num_hidden=128)
>>> out = mx.sym.Activation(data=mx.sym.elemwise_add(fc1, fc2), act_type='relu')
>>> out.list_arguments()
['data', 'fc1_weight', 'fc1_bias', 'prev', 'fc2_weight', 'fc2_bias']
>>> out.infer_shape(data=(10,64))
(None, None, None)
>>> out.infer_shape_partial(data=(10,64))
([(10L, 64L), (128L, 64L), (128L,), (), (), ()], [(10L, 128L)], [])
>>> # infers shape if you give information about fc2
>>> out.infer_shape(data=(10,64), prev=(10,128))
([(10L, 64L), (128L, 64L), (128L,), (10L, 128L), (128L, 128L), (128L,)], [(10L, 128L)], [])
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None
**kwargs :
Keyword arguments of known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
return self._infer_shape_impl(True, *args, **kwargs)
def _infer_shape_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling shape inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
shapes either by positional or kwargs way.')
sdata = []
indptr = [0]
if len(args) != 0:
keys = None
for s in args:
if s is not None:
if not isinstance(s, tuple):
raise TypeError('Arguments must be shapes (tuple)')
sdata.extend(s)
indptr.append(len(sdata))
else:
keys = []
for k, v in kwargs.items():
if isinstance(v, tuple):
keys.append(c_str(k))
sdata.extend(v)
indptr.append(len(sdata))
arg_shape_size = mx_uint()
arg_shape_ndim = ctypes.POINTER(mx_uint)()
arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
out_shape_size = mx_uint()
out_shape_ndim = ctypes.POINTER(mx_uint)()
out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
aux_shape_size = mx_uint()
aux_shape_ndim = ctypes.POINTER(mx_uint)()
aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferShapePartial
else:
infer_func = _LIB.MXSymbolInferShape
check_call(infer_func(
self.handle,
mx_uint(len(indptr) - 1),
c_array(ctypes.c_char_p, keys),
c_array(mx_uint, indptr),
c_array(mx_uint, sdata),
ctypes.byref(arg_shape_size),
ctypes.byref(arg_shape_ndim),
ctypes.byref(arg_shape_data),
ctypes.byref(out_shape_size),
ctypes.byref(out_shape_ndim),
ctypes.byref(out_shape_data),
ctypes.byref(aux_shape_size),
ctypes.byref(aux_shape_ndim),
ctypes.byref(aux_shape_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_shapes = [
tuple(arg_shape_data[i][:arg_shape_ndim[i]]) for i in range(arg_shape_size.value)]
out_shapes = [
tuple(out_shape_data[i][:out_shape_ndim[i]]) for i in range(out_shape_size.value)]
aux_shapes = [
tuple(aux_shape_data[i][:aux_shape_ndim[i]]) for i in range(aux_shape_size.value)]
return (arg_shapes, out_shapes, aux_shapes)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def debug_str(self):
"""Gets a debug string.
Returns
-------
debug_str : string
Debug string of the symbol.
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
def save(self, fname):
"""Saves symbol to a file.
You can also use pickle to do the job if you only work on python.
The advantage of `load`/`save` functions is that the file contents are language agnostic.
This means the model saved by one language binding can be loaded by a different
language binding of `MXNet`.
        You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file.
- "s3://my-bucket/path/my-s3-symbol"
- "hdfs://my-bucket/path/my-hdfs-symbol"
- "/path-to/my-local-symbol"
See Also
--------
symbol.load : Used to load symbol from file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))
def tojson(self):
"""Saves symbol to a JSON string.
See Also
--------
symbol.load_json : Used to load symbol from JSON string.
"""
json_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str)))
return py_str(json_str.value)
@staticmethod
def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):
"""Helper function to get NDArray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If type is list of NDArray, the position is in the same order of arg_names.
If type is dict of str to NDArray, then it maps the name of arguments
to the corresponding NDArray,
args_names : list of string
List of argument names.
allow_missing : boolean
Whether missing argument is allowed.
When allowed, the missing handle will be set to None(null)
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input.
"""
# setup args
arg_handles = []
arg_arrays = []
if isinstance(args, list):
if len(args) != len(arg_names):
raise ValueError('Length of %s does not match the number of arguments' % arg_key)
for narr in args:
if not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays = args
elif isinstance(args, dict):
for name in arg_names:
if name in args:
narr = args[name]
if not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays.append(narr)
else:
if allow_missing:
arg_handles.append(None)
arg_arrays.append(None)
else:
raise ValueError('key `%s` is missing in `%s`' % (name, arg_key))
else:
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
return c_array(NDArrayHandle, arg_handles), arg_arrays
def simple_bind(self, ctx,
grad_req='write',
type_dict=None,
group2ctx=None,
**kwargs):
"""Binds current symbol to get an executor, allocate all the arguments needed.
This function simplifies the binding procedure. You need to specify only input data shapes.
Before binding the executor, the function allocates arguments and auxiliary states
that were not explicitly specified. Allows specifying data types.
Example usage:
----------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.FullyConnected(x, num_hidden=4)
>>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req=[])
>>> exe.forward()
[<NDArray 5x4 @cpu(0)>]
>>> exe.outputs[0].asnumpy()
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
>>> exe.arg_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
>>> exe.grad_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
grad_req: string
{'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
        - 'write' means every time the gradient is written to the specified `args_grad` NDArray.
        - 'add' means every time the gradient is added to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
The generated executor
"""
# pylint: disable=too-many-locals
if type_dict is None:
attrs = self.attr_dict()
type_dict = {k: mx_real_t for k in self.list_arguments()
if k not in attrs or '__dtype__' not in attrs[k]}
arg_shapes, _, aux_shapes = self.infer_shape(**kwargs)
arg_types, _, aux_types = self.infer_type(**type_dict)
if arg_shapes is None or arg_types is None:
raise ValueError("Input node is not complete")
if group2ctx is not None:
attr_dict = self.attr_dict()
arg_ctx = [group2ctx.get(attr_dict[name]['__ctx_group__'], ctx) \
if name in attr_dict and '__ctx_group__' in attr_dict[name] \
else ctx for name in self.list_arguments()]
aux_ctx = [group2ctx.get(attr_dict[name]['__ctx_group__'], ctx) \
if name in attr_dict and '__ctx_group__' in attr_dict[name] \
else ctx for name in self.list_auxiliary_states()]
else:
arg_ctx = [ctx] * len(arg_shapes)
aux_ctx = [ctx] * len(aux_shapes)
# alloc space
arg_ndarrays = [
_nd_zeros(shape, dev, dtype=dtype)
for dtype, dev, shape in zip(arg_types, arg_ctx, arg_shapes)]
if grad_req != 'null':
grad_ndarrays = {}
for name, shape, dev, dtype in zip(
self.list_arguments(), arg_shapes, arg_ctx, arg_types):
if not isinstance(grad_req, dict) or grad_req[name] != 'null':
grad_ndarrays[name] = _nd_zeros(shape, dev, dtype=dtype)
else:
grad_ndarrays = None
aux_ndarrays = [_nd_zeros(shape, dev, dtype=dtype)
for shape, dev, dtype in zip(aux_shapes, aux_ctx, aux_types)]
executor = self.bind(ctx, arg_ndarrays,
grad_ndarrays, grad_req, aux_ndarrays,
group2ctx=group2ctx)
return executor
def bind(self, ctx, args, args_grad=None, grad_req='write',
aux_states=None, group2ctx=None, shared_exec=None):
"""Binds the current symbol to an executor and returns it.
We first declare the computation and then bind to the data to run.
        This function returns an executor which provides a `forward()` method for evaluation
        and an `outputs` attribute to get all the results.
Example usage:
----------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
<Symbol _plus1>
>>> ex = c.bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})
>>> ex.forward()
[<NDArray 2x3 @cpu(0)>]
>>> ex.outputs[0].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbol.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding `NDArray`.
- In either case, all the arguments must be provided.
args_grad : list of NDArray or dict of str to `NDArray`, optional
When specified, `args_grad` provides NDArrays to hold
the result of gradient value in backward.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding NDArray.
- When the type is a dict of str to `NDArray`, one only need to provide the dict
for required argument gradient.
Only the specified argument gradient will be calculated.
grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
        - 'write' means every time the gradient is written to the specified `args_grad` `NDArray`.
        - 'add' means every time the gradient is added to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
aux_states : list of `NDArray`, or dict of str to `NDArray`, optional
Input auxiliary states to the symbol, only needed when the output of
`list_auxiliary_states()` is not empty.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_auxiliary_states()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of
`auxiliary_states` to the corresponding `NDArray`,
- In either case, all the auxiliary states need to be provided.
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_exec : mx.executor.Executor
Executor to share memory with. This is intended for runtime reshaping, variable length
sequences, etc. The returned executor shares state with `shared_exec`, and should not be
used in parallel with it.
Returns
-------
executor : Executor
The generated executor
Notes
-----
Auxiliary states are the special states of symbols that do not correspond
to an argument, and do not have gradient but are still useful
for the specific operations. Common examples of auxiliary states include
the `moving_mean` and `moving_variance` states in `BatchNorm`.
Most operators do not have auxiliary states and in those cases,
this parameter can be safely ignored.
        One can skip computing certain gradients by using a dict for `args_grad`
        and specifying only the gradients of interest.
"""
# pylint: disable=too-many-locals, too-many-branches
if not isinstance(ctx, Context):
raise TypeError("Context type error")
listed_arguments = self.list_arguments()
args_handle, args = self._get_ndarray_inputs('args', args, listed_arguments, False)
# setup args gradient
if args_grad is None:
args_grad_handle = c_array(NDArrayHandle, [None] * len(args))
else:
args_grad_handle, args_grad = self._get_ndarray_inputs(
'args_grad', args_grad, listed_arguments, True)
if aux_states is None:
aux_states = []
aux_args_handle, aux_states = self._get_ndarray_inputs(
'aux_states', aux_states, self.list_auxiliary_states(), False)
# setup requirements
if isinstance(grad_req, string_types):
if grad_req not in _GRAD_REQ_MAP:
raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP))
reqs_array = c_array(
mx_uint,
[mx_uint(_GRAD_REQ_MAP[grad_req])] * len(listed_arguments))
elif isinstance(grad_req, list):
reqs_array = c_array(mx_uint, [mx_uint(_GRAD_REQ_MAP[item]) for item in grad_req])
elif isinstance(grad_req, dict):
req_array = []
for name in listed_arguments:
if name in grad_req:
req_array.append(mx_uint(_GRAD_REQ_MAP[grad_req[name]]))
else:
req_array.append(mx_uint(0))
reqs_array = c_array(mx_uint, req_array)
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if group2ctx:
for key, val in group2ctx.items():
ctx_map_keys.append(c_str(key))
ctx_map_dev_types.append(ctypes.c_int(val.device_typeid))
ctx_map_dev_ids.append(ctypes.c_int(val.device_id))
handle = ExecutorHandle()
shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
check_call(_LIB.MXExecutorBindEX(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_array(ctypes.c_char_p, ctx_map_keys),
c_array(ctypes.c_int, ctx_map_dev_types),
c_array(ctypes.c_int, ctx_map_dev_ids),
mx_uint(len(args)),
args_handle,
args_grad_handle,
reqs_array,
mx_uint(len(aux_states)),
aux_args_handle,
shared_handle,
ctypes.byref(handle)))
executor = Executor(handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = args
executor.grad_arrays = args_grad
executor.aux_arrays = aux_states
return executor
def grad(self, wrt):
"""Get the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
            Names of the arguments with respect to which the gradients are taken.
Returns
-------
grad : Symbol
            A gradient Symbol whose outputs are the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_array(ctypes.c_char_p, [c_str(key) for key in wrt])
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle)
# pylint: enable= no-member
def eval(self, ctx=cpu(), **kwargs):
"""Evaluates a symbol given arguments.
The `eval` method combines a call to `bind` (which returns an executor)
with a call to `forward` (executor method).
        For the common use case, where you might repeatedly evaluate with the same arguments,
eval is slow.
In that case, you should call `bind` once and then repeatedly call forward.
This function allows simpler syntax for less cumbersome introspection.
Example usage:
----------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
>>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))
>>> ex
[<NDArray 2x3 @cpu(0)>]
>>> ex[0].asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
Parameters
----------
ctx : Context
The device context the generated executor to run on.
kwargs : Keyword arguments of type `NDArray`
Input arguments to the symbol. All the arguments must be provided.
Returns
----------
result : a list of NDArrays corresponding to the values taken by each symbol when
evaluated on given args. When called on a single symbol (not a group),
the result will be a list with one element.
"""
return self.bind(ctx, kwargs).forward()
def reshape(self, shape):
"""Shorthand for mxnet.sym.reshape.
Parameters
----------
shape : tuple of int
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
Returns
-------
Symbol
A reshaped symbol.
"""
return reshape(self, shape=shape)
def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None, init=None, **kwargs):
"""Creates a symbolic variable with specified name.
Example usage:
----------
>>> data = mx.sym.Variable('data', attr={'a': 'b'})
>>> data
<Symbol data>
Parameters
----------
name : str
Variable name.
attr : Dict of strings
Additional attributes to set on the variable. Format {string : string}.
shape : tuple
The shape of a variable. If specified, this will be used during the shape inference.
If one has specified a different shape for this variable using
a keyword argument when calling shape inference, this shape information will be ignored.
lr_mult : float
The learning rate multiplier for input variable.
wd_mult : float
Weight decay multiplier for input variable.
dtype : str or numpy.dtype
The dtype for input variable. If not specified, this value will be inferred.
init : initializer (mxnet.init.*)
Initializer for this variable to (optionally) override the default initializer.
kwargs : Additional attribute variables
Additional attributes must start and end with double underscores.
Returns
-------
variable : Symbol
A symbol corresponding to an input to the computation graph.
"""
if not isinstance(name, string_types):
raise TypeError('Expect a string for variable `name`')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))
ret = Symbol(handle)
attr = AttrScope.current.get(attr)
attr = {} if attr is None else attr
if shape is not None:
attr['__shape__'] = str(shape)
if lr_mult is not None:
attr['__lr_mult__'] = str(lr_mult)
if wd_mult is not None:
attr['__wd_mult__'] = str(wd_mult)
if dtype is not None:
attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
if init is not None:
if not isinstance(init, string_types):
init = init.dumps()
attr['__init__'] = init
for k, v in kwargs.items():
if k.startswith('__') and k.endswith('__'):
attr[k] = str(v)
else:
raise ValueError('Attribute name=%s is not supported.'
' Additional attributes must start and end with double underscores,'
' e.g, __yourattr__' % k)
ret._set_attr(**attr)
return ret
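# Editor's sketch (illustrative, not part of the original file): the keyword
# arguments of ``var`` are stored as string attributes on the variable:
#   >>> w = var('w', shape=(2, 3), lr_mult=2.0)
#   >>> w.attr('__shape__')
#   '(2, 3)'
#   >>> w.attr('__lr_mult__')
#   '2.0'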
# for back compatibility
Variable = var
def Group(symbols):
"""Creates a symbol that contains a collection of other symbols, grouped together.
Parameters
----------
symbols : list
List of symbols to be grouped.
Returns
-------
sym : Symbol
A group symbol.
"""
ihandles = []
for sym in symbols:
if not isinstance(sym, Symbol):
raise TypeError('Expected a list of symbols as input')
ihandles.append(sym.handle)
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateGroup(
mx_uint(len(ihandles)),
c_array(SymbolHandle, ihandles), ctypes.byref(handle)))
return Symbol(handle)
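# Editor's sketch (illustrative, not part of the original file): a group's
# outputs are simply the outputs of its members, in order:
#   >>> a = Variable('a')
#   >>> b = Variable('b')
#   >>> g = Group([a, b])
#   >>> g.list_outputs()
#   ['a', 'b']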
def load(fname):
"""Load symbol from a JSON file.
You can also use pickle to do the job if you only work on python.
    The advantage of load/save is that the file is language agnostic.
    This means a file saved with `save` can be loaded by any other language binding of mxnet.
    You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return Symbol(handle)
def load_json(json_str):
"""Load symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
        raise TypeError('json_str required to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return Symbol(handle)
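# Editor's sketch (illustrative, not part of the original file): ``load_json``
# is the inverse of ``Symbol.tojson``, so a symbol survives a JSON round trip:
#   >>> a = Variable('a')
#   >>> load_json(a.tojson()).name
#   'a'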
# Initialize the atomic symbol in startups
_init_symbol_module(Symbol, "mxnet")
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def pow(base, exp):
""" Raise base to an exp.
Parameters
---------
base: Symbol or Number
exp: Symbol or Number
Returns
-------
result: Symbol or Number
"""
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))
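# Editor's sketch (illustrative, not part of the original file): ``pow``
# dispatches on operand types and falls back to plain Python for two numbers:
#   >>> pow(2, 3)
#   8
#   >>> x = Variable('x')
#   >>> y = pow(x, 2)    # becomes _internal._PowerScalar(x, scalar=2)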
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def maximum(left, right):
""" maximum left and right
Parameters
---------
left: Symbol or Number
right: Symbol or Number
Returns
-------
result: Symbol or Number
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Maximum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MaximumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MaximumScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return left if left > right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def minimum(left, right):
""" minimum left and right
Parameters
---------
left: Symbol or Number
right: Symbol or Number
Returns
-------
result: Symbol or Number
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Minimum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MinimumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MinimumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left < right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def hypot(left, right):
""" minimum left and right
Parameters
---------
left: Symbol or Number
right: Symbol or Number
Returns
-------
result: Symbol or Number
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Hypot(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._HypotScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._HypotScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return _numpy.hypot(left, right)
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
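# Editor's sketch (illustrative, not part of the original file):
#   >>> hypot(3, 4)          # two numbers fall through to numpy.hypot
#   5.0
#   >>> x = Variable('x')
#   >>> h = hypot(x, 4.0)    # Symbol/Number pair -> _internal._HypotScalar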
def zeros(shape, dtype=None, **kwargs):
"""Return a new symbol of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._zeros(shape=shape, dtype=dtype, **kwargs)
def ones(shape, dtype=None, **kwargs):
"""Return a new symbol of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._ones(shape=shape, dtype=dtype, **kwargs)
def arange(start, stop=None, step=1.0, repeat=1, name=None, dtype=None):
"""Return evenly spaced values within a given interval.
Parameters
----------
start : number
Start of interval. The interval includes this value. The default start value is 0.
stop : number, optional
End of interval. The interval does not include this value.
step : number, optional
Spacing between values.
repeat : int, optional
"The repeating time of all elements.
E.g repeat=3, the element a will be repeated three times --> a, a, a.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
name=name, dtype=dtype)
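# Editor's sketch (illustrative, not part of the original file): these
# factories are symbolic, so values only materialize on evaluation:
#   >>> z = zeros((2, 3))
#   >>> z.eval(ctx=cpu())[0].asnumpy().shape
#   (2, 3)
#   >>> arange(0, 3, repeat=2).eval(ctx=cpu())[0].asnumpy()
#   array([ 0.,  0.,  1.,  1.,  2.,  2.], dtype=float32)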
|
likelyzhao/mxnet
|
python/mxnet/symbol.py
|
Python
|
apache-2.0
| 64,322
|
from django.db import models
from django.contrib.auth.models import User
class Challenges(models.Model):
web_name = models.CharField(max_length=20)
popup_name = models.CharField(max_length=20, unique=True)
fullname = models.CharField(max_length=100)
flag = models.CharField(max_length=100, unique=True)
tile_icon = models.CharField(max_length=100)
description = models.TextField()
points = models.IntegerField()
num_solved = models.IntegerField()
def __unicode__(self):
return self.fullname
class ChallengesSolved(models.Model):
user = models.ForeignKey(User)
challenge = models.ForeignKey(Challenges)
class Meta:
unique_together = ('user', 'challenge',)
    def __unicode__(self):
return self.challenge.fullname + '_' + self.user.username
class ChallengeSubmissions(models.Model):
user = models.ForeignKey(User, unique=True)
correct_flags = models.IntegerField()
wrong_flags = models.BigIntegerField()
class ScoreBoard(models.Model):
team = models.ForeignKey(User, unique=True)
score = models.IntegerField()
modified = models.DateTimeField(auto_now=True)
    def __unicode__(self):
return self.team.username
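# Editor's sketch (hypothetical usage, not part of the original file):
# ``unique_together`` enforces one solve per (user, challenge) pair at the
# database level:
#   ChallengesSolved.objects.create(user=u, challenge=c)
#   ChallengesSolved.objects.create(user=u, challenge=c)  # IntegrityError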
|
IAryan/NULLify-HSCTF-2014
|
CTF/HSCTF/challenges/models.py
|
Python
|
mit
| 1,139
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = enabled
self.not_before = not_before
self.expires = expires
self.created = None
self.updated = None
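# Editor's sketch (illustrative, not part of the generated file): the
# ``_attribute_map`` above renames fields to their wire keys and encodes
# datetimes as Unix timestamps, so (roughly):
#   Attributes(enabled=True, expires=datetime.datetime(2030, 1, 1))
#   # -> {"enabled": true, "exp": 1893456000}; the readonly
#   #    'created'/'updated' fields are ignored when sending a request.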
class BackupSecretResult(msrest.serialization.Model):
"""The backup secret result, containing the backup blob.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The backup blob containing the backed up secret.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(BackupSecretResult, self).__init__(**kwargs)
self.value = None
class SecretBundle(msrest.serialization.Model):
"""A secret consisting of a value, id and its attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(SecretBundle, self).__init__(**kwargs)
self.value = value
self.id = id
self.content_type = content_type
self.attributes = attributes
self.tags = tags
self.kid = None
self.managed = None
class DeletedSecretBundle(SecretBundle):
"""A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretBundle, self).__init__(value=value, id=id, content_type=content_type, attributes=attributes, tags=tags, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class SecretItem(msrest.serialization.Model):
"""The secret item containing secret metadata.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'managed': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
**kwargs
):
super(SecretItem, self).__init__(**kwargs)
self.id = id
self.attributes = attributes
self.tags = tags
self.content_type = content_type
self.managed = None
class DeletedSecretItem(SecretItem):
"""The deleted secret item containing metadata about the deleted secret.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretItem, self).__init__(id=id, attributes=attributes, tags=tags, content_type=content_type, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedSecretListResult(msrest.serialization.Model):
"""The deleted secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of the deleted secrets in the vault along
with a link to the next page of deleted secrets.
:vartype value: list[~azure.keyvault.v7_3_preview.models.DeletedSecretItem]
:ivar next_link: The URL to get the next set of deleted secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedSecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedSecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Error(msrest.serialization.Model):
"""The key vault server error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar inner_error: The key vault server error.
:vartype inner_error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'inner_error': {'key': 'innererror', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.inner_error = None
class KeyVaultError(msrest.serialization.Model):
"""The key vault error exception.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The key vault server error.
:vartype error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultError, self).__init__(**kwargs)
self.error = None
class SecretAttributes(Attributes):
"""The secret management attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
:ivar recoverable_days: softDelete data retention days. Value should be >=7 and <=90 when
softDelete enabled, otherwise 0.
:vartype recoverable_days: int
:ivar recovery_level: Reflects the deletion recovery level currently in effect for secrets in
the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a
privileged user; otherwise, only the system can purge the secret, at the end of the retention
interval. Possible values include: "Purgeable", "Recoverable+Purgeable", "Recoverable",
"Recoverable+ProtectedSubscription", "CustomizedRecoverable+Purgeable",
"CustomizedRecoverable", "CustomizedRecoverable+ProtectedSubscription".
:vartype recovery_level: str or ~azure.keyvault.v7_3_preview.models.DeletionRecoveryLevel
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
'recoverable_days': {'readonly': True},
'recovery_level': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
'recoverable_days': {'key': 'recoverableDays', 'type': 'int'},
'recovery_level': {'key': 'recoveryLevel', 'type': 'str'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(SecretAttributes, self).__init__(enabled=enabled, not_before=not_before, expires=expires, **kwargs)
self.recoverable_days = None
self.recovery_level = None
class SecretListResult(msrest.serialization.Model):
"""The secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of secrets in the key vault along with a link
to the next page of secrets.
:vartype value: list[~azure.keyvault.v7_3_preview.models.SecretItem]
:ivar next_link: The URL to get the next set of secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SecretProperties(msrest.serialization.Model):
"""Properties of the key backing a certificate.
:param content_type: The media type (MIME type).
:type content_type: str
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
*,
content_type: Optional[str] = None,
**kwargs
):
super(SecretProperties, self).__init__(**kwargs)
self.content_type = content_type
class SecretRestoreParameters(msrest.serialization.Model):
"""The secret restore parameters.
All required parameters must be populated in order to send to Azure.
:param secret_bundle_backup: Required. The backup blob associated with a secret bundle.
:type secret_bundle_backup: bytes
"""
_validation = {
'secret_bundle_backup': {'required': True},
}
_attribute_map = {
'secret_bundle_backup': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
*,
secret_bundle_backup: bytes,
**kwargs
):
super(SecretRestoreParameters, self).__init__(**kwargs)
self.secret_bundle_backup = secret_bundle_backup
class SecretSetParameters(msrest.serialization.Model):
"""The secret set parameters.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value of the secret.
:type value: str
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
}
def __init__(
self,
*,
value: str,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
secret_attributes: Optional["SecretAttributes"] = None,
**kwargs
):
super(SecretSetParameters, self).__init__(**kwargs)
self.value = value
self.tags = tags
self.content_type = content_type
self.secret_attributes = secret_attributes
class SecretUpdateParameters(msrest.serialization.Model):
"""The secret update parameters.
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
content_type: Optional[str] = None,
secret_attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(SecretUpdateParameters, self).__init__(**kwargs)
self.content_type = content_type
self.secret_attributes = secret_attributes
self.tags = tags
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/models/_models_py3.py
|
Python
|
mit
| 21,209
|
# -*- coding: utf-8 -*-
"""
@newfield purpose: Purpose
@newfield sideeffect: Side effect, Side effects
@purpose: TBD
@author: Christian Kohlöffel
@since: 26.12.2009
@license: GPL
"""
import os
from Core.configobj import ConfigObj,flatten_errors
from Core.validate import Validator
#from dotdictlookup import DictDotLookup
import time
import Core.constants as c
import Core.Globals as g
from d2gexceptions import *
from PyQt4 import QtCore, QtGui
import logging
logger = logging.getLogger("PostPro.PostProcessorConfig")
POSTPRO_VERSION = "2"
"""
version tag - increment this each time you edit CONFIG_SPEC
compared to version number in config file so
old versions are recognized and skipped"
"""
POSTPRO_SPEC = str('''
# Section and variable names must be valid Python identifiers
# do not use whitespace in names
# do not edit the following section name:
[Version]
# do not edit the following value:
config_version = string(default="''' + \
str(POSTPRO_VERSION) + '")\n' + \
'''
[General]
output_format = string(default=".ngx")
output_text = string(default="G-CODE for EMC2")
output_type = string(default="g-code")
comments = boolean(default=False)
abs_export = boolean(default=True)
cancel_cc_for_depth = boolean(default=False)
cc_outside_the_piece = boolean(default=True)
export_ccw_arcs_only = boolean(default=False)
max_arc_radius = float(default=10000)
code_begin=string(default="G21 (Unit in mm) G90 (Absolute distance mode) G64 P0.01 (Exact Path 0.001 tol.) G17 G40 (Cancel diameter comp.) G49 (Cancel length comp.)")
code_end=string(default="M2 (Program end)")
[Number_Format]
pre_decimals = integer(default=1)
post_decimals = integer(default=3)
decimal_seperator = string(default=".")
pre_decimal_zero_padding = boolean(default=False)
post_decimal_zero_padding = boolean(default=True)
signed_values = boolean(default=False)
[Line_Numbers]
use_line_nrs = boolean(default=False)
line_nrs_begin = integer(default=10)
line_nrs_step = integer(default=10)
[Program]
tool_change = string(default=T%tool_nr M6%nlS%speed%nl)
feed_change = string(default=F%feed%nl)
rap_pos_plane = string(default=G0 X%XE Y%YE%nl)
rap_pos_depth = string(default=G0 Z%ZE %nl)
lin_mov_plane = string(default= G1 X%XE Y%YE%nl)
lin_mov_depth = string(default= G1 Z%ZE%nl)
arc_int_cw = string(default=G2 X%XE Y%YE I%I J%J%nl)
arc_int_ccw = string(default=G3 X%XE Y%YE I%I J%J%nl)
cutter_comp_off = string(default=G40%nl)
cutter_comp_left = string(default=G41%nl)
cutter_comp_right = string(default=G42%nl)
pre_shape_cut= string(default=M3 M8%nl)
post_shape_cut=string(default=M9 M5%nl)
comment = string(default=%nl(%comment)%nl)
''').splitlines()
""" format, type and default value specification of the global config file"""
class MyPostProConfig(QtCore.QObject):
"""
This class hosts all functions related to the PostProConfig File.
"""
def __init__(self,filename='postpro_config.cfg'):
"""
initialize the varspace of an existing plugin instance
init_varspace() is a superclass method of plugin
@param filename: The filename for the creation of a new config
file and the filename of the file to read config from.
"""
QtCore.QObject.__init__(self)
self.folder = os.path.join(g.folder, c.DEFAULT_POSTPRO_DIR)
        self.filename = os.path.join(self.folder, filename)
self.default_config = False # whether a new name was generated
self.var_dict = dict()
self.spec = ConfigObj(POSTPRO_SPEC, interpolation=False, list_values=False, _inspec=True)
def tr(self,string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return unicode(QtGui.QApplication.translate("MyPostProConfig",
string_to_translate,
None,
QtGui.QApplication.UnicodeUTF8))
def load_config(self):
"""
This method tries to load the defined postprocessor file given in
self.filename. If this fail it will create a new one
"""
try:
# file exists, read & validate it
self.var_dict = ConfigObj(self.filename, configspec=POSTPRO_SPEC)
_vdt = Validator()
result = self.var_dict.validate(_vdt, preserve_errors=True)
validate_errors = flatten_errors(self.var_dict, result)
if validate_errors:
g.logger.logger.error(self.tr("errors reading %s:") % (self.filename))
for entry in validate_errors:
section_list, key, error = entry
if key is not None:
section_list.append(key)
else:
section_list.append('[missing section]')
section_string = ', '.join(section_list)
if error == False:
error = self.tr('Missing value or section.')
g.logger.logger.error( section_string + ' = ' + error)
if validate_errors:
raise BadConfigFileError,self.tr("syntax errors in postpro_config file")
# check config file version against internal version
if POSTPRO_VERSION:
fileversion = self.var_dict['Version']['config_version'] # this could raise KeyError
if fileversion != POSTPRO_VERSION:
raise VersionMismatchError, (fileversion, POSTPRO_VERSION)
except VersionMismatchError, values:
raise VersionMismatchError, (fileversion, POSTPRO_VERSION)
except Exception,inst:
logger.error(inst)
(base,ext) = os.path.splitext(self.filename)
badfilename = base + c.BAD_CONFIG_EXTENSION
logger.debug(self.tr("trying to rename bad cfg %s to %s") % (self.filename,badfilename))
try:
os.rename(self.filename,badfilename)
except OSError,e:
logger.error(self.tr("rename(%s,%s) failed: %s") % (self.filename,badfilename,e.strerror))
raise
else:
logger.debug(self.tr("renamed bad varspace %s to '%s'") %(self.filename,badfilename))
self.create_default_config()
self.default_config = True
logger.debug(self.tr("created default varspace '%s'") %(self.filename))
else:
self.default_config = False
logger.debug(self.tr("read existing varspace '%s'") %(self.filename))
        # convenience - flatten nested config dict to access it via self.vars.sectionname.varname
self.var_dict.main.interpolation = False # avoid ConfigObj getting too clever
self.vars = DictDotLookup(self.var_dict)
def make_settings_folder(self):
"""
This method creates the postprocessor settings folder if necessary
"""
try:
os.mkdir(self.folder)
except OSError:
pass
def create_default_config(self):
"""
If no postprocessor config file exists this function is called
to generate the config file based on its specification.
"""
#check for existing setting folder or create one
self.make_settings_folder()
# derive config file with defaults from spec
logger.debug(POSTPRO_SPEC)
self.var_dict = ConfigObj(configspec=POSTPRO_SPEC)
_vdt = Validator()
self.var_dict.validate(_vdt, copy=True)
self.var_dict.filename = self.filename
self.var_dict.write()
# def _save_varspace(self):
# self.var_dict.filename = self.filename
# self.var_dict.write()
#
def print_vars(self):
print "Variables:"
for k,v in self.var_dict['Variables'].items():
print k," = ",v
class DictDotLookup(object):
"""
    Creates objects that behave much like dictionaries, but allow nested
key access using object '.' (dot) lookups.
"""
def __init__(self, d):
for k in d:
if isinstance(d[k], dict):
self.__dict__[k] = DictDotLookup(d[k])
elif isinstance(d[k], (list, tuple)):
l = []
for v in d[k]:
if isinstance(v, dict):
l.append(DictDotLookup(v))
else:
l.append(v)
self.__dict__[k] = l
else:
self.__dict__[k] = d[k]
def __getitem__(self, name):
if name in self.__dict__:
return self.__dict__[name]
def __iter__(self):
return iter(self.__dict__.keys())
# def __repr__(self):
# return pprint.pformat(self.__dict__)
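# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): DictDotLookup is how
# load_config() exposes self.vars, turning nested config sections into
# attribute chains. The sample data below is invented.
if __name__ == '__main__':
    cfg = DictDotLookup({
        'General': {'output_format': '.ngx', 'comments': False},
        'Program': {'passes': [{'depth': 1.5}, {'depth': 3.0}]},
    })
    print(cfg.General.output_format)    # '.ngx'  -- nested dicts become attributes
    print(cfg['General'].comments)      # False   -- item access works too
    print(cfg.Program.passes[0].depth)  # 1.5     -- dicts inside lists are wrapped
    for section in cfg:                 # iterates top-level section names
        print(section)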
|
workflo/dxf2gcode
|
source/PostPro/PostProcessorConfig.py
|
Python
|
gpl-3.0
| 9,312
|
import random
class GridWorld(object):
ACTIONS = [0, 1, 2, 3]
    def __init__(self, grid, rewards, noise=0):
        # grid cells: 0 = free, 1 = wall (blocks movement), -1 = terminal;
        # rewards has the same shape as grid; noise is the probability that
        # an action slips to a perpendicular direction in take_action().
        self.grid = grid
        self.rewards = rewards
self._state_map = {}
self._reverse_map = {}
self.initial_state = 0
self._success_prob = 1 - noise
for i in range(len(grid)):
for j in range(len(grid[i])):
self._state_map[(i, j)] = len(self._state_map)
self._reverse_map[self._state_map[(i, j)]] = (i, j)
self.current_state = self.initial_state
def take_action(self, action):
coords = self._reverse_map[self.current_state]
if self.grid[coords[0]][coords[1]] == -1:
raise RuntimeError('Terminal state reached')
roll = random.random()
if roll > self._success_prob:
if action == 0 or action == 1:
action = 2 if random.random() > 0.5 else 3
elif action == 2 or action == 3:
action = 0 if random.random() > 0.5 else 1
if action == 0: # 0 is left
moveto = (coords[0], max(0, coords[1] - 1))
elif action == 1: # 1 is right
moveto = (coords[0], min(len(self.grid[coords[0]]) - 1, coords[1] + 1))
elif action == 2: # 2 is up
moveto = (max(0, coords[0] - 1), coords[1])
elif action == 3: # 3 is down
moveto = (min(len(self.grid) - 1, coords[0] + 1), coords[1])
if self.grid[moveto[0]][moveto[1]] == 1:
moveto = coords
reward = self.rewards[moveto[0]][moveto[1]]
self.current_state = self._state_map[moveto]
return action, reward, self.current_state
def play(strategy, iterations=1000, converge=False, max_steps_per_game=1000):
strategy.valid_actions = GridWorld.ACTIONS
grid_matrix = [[0, 0, 0, -1],
[0, 1, 0, -1],
[0, 0, 0, 0]]
reward_matrix = [[-0.04, -0.04, -0.04, 10],
[-0.04, -0.04, -0.04, -10],
[-0.04, -0.04, -0.04, -0.04]]
mygrid = GridWorld(grid_matrix, reward_matrix, 0.2)
strategy.fit((0, 0, 0))
count = 0
steps = 0
while count < iterations:
action = strategy.policy(mygrid.current_state)
try:
action, reward, state = mygrid.take_action(action)
strategy.fit((state, action, reward))
steps += 1
if steps > max_steps_per_game:
raise RuntimeError('Maximum number of steps reached')
except RuntimeError:
count += 1
mygrid.current_state = 0
strategy.init_episode()
strategy.fit((0, 0, 0))
if converge:
strategy.converge()
action_names = {0: '<', 1: '>', 2: '^', 3: 'v'}
print('')
for i in range(len(grid_matrix)):
row = ''
for j in range(len(grid_matrix[i])):
s = mygrid._state_map[(i, j)]
o = action_names.get(strategy._greedy_policy(s)) if grid_matrix[i][j] == 0 \
else str(grid_matrix[i][j])
row += o + '\t'
print(row)
for a in GridWorld.ACTIONS:
print('')
print('Action: %s' % action_names.get(a))
for i in range(len(grid_matrix)):
row = ''
for j in range(len(grid_matrix[i])):
s = mygrid._state_map[(i,j)]
o = '%.3f' % strategy.learner.val(s, a) if grid_matrix[i][j] == 0 \
else str(grid_matrix[i][j])
row += o + '\t'
print(row)
if hasattr(strategy, '_transition_count'):
print('')
print('Action count: %s' % action_names.get(a))
for i in range(len(grid_matrix)):
row = ''
for j in range(len(grid_matrix[i])):
s = mygrid._state_map[(i,j)]
o = '%d' % strategy._transition_count.get((s, a), 0)
row += o + '\t'
print(row)
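# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): play() assumes a
# strategy object exposing valid_actions, fit((state, action, reward)),
# policy(state), init_episode(), converge(), _greedy_policy(state) and
# learner.val(state, action). That interface is not defined in this file, so
# the epsilon-greedy tabular Q-learner below is a hypothetical stand-in
# (it reuses this module's `import random`).
class TabularQStrategy(object):
    def __init__(self, alpha=0.1, gamma=0.9, epsilon=0.2):
        self.valid_actions = []    # populated by play() before the first fit
        self.alpha, self.gamma, self.epsilon = alpha, gamma, epsilon
        self._q = {}               # (state, action) -> estimated value
        self._prev = None          # state the last action was taken from
        self.learner = self        # play() reads values via strategy.learner.val

    def val(self, state, action):
        return self._q.get((state, action), 0.0)

    def policy(self, state):
        # epsilon-greedy exploration over the actions play() installed
        if random.random() < self.epsilon:
            return random.choice(self.valid_actions)
        return self._greedy_policy(state)

    def _greedy_policy(self, state):
        return max(self.valid_actions, key=lambda a: self.val(state, a))

    def fit(self, transition):
        # play() calls fit with the state *reached*, the action that reached
        # it and the reward collected, so the one-step Q backup targets the
        # previous state. Terminal values stay 0 because take_action raises
        # before any action can be fit from a terminal state.
        state, action, reward = transition
        if self._prev is not None:
            best = max(self.val(state, a) for a in self.valid_actions)
            old = self.val(self._prev, action)
            self._q[(self._prev, action)] = old + self.alpha * (
                reward + self.gamma * best - old)
        self._prev = state

    def init_episode(self):
        self._prev = None

    def converge(self):
        self.epsilon *= 0.99       # decay exploration between episodes

if __name__ == '__main__':
    play(TabularQStrategy(), iterations=500, converge=True)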
|
omtinez/rltools
|
rltools/domains/gridworld.py
|
Python
|
mit
| 4,020
|
"""hexagami URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from hexagami.core import urls as core_urls
urlpatterns = [
url(r'', include(core_urls, namespace='core')),
url(r'^admin/', include(admin.site.urls)),
]
|
fandrefh/hexagami
|
hexagami/urls.py
|
Python
|
gpl-3.0
| 796
|