hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f731fb3d75d9b93685c1d1a5612a918a4923c2b5 | 3,402 | py | Python | cogs/Member_Log.py | yutarou12/keibot-python | 4a7e869bf3fc43d5cc3eddd442a3318e7cb98a7f | [
"MIT"
] | 1 | 2021-06-12T23:21:52.000Z | 2021-06-12T23:21:52.000Z | cogs/Member_Log.py | yutarou12/keibot-python | 4a7e869bf3fc43d5cc3eddd442a3318e7cb98a7f | [
"MIT"
] | null | null | null | cogs/Member_Log.py | yutarou12/keibot-python | 4a7e869bf3fc43d5cc3eddd442a3318e7cb98a7f | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import sqlite3
from discord import Embed, AllowedMentions
from discord.ext import commands
from pytz import timezone
class Member_Log(commands.Cog):
    """Member-related logging commands.

    Provides per-guild opt-in join notifications: when a member whose account
    was created within the last 3 days joins, the guild owner gets a DM.
    """

    def __init__(self, bot):
        self.bot = bot
        # Guild ids that enabled the join-notification feature (cache of the DB).
        self.welcome_notice = []
        self.bot.loop.create_task(self.setup())

    async def setup(self):
        """Populate the enabled-guild cache from the database once the bot is ready."""
        await self.bot.wait_until_ready()
        data = self.bot.db.welcome_notice_get()
        if len(data) > 0:
            for g in data[0]:
                self.welcome_notice.append(g)

    @commands.command(name='notice-on',
                      description='メンバー参加通知の機能をオンにします',
                      brief=['この機能の説明は、メンバーのアカウント作成日が3日以内の際に、'
                             'サーバーの管理者にDMを送信する機能です。\n'
                             'このコマンドの実行には、権限:管理者が必要です', 'administrator', 'notice-function'])
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def _notice_on(self, ctx):
        """Enable the member-join notification for this guild."""
        try:
            res = self.bot.db.welcome_notice_set(ctx.guild.id)
            if res:
                self.welcome_notice.append(ctx.guild.id)
                success_embed = Embed(description='メンバー参加通知の機能をオンにしました')
                return await ctx.reply(embed=success_embed, allowed_mentions=AllowedMentions.none())
        except sqlite3.IntegrityError:
            # BUG FIX: the original reply here was copied from another cog and
            # claimed "message URL expansion" was turned on; report the real
            # already-enabled state of THIS feature instead.
            integrity_error = Embed(description='メンバー参加通知の機能は既にオンになっています')
            return await ctx.reply(embed=integrity_error, allowed_mentions=AllowedMentions.none())

    @commands.command(name='notice-off',
                      description='メンバー参加通知の機能をオフにします',
                      brief=['この機能の説明は、メンバーのアカウント作成日が3日以内の際に、'
                             'サーバーの管理者にDMを送信する機能です。\n'
                             'このコマンドの実行には、権限:管理者が必要です', 'administrator', 'notice-function'])
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def _notice_off(self, ctx):
        """Disable the member-join notification for this guild."""
        res = self.bot.db.welcome_notice_unset(ctx.guild.id)
        if res:
            self.welcome_notice.remove(ctx.guild.id)
            success_embed = Embed(description='メンバー参加通知の機能をオフにしました')
            return await ctx.reply(embed=success_embed, allowed_mentions=AllowedMentions.none())

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """DM the guild owner when a freshly-created account joins an enabled guild."""
        if member.guild.id in self.welcome_notice:
            if not member.bot:
                # discord.py 1.x: created_at is a naive datetime in UTC, so the
                # naive subtraction against utcnow() below is consistent.
                if datetime.utcnow() - member.created_at < timedelta(days=4):
                    # BUG FIX: the original called astimezone() on the naive UTC
                    # datetime (interpreted as local time) and then added 9 hours
                    # on top, shifting the displayed JST timestamp. Attach UTC
                    # explicitly, then convert to Asia/Tokyo once.
                    created_jst = member.created_at.replace(tzinfo=timezone("UTC")).astimezone(timezone("Asia/Tokyo"))
                    created_at = created_jst.strftime("%Y/%m/%d %H:%M:%S")
                    notice_embed = Embed(title='メンバー参加通知',
                                         description='次のユーザーのアカウント作成日が3日以内だったため通知しました')
                    notice_embed.add_field(name='参加ユーザー', value=f'> {member}', inline=False)
                    notice_embed.add_field(name='参加サーバー', value=f'> {member.guild.name}', inline=False)
                    notice_embed.add_field(name='アカウント作成日', value=f'> {created_at}', inline=False)
                    notice_embed.set_thumbnail(url=member.avatar_url)
                    notice_embed.set_author(name=f'{member}', icon_url=member.avatar_url)
                    await member.guild.owner.send(embed=notice_embed)
def setup(bot):
    """discord.py extension entry point: register the Member_Log cog on the bot."""
    cog = Member_Log(bot)
    bot.add_cog(cog)
| 45.972973 | 103 | 0.61699 | from datetime import datetime, timedelta
import sqlite3
from discord import Embed, AllowedMentions
from discord.ext import commands
from pytz import timezone
class Member_Log(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.welcome_notice = []
self.bot.loop.create_task(self.setup())
async def setup(self):
await self.bot.wait_until_ready()
data = self.bot.db.welcome_notice_get()
if len(data) > 0:
for g in data[0]:
self.welcome_notice.append(g)
@commands.command(name='notice-on',
description='メンバー参加通知の機能をオンにします',
brief=['この機能の説明は、メンバーのアカウント作成日が3日以内の際に、'
'サーバーの管理者にDMを送信する機能です。\n'
'このコマンドの実行には、権限:管理者が必要です', 'administrator', 'notice-function'])
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def _notice_on(self, ctx):
try:
res = self.bot.db.welcome_notice_set(ctx.guild.id)
if res:
self.welcome_notice.append(ctx.guild.id)
success_embed = Embed(description='メンバー参加通知の機能をオンにしました')
return await ctx.reply(embed=success_embed, allowed_mentions=AllowedMentions.none())
except sqlite3.IntegrityError:
integrity_error = Embed(description='メッセージURL展開の機能をオンにしました')
return await ctx.reply(embed=integrity_error, allowed_mentions=AllowedMentions.none())
@commands.command(name='notice-off',
description='メンバー参加通知の機能をオフにします',
brief=['この機能の説明は、メンバーのアカウント作成日が3日以内の際に、'
'サーバーの管理者にDMを送信する機能です。\n'
'このコマンドの実行には、権限:管理者が必要です', 'administrator', 'notice-function'])
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def _notice_off(self, ctx):
res = self.bot.db.welcome_notice_unset(ctx.guild.id)
if res:
self.welcome_notice.remove(ctx.guild.id)
success_embed = Embed(description='メンバー参加通知の機能をオフにしました')
return await ctx.reply(embed=success_embed, allowed_mentions=AllowedMentions.none())
@commands.Cog.listener()
async def on_member_join(self, member):
if member.guild.id in self.welcome_notice:
if not member.bot:
if datetime.utcnow() - member.created_at < timedelta(days=4):
created_jst = member.created_at.astimezone(timezone("Asia/Tokyo"))
created_at = (created_jst + timedelta(hours=9)).strftime("%Y/%m/%d %H:%M:%S")
notice_embed = Embed(title='メンバー参加通知',
description='次のユーザーのアカウント作成日が3日以内だっため通知しました')
notice_embed.add_field(name='参加ユーザー', value=f'> {member}', inline=False)
notice_embed.add_field(name='参加サーバー', value=f'> {member.guild.name}', inline=False)
notice_embed.add_field(name='アカウント作成日', value=f'> {created_at}', inline=False)
notice_embed.set_thumbnail(url=member.avatar_url)
notice_embed.set_author(name=f'{member}', icon_url=member.avatar_url)
await member.guild.owner.send(embed=notice_embed)
def setup(bot):
bot.add_cog(Member_Log(bot))
| true | true |
f731fb8e98454206f951ad629d427c908be3673f | 20,599 | py | Python | dnsuptools/dnsuptools.py | TheTesla/dnsupdate | 1be992718c5e678a750b4f69bf94706583408365 | [
"MIT"
] | 1 | 2019-06-04T18:46:00.000Z | 2019-06-04T18:46:00.000Z | dnsuptools/dnsuptools.py | TheTesla/dnsupdate | 1be992718c5e678a750b4f69bf94706583408365 | [
"MIT"
] | null | null | null | dnsuptools/dnsuptools.py | TheTesla/dnsupdate | 1be992718c5e678a750b4f69bf94706583408365 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: UTF8 -*-
from dnsuptools.dnsupdate import defaultDictList, MatchUpperLabels, DNSUpdate
from dnsuptools.tlsarecgen import tlsaRecordsFromCertFile, tlsaFromFile
from dnsuptools.dkimrecgen import dkimFromFile
from simpleloggerplus import simpleloggerplus as log
import re
import pycurl
from io import BytesIO
import socket
import dns.resolver
def dkimKeySplit(dkimDict):
    """Split 'keyname' of a DKIM dict into 'keybasename' and optional 'keynbr'.

    Mutates and returns the given dict; recurses over lists of dicts.
    A keyname like 'key1_2' yields keybasename 'key1' and keynbr '2'.
    """
    if type(dkimDict) is list:
        return [dkimKeySplit(item) for item in dkimDict]
    parts = dkimDict['keyname'].split('_')
    dkimDict['keybasename'] = parts[0]
    if len(parts) > 1:
        dkimDict['keynbr'] = parts[1]
    return dkimDict
def parseNSentry(record):
    """Reduce an NS resource record dict to just its nameserver value."""
    nameserver = record['content']
    return dict(ns=nameserver)
def parseDKIMentry(record):
    """Parse a DKIM TXT resource record into a flat dict.

    Extracts the base name, the key selector ('keyname') and the
    '_domainkey' label from the record name, merges in the tag=value
    pairs of the record content, and splits the key name via dkimKeySplit().
    """
    nameParts = record['name'].split('.')
    content = record['content'].replace(' ', '')
    tagValues = {part.split('=')[0]: part.split('=')[1]
                 for part in content.split(';') if '=' in part}
    result = {'name': '.'.join(nameParts[2:]),
              'keyname': nameParts[0],
              'dkimlabel': nameParts[1]}
    result.update(tagValues)
    dkimKeySplit(result)
    return result
def formatDKIMentry(name, dkimDict):
    """Build a TXT resource record dict for a DKIM key entry.

    Recurses over lists. Defaults: keyname 'key1', v 'DKIM1', k 'rsa';
    the public key 'p' must be supplied by the caller.
    """
    if type(dkimDict) is list:
        return [formatDKIMentry(name, entry) for entry in dkimDict]
    params = {'keyname': 'key1', 'v': 'DKIM1', 'k': 'rsa'}
    params.update(dkimDict)
    rrName = '{x[keyname]}._domainkey.{name}'.format(x=params, name=str(name))
    rrContent = 'v={x[v]}; k={x[k]}; p={x[p]}'.format(x=params)
    return {'name': rrName, 'type': 'TXT', 'content': rrContent}
def parseTLSAentry(record):
    """Parse a TLSA resource record into name/port/proto plus the four TLSA fields."""
    nameParts = record['name'].split('.')
    log.debug(nameParts)
    contentParts = record['content'].split(' ')
    tlsa = {'name': '.'.join(nameParts[2:]),
            'port': nameParts[0],
            'proto': nameParts[1],
            'usage': contentParts[0],
            'selector': contentParts[1],
            'matchingtype': contentParts[2],
            'tlsa': contentParts[3]}
    # Strip the leading underscore of the "_443" / "_tcp" style labels.
    if '_' == tlsa['port'][0]:
        tlsa['port'] = tlsa['port'][1:]
    if '_' == tlsa['proto'][0]:
        tlsa['proto'] = tlsa['proto'][1:]
    tlsa['tlsa'] = tlsa['tlsa'].replace('\n', '')
    return tlsa
def formatTLSAentry(name, tlsaDict):
    """Build a TLSA resource record dict from TLSA parameters.

    Recurses over lists. Defaults: port '*', proto 'tcp'; a concrete port is
    prefixed with '_' in the record name.

    BUG FIX: the original required ``tlsa['tlsa']`` to be bytes (it called
    ``.replace(b'\\n', b'')``) and then formatted the bytes object into the
    record content, embedding a Python ``b'...'`` repr that parseTLSAentry()
    could never round-trip. The certificate data is now accepted as bytes or
    str and normalized to a newline-free str.
    """
    if type(tlsaDict) is list:
        return [formatTLSAentry(name, e) for e in tlsaDict]
    tlsa = {'port': '*', 'proto': 'tcp'}
    tlsa.update(tlsaDict)
    if '*' != tlsa['port']:
        tlsa['port'] = '_{}'.format(tlsa['port'])
    data = tlsa['tlsa']
    if isinstance(data, bytes):
        data = data.decode('ascii')
    tlsa['tlsa'] = data.replace('\n', '')
    return {'name': '{x[port]}._{x[proto]}.{name}'.format(x=tlsa, name=str(name)),
            'type': 'TLSA',
            'content': '{x[usage]} {x[selector]} {x[matchingtype]} {x[tlsa]}'.format(x=tlsa)}
def parseSRVentry(record):
    """Parse an SRV resource record into service/proto/weight/port/server fields."""
    nameParts = record['name'].split('.')
    contentParts = record['content'].split(' ')
    return {'name': '.'.join(nameParts[2:]),
            'service': nameParts[0][1:],   # drop leading '_'
            'proto': nameParts[1][1:],     # drop leading '_'
            'weight': contentParts[0],
            'port': contentParts[1],
            'server': contentParts[2],
            'prio': record['prio']}
def formatSRVentry(name, srvDict):
    """Build an SRV resource record dict; returns {} if a required field is missing.

    Recurses over lists. Required fields: service, proto, prio, weight,
    port, server.
    """
    if type(srvDict) is list:
        return [formatSRVentry(name, entry) for entry in srvDict]
    for field in ('service', 'proto', 'prio', 'weight', 'port', 'server'):
        if field not in srvDict:
            log.warn('Missing member \"{}\" in SRV entry!'.format(field))
            return {}
    rrName = '_{x[service]}._{x[proto]}.{name}'.format(x=srvDict, name=str(name))
    rrContent = '{x[weight]} {x[port]} {x[server]}'.format(x=srvDict)
    return {'name': rrName, 'type': 'SRV', 'prio': srvDict['prio'], 'content': rrContent}
def isSubDict(subDict, contentDict):
    """Return True if every key of subDict exists in contentDict with an equal value.

    Values are compared as strings, so 1 and '1' are considered equal.
    """
    return all(key in contentDict and str(val) == str(contentDict[key])
               for key, val in subDict.items())
def parseSPFentries(entryList):
    """Map SPF mechanisms to their qualifiers.

    Each entry like '-mx' or 'a' becomes {'mx': '-'} / {'a': '+'}; a missing
    qualifier defaults to '+'.

    BUG FIX: empty entries (produced e.g. by double spaces when splitting a
    TXT record content) are now skipped instead of raising IndexError on
    ``e[0]``.
    """
    entryDict = {}
    for e in entryList:
        if not e:
            continue
        if e[0] in '+-~?':
            entryDict[e[1:]] = e[0]
        else:
            entryDict[e] = '+'
    return entryDict
def formatSPFentries(entryDict):
    """Render a qualifier dict back into SPF mechanism strings, 'all' last.

    Inverse of parseSPFentries(): {'mx': '-', 'all': '~'} -> ['-mx', '~all'].

    BUG FIX: the caller's dict is no longer modified (the original deleted
    the 'all' key from the argument as a side effect).
    """
    entries = dict(entryDict)
    allVal = []
    if 'all' in entries:
        allVal = [str(entries['all']) + 'all']
        del entries['all']
    entryList = ['{v}{k}'.format(v=v, k=k) for k, v in entries.items()]
    entryList.extend(allVal)
    return entryList
def qryDNS(nsName, qryName, recType, ns=None):
    """Resolve `qryName`/`recType` by querying the nameserver `nsName` directly.

    :param nsName: hostname of the nameserver to query (resolved via gethostbyname)
    :param qryName: the DNS name to look up
    :param recType: record type string, e.g. 'SOA'
    :param ns: optional nameserver address(es) — see NOTE below
    :return: list of rdata answers
    """
    resolver = dns.resolver.Resolver()
    if ns is not None:
        if type(ns) is not list:
            ns = [ns]
        if 0 < len(ns):
            resolver.nameservers = ns
    # NOTE(review): this line unconditionally overwrites the nameservers set
    # from `ns` above, so the `ns` parameter is effectively ignored — confirm
    # whether `ns` was meant to be a fallback when resolving nsName fails.
    resolver.nameservers=[socket.gethostbyname(nsName)]
    return [rdata for rdata in resolver.query(qryName, recType)]
def parseDMARC(dmarcStr):
    """Parse a DMARC TXT record ('v=DMARC1; p=none; ...') into a tag dict.

    Spaces are stripped from both tags and values.

    BUG FIX: segments without '=' (e.g. produced by a trailing ';', which is
    common in published DMARC records) are now ignored instead of raising
    IndexError; values containing '=' are kept whole instead of truncated.
    """
    result = {}
    for part in dmarcStr.split(';'):
        if '=' not in part:
            continue
        tag, value = part.split('=', 1)
        result[tag.replace(' ', '')] = value.replace(' ', '')
    return result
def formatDMARC(dmarcDict):
    """Serialize a DMARC tag dict to record text with the 'v' tag forced first.

    BUG FIX: the caller's dict is no longer modified (the original deleted
    the 'v' key from the argument as a side effect).
    """
    tags = dict(dmarcDict)
    version = 'v={v}'.format(v=tags.pop('v'))
    return ';'.join([version] + ['{k}={v}'.format(k=k, v=v) for k, v in tags.items()])
def sanIPv4(x):
    """Strip every character that cannot appear in a dotted IPv4 address."""
    allowed = set('0123456789.')
    return ''.join(ch for ch in x if ch in allowed)
def sanIPv6(x):
    """Strip every character that cannot appear in an IPv6 address (hex digits and ':')."""
    allowed = set('0123456789:abcdefABCDEF')
    return ''.join(ch for ch in x if ch in allowed)
def curlGet(url):
    """Fetch `url` via pycurl (4s connect timeout) and return the body as text.

    BUG FIX: the curl handle is now closed even when perform() raises, so a
    failed transfer no longer leaks the handle.
    """
    buff = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.CONNECTTIMEOUT, 4)
        c.setopt(c.URL, str(url))
        c.setopt(c.WRITEDATA, buff)
        c.perform()
    finally:
        c.close()
    return buff.getvalue().decode()
def getIPv4(a = 'auto'):
    """Return `a` unchanged unless it is 'auto'; then look up the public IPv4 address.

    Returns None when the lookup fails.
    """
    if a != 'auto':
        return a
    try:
        response = curlGet('ipv4.icanhazip.com')
    except Exception:
        return None
    return sanIPv4(response)
def getIPv6(aaaa = 'auto'):
    """Return `aaaa` unchanged unless it is 'auto'; then look up the public IPv6 address.

    Returns None when the lookup fails.
    """
    if aaaa != 'auto':
        return aaaa
    try:
        response = curlGet('ipv6.icanhazip.com')
        log.debug(response)
    except Exception:
        return None
    return sanIPv6(response)
def genSPF(spf, behavior = '?all', v = 'spf1'):
    """Assemble an SPF TXT record string.

    `spf` may be a str, set or list of mechanisms; the version tag is
    prepended unless `v` is None and the default-behavior mechanism is
    appended unless `behavior` is None.
    """
    if type(spf) is str:
        spf = [spf]
    if type(spf) is set:
        spf = list(spf)
    pieces = spf if v is None else ['v=' + v] + spf
    if behavior is not None:
        pieces += [behavior]
    return ' '.join(pieces)
def genCAA(caaDict):
    """Render one or more CAA parameter dicts into record-content strings.

    Defaults: flag 0, tag 'issue'. The 'url' key is required and raises
    KeyError when absent.
    """
    entries = [caaDict] if type(caaDict) is dict else caaDict
    rendered = []
    for entry in entries:
        caa = {'flag': 0, 'tag': 'issue'}
        caa.update(entry)
        rendered.append('{x[flag]} {x[tag]} "{x[url]}"'.format(x=caa))
    return rendered
def parseCAA(caaRR):
    """Parse a CAA resource record content ('flag tag "url"') into a dict of strings."""
    caaStr = caaRR['content']
    log.debug(caaStr)
    flag, tag, url = caaStr.split(' ')
    # The url is stored quoted in the record content; strip the quotes.
    caa = {'flag': str(flag), 'tag': str(tag), 'url': str(url[1:-1])}
    log.debug(caa)
    return caa
def encDNSemail(x):
    """Encode an email address into SOA RNAME form.

    Dots in the local part are escaped, the '@' becomes a label separator
    and a trailing dot is appended. Input without '@' is returned unchanged.
    """
    parts = x.split('@')
    log.debug(parts)
    if len(parts) == 1:
        return x
    elif len(parts) > 1:
        local = parts[0].replace('.', '\\.')
        return local + '.' + parts[1] + '.'
    else:
        raise TypeError('No valid email address')
def decDNSemail(x):
    """Decode an SOA RNAME back into an email address (inverse of encDNSemail).

    Input that already contains exactly one '@' is returned unchanged; more
    than one '@' raises TypeError.
    """
    atCount = len(x.split('@'))
    if atCount == 2:
        return x
    if atCount > 2:
        raise TypeError('No valid email address')
    # Escaped dots delimit the local part; the first plain dot after it
    # separates local part from domain.
    parts = x.split('\\.')
    decoded = '.'.join(parts[:-1]) + '.' + '@'.join(parts[-1].split('.', 1))
    if decoded[0] == '.':
        decoded = decoded[1:]
    if decoded[-1] == '.':
        return decoded[:-1]
    return decoded
def makeIP4(a):
    """Normalize an IPv4 spec (None / 'auto' / str / list) into a list of addresses.

    None is treated as 'auto'; each entry is resolved via getIPv4() and
    failed lookups (None results) are dropped.
    """
    if a is None:
        a = 'auto'
    if type(a) is not list:
        a = [a]
    resolved = [getIPv4(entry) for entry in a]
    return [ip for ip in resolved if ip is not None]
def makeIP6(aaaa):
    """Normalize an IPv6 spec (None / 'auto' / str / list) into a list of addresses.

    None is treated as 'auto'; each entry is resolved via getIPv6() and
    failed lookups (None results) are dropped.
    """
    if aaaa is None:
        aaaa = 'auto'
    if type(aaaa) is not list:
        aaaa = [aaaa]
    log.debug(aaaa)
    resolved = [getIPv6(entry) for entry in aaaa]
    resolved = [ip for ip in resolved if ip is not None]
    log.debug(resolved)
    return resolved
def soaUpdate(curSOAdict, updSOAdict):
    """Merge updates into the current SOA, bump the serial and render record content.

    Returns {'content': <SOA record text>, 'id': <record id>} without
    modifying either input dict.
    """
    soa = dict(curSOAdict)
    soa.update(updSOAdict)
    soa['serial'] = soa['serial'] + 1
    soa['hostmaster'] = encDNSemail(soa['hostmaster'])
    content = ('{soa[primns]} {soa[hostmaster]} {soa[serial]} {soa[refresh]} '
               '{soa[retry]} {soa[expire]} {soa[ncttl]}').format(soa=soa)
    return {'content': content, 'id': soa['id']}
def soaQRYs2dict(soaNSqry, soaAPIqry):
    """Combine a direct DNS SOA answer and the API's SOA record into one dict.

    Field values come from the DNS answer; the record id comes from the API.
    """
    soa = soaNSqry
    return {
        'primns': soa.mname.to_text(),
        'hostmaster': decDNSemail(soa.rname.to_text()),
        'serial': soa.serial,
        'refresh': soa.refresh,
        'retry': soa.retry,
        'expire': soa.expire,
        'ncttl': soa.minimum,
        'id': soaAPIqry['id'],
    }
def recordFilter(entry, records, parser=None):
    """Return copies of `records` whose fields match `entry` (via isSubDict).

    Each record copy is first augmented with the fields produced by
    `parser(record)` when a parser is given; the originals stay untouched.
    """
    matches = []
    for record in records:
        candidate = dict(record)
        if parser is not None:
            candidate.update(parser(candidate))
        if isSubDict(entry, candidate):
            matches.append(candidate)
    return matches
class DNSUpTools(DNSUpdate):
    """Convenience layer on top of DNSUpdate for common record types.

    Offers add*/del*/set*/qry* helpers for A, AAAA, MX, CNAME, TXT, NS, SOA,
    TLSA, SRV, SPF, DMARC, ADSP, ACME challenge, CAA and DKIM records.
    The usual pattern: add* creates records, del* removes matching records
    while honoring a preserve list, set* is add* followed by del* of
    everything else.
    """
    def __init__(self):
        DNSUpdate.__init__(self)

    def qrySOA(self, name):
        # Query the SOA via the API, then ask the primary nameserver directly
        # to obtain the refresh/retry/expire/ncttl values.
        soaAPI = self.qry({'name': name, 'type': 'SOA'})[0]
        soaList = soaAPI['content'].split(' ')
        ns = [e['content'] for e in self.qryNS(name)[0]]
        soaNS = qryDNS(soaList[0], name, 'SOA', ns)[0] # extended query for last 4 values - WARNING internal nameserver update takes time, consecutive updates may result in inconsistencies
        return soaQRYs2dict(soaNS, soaAPI)

    def setSOAentry(self, name, updSOAdict):
        # Merge updates into the live SOA and write it back; soaUpdate()
        # increments the serial.
        soa = self.qrySOA(name)
        soaRR = soaUpdate(soa, updSOAdict)
        self.updOrAddDictList({'name': name, 'type': 'SOA'}, soaRR)

    def addA(self, name, a = 'auto'):
        # 'auto' resolves to the public IPv4 address of this host.
        a = makeIP4(a)
        self.addList({'name': name, 'type': 'A'}, a)

    def delA(self, name, aDelete = '*', aPreserve = []):
        aPreserve = makeIP4(aPreserve)
        self.delList({'name': name, 'type': 'A'}, aDelete, aPreserve)

    def setA(self, name, a = 'auto'):
        self.addA(name, a)
        self.delA(name, '*', a)

    def addAAAA(self, name, aaaa):
        # 'auto' (via makeIP6) resolves to the public IPv6 address.
        aaaa = makeIP6(aaaa)
        self.addList({'name': name, 'type': 'AAAA'}, aaaa)

    def delAAAA(self, name, aaaaDelete = '*', aaaaPreserve = []):
        aaaaPreserve = makeIP6(aaaaPreserve)
        self.delList({'name': name, 'type': 'AAAA'}, aaaaDelete, aaaaPreserve)

    def setAAAA(self, name, aaaa = 'auto'):
        self.addAAAA(name, aaaa)
        self.delAAAA(name, '*', aaaa)

    def addMX(self, name, mx):
        # Default priority 10 unless the entry specifies one.
        self.addDictList({'name': name, 'type': 'MX', 'prio': 10}, mx)

    def delMX(self, name, mxDelete = [{}], mxPreserve = []):
        self.delDictList({'name': name, 'type': 'MX'}, mxDelete, mxPreserve)

    def setMX(self, name, mx):
        self.addMX(name, mx)
        self.delMX(name, [{}], mx)

    def addCNAME(self, name, cname):
        self.addList({'name': name, 'type': 'CNAME'}, cname)

    def delCNAME(self, name, cnameDelete = '*', cnamePreserve = []):
        self.delList({'name': name, 'type': 'CNAME'}, cnameDelete, cnamePreserve)

    def setCNAME(self, name, cname):
        self.addCNAME(name, cname)
        self.delCNAME(name, '*', cname)

    def addTXT(self, name, txt):
        self.addList({'name': name, 'type': 'TXT'}, txt)

    def delTXT(self, name, txtDelete = '*', txtPreserve = []):
        self.delList({'name': name, 'type': 'TXT'}, txtDelete, txtPreserve)

    def setTXT(self, name, txt):
        self.addTXT(name, txt)
        self.delTXT(name, '*', txt)

    def addNS(self, name, ns):
        self.addList({'name': name, 'type': 'NS'}, ns)

    def delNS(self, name, nsDelete = '*', nsPreserve = []):
        self.delList({'name': name, 'type': 'NS'}, nsDelete, nsPreserve)

    def qryNS(self, name):
        return self.qryRR(name, 'NS')

    def setNS(self, name, ns):
        self.addNS(name, ns)
        self.delNS(name, '*', ns)

    def addTLSA(self, name, tlsaDict):
        # tlsaFromFile() resolves 'filename' entries into concrete TLSA data.
        tlsaDictList = tlsaFromFile(tlsaDict)
        tlsaRRdictList = formatTLSAentry(name, tlsaDictList)
        self.addDictList({}, tlsaRRdictList)

    def delTLSA(self, name, tlsaDelete={}, tlsaPreserve = []):
        if type(tlsaDelete) is dict:
            tlsaDelete = [tlsaDelete]
        if type(tlsaPreserve) is dict:
            tlsaPreserve = [tlsaPreserve]
        tlsaFromFile(tlsaDelete)
        tlsaFromFile(tlsaPreserve)
        # Drop the bookkeeping keys so they do not take part in record matching.
        for i, e in enumerate(tlsaDelete):
            if 'filename' in e:
                del tlsaDelete[i]['filename']
            if 'op' in e:
                del tlsaDelete[i]['op']
        for i, e in enumerate(tlsaPreserve):
            if 'filename' in e:
                del tlsaPreserve[i]['filename']
            if 'op' in e:
                del tlsaPreserve[i]['op']
        deleteRv = self.qryTLSA(name, tlsaDelete)
        preserveRv = self.qryTLSA(name, tlsaPreserve)
        return self.deleteRv(deleteRv, preserveRv)

    def setTLSA(self, name, tlsaDict):
        self.addTLSA(name, tlsaDict)
        self.delTLSA(name, {}, tlsaDict)

    def addTLSAfromCert(self, name, certFilenames, tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]):
        # Accepts the string 'auto' (or a list rendering to it) as "all types".
        if 'auto' == str(tlsaTypes):
            tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]
        log.debug('name = %s' % name)
        log.debug('certFilenames = %s' % certFilenames)
        self.addTLSA(name, tlsaRecordsFromCertFile(certFilenames, tlsaTypes))

    def delTLSApreserveFromCert(self, name, tlsaDelete = {}, certFilenamesPreserve = []):
        self.delTLSA(name, tlsaDelete, tlsaRecordsFromCertFile(certFilenamesPreserve))

    def setTLSAfromCert(self, name, certFilenames, tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]):
        if 'auto' == str(tlsaTypes):
            tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]
        self.setTLSA(name, tlsaRecordsFromCertFile(certFilenames, tlsaTypes))

    def setSPFentry(self, name, spfADD, spfDEL = {}):
        # Merge spfADD into the existing SPF record, removing spfDEL entries
        # first; '*' in spfDEL discards all existing mechanisms.
        if 0 == len(spfADD) and 0 == len(spfDEL):
            return
        rrQ = self.qrySPF(name)
        if 0 == len(rrQ):
            self.setSPF(name, formatSPFentries(parseSPFentries(set(spfADD))))
            return
        spfQ = rrQ[0]['content'].split(' ')
        spfID = rrQ[0]['id']
        spfSqry = set(spfQ[1:])
        spfSdel = set(spfDEL)
        if '*' in spfSdel:
            spfSqry = {}
        spfS = {e for e in spfSqry if e not in spfSdel}
        spfD = parseSPFentries(spfS)
        spfD.update(parseSPFentries(set(spfADD)))
        spfL = formatSPFentries(spfD)
        # spfQ[0][2:] strips the leading 'v=' from the version token.
        self.setSPF(name, spfL, spfID, spfQ[0][2:])

    def qrySPF(self, name):
        # SPF lives in TXT records; keep only records carrying the v=spf1 tag.
        rv = self.qry({'name': str(name), 'type': 'TXT'})
        return [rr for rr in rv if 'v=spf1' in rr['content'].split(' ')]

    def delSPF(self, name):
        # NOTE(review): qrySPF() returns a *list* of records, so spf['id']
        # below raises TypeError; this likely should be spf[0]['id'] guarded
        # by an emptiness check — confirm before relying on delSPF.
        spf = self.qrySPF(name)
        self.setSPF(name, [], spf['id'])

    # only one SPF record allowed
    def setSPF(self, name, spf, rrID = None, v = 'spf1'):
        if 0 == len(spf):
            # Empty mechanism list means delete (when a record id is known).
            if rrID is None:
                return
            self.delete({'recordId': rrID})
            return
        spf = ' '.join(formatSPFentries(parseSPFentries(spf)))
        txt = genSPF(spf, None, v)
        updR = {'content': txt}
        if rrID is not None:
            updR['id'] = rrID
        self.updOrAddDictList({'name': str(name), 'type': 'TXT'}, updR)

    def delDMARC(self, name):
        self.delTXT('_dmarc.'+str(name))

    # only one DMARC record allowed
    def setDMARC(self, name, dmarcDict):
        log.debug(dmarcDict)
        if {} == dmarcDict:
            self.delDMARC(name)
            return
        dmarc = {'v': 'DMARC1', 'p': 'none'}
        dmarc.update(dmarcDict)
        # Empty-string values delete the corresponding tag.
        dmarc = {k: v for k, v in dmarc.items() if '' != v}
        dmarcStr = formatDMARC(dmarc)
        self.update({'name': '_dmarc.'+str(name), 'type': 'TXT'}, {'content': dmarcStr})

    def qryDMARC(self, name):
        dmarcRv = self.qry({'name': '_dmarc.'+str(name), 'type': 'TXT'})
        dmarcQ = [parseDMARC(rr['content']) for rr in dmarcRv]
        return dmarcQ

    def setDMARCentry(self, name, dmarcDict):
        # Merge with the published record; an empty-string key acts as a
        # reset marker that discards all existing tags first.
        q = self.qryDMARC(name)
        dmarc = {}
        for e in q:
            dmarc.update(e)
        if '' in dmarcDict:
            dmarc = dict(dmarcDict)
            del dmarc['']
        else:
            dmarc.update(dmarcDict)
        self.setDMARC(name, dmarc)

    def delADSP(self, name, adspDelete = '*', adspPreserve = []):
        if '*' == adspDelete:
            self.delTXT('_adsp._domainkey.' + str(name), '*', adspPreserve)
        else:
            self.delTXT('_adsp._domainkey.' + str(name), 'dkim=' + str(adspDelete), adspPreserve)

    # only one ADSP record allowed
    def setADSP(self, name, adsp):
        # An empty value deletes the ADSP record.
        if '' == adsp:
            self.delADSP(name)
            return
        self.update({'name': '_adsp._domainkey.' + str(name), 'type': 'TXT'}, {'content': 'dkim=' + str(adsp)})

    def setACME(self, name, challenge=''):
        # An empty challenge deletes the ACME challenge record.
        if '' == challenge:
            self.delACME(name)
            return
        self.update({'name': '_acme-challenge.' + str(name), 'type': 'TXT'}, {'content': str(challenge)})

    def delACME(self, name):
        self.delTXT('_acme-challenge.' + str(name), '*')

    def addCAA(self, name, caaDict):
        try:
            self.addList({'name': str(name), 'type': 'CAA'}, genCAA(caaDict))
        except KeyError as e:
            # genCAA raises KeyError when a required 'url' is missing.
            log.warn('Not adding CAA record!')

    def setCAA(self, name, caaDict):
        self.addCAA(name, caaDict)
        self.delCAA(name, [{}], caaDict)

    def qryCAA(self, name, caaDict = {}):
        if type(caaDict) is dict:
            caaDict = [caaDict]
        for e in caaDict:
            e['name'] = str(name)
        return self.qryRR(str(name), 'CAA', parseCAA, caaDict, [])

    def delCAA(self, name, caaDelete = [{}], caaPreserve = []):
        deleteRv = self.qryCAA(name, caaDelete)
        preserveRv = self.qryCAA(name, caaPreserve)
        return self.deleteRv(deleteRv, preserveRv)

    def addSRV(self, name, srvDict):
        log.debug(srvDict)
        # Fill default priority/weight before formatting the record(s).
        srvDictList = defaultDictList({'prio': 10, 'weight' : 0}, srvDict)
        srvRRdictList = formatSRVentry(name, srvDictList)
        self.addDictList({}, srvRRdictList)

    def qryRR(self, name, rrType, parser=None, rrDict = {}, qryFilters=[MatchUpperLabels]):
        # Generic record query: fetch, optionally parse, then filter each
        # rrDict pattern against the results via recordFilter().
        rrRv = self.qryWild({'name': name, 'type': rrType}, qryFilters)
        if type(rrDict) is dict:
            rrDict = [rrDict]
        for i, e in enumerate(rrDict):
            rrDict[i]['name'] = name
        return [recordFilter(e, rrRv, parser) for e in rrDict]

    def qryTLSA(self, name, tlsaDict = {}):
        return self.qryRR(name, 'TLSA', parseTLSAentry, tlsaDict)

    def qrySRV(self, name, srvDict = {}):
        return self.qryRR(name, 'SRV', parseSRVentry, srvDict)

    def delSRV(self, name, srvDelete, srvPreserve = []):
        deleteRv = self.qrySRV(name, srvDelete)
        preserveRv = self.qrySRV(name, srvPreserve)
        return self.deleteRv(deleteRv, preserveRv)

    def setSRV(self, name, srvDict):
        self.addSRV(name, srvDict)
        self.delSRV(name, {}, srvDict)

    def addDKIM(self, name, dkimDict):
        # dkimFromFile() resolves 'filename' entries into key material.
        dkimDict = dkimFromFile(dkimDict)
        dkimRRdictList = formatDKIMentry(name, dkimDict)
        self.addDictList({}, dkimRRdictList)

    def addDKIMfromFile(self, name, filenames):
        if type(filenames) is str:
            filenames = [filenames]
        dkimDictList = [{'filename': e} for e in filenames]
        self.addDKIM(name, dkimDictList)

    def qryDKIM(self, name, dkimDict):
        rv = self.qryRR(name, 'TXT', parseDKIMentry, dkimDict)
        # Flatten the per-pattern result lists and exclude ADSP records,
        # which share the _domainkey label but are not DKIM keys.
        rv = [f for e in rv for f in e if f['keyname'] != '_adsp']
        return rv

    def delDKIM(self, name, dkimDelete = {}, dkimPreserve = []):
        if type(dkimDelete) is dict:
            dkimDelete = [dkimDelete]
        if type(dkimPreserve) is dict:
            dkimPreserve = [dkimPreserve]
        dkimFromFile(dkimDelete)
        dkimFromFile(dkimPreserve)
        # Drop the bookkeeping key so it does not take part in record matching.
        for i, e in enumerate(dkimDelete):
            if 'filename' in e:
                del dkimDelete[i]['filename']
        for i, e in enumerate(dkimPreserve):
            if 'filename' in e:
                del dkimPreserve[i]['filename']
        deleteRv = self.qryDKIM(name, dkimDelete)
        preserveRv = self.qryDKIM(name, dkimPreserve)
        return self.deleteRv(deleteRv, preserveRv)

    def delDKIMpreserveFromFile(self, name, filenames):
        if type(filenames) is str:
            filenames = [filenames]
        dkimPreserveList = [{'filename': e} for e in filenames]
        self.delDKIM(name, {}, dkimPreserveList)

    def setDKIM(self, name, dkimDict):
        self.addDKIM(name, dkimDict)
        self.delDKIM(name, {}, dkimDict)

    def setDKIMfromFile(self, name, filenames):
        self.addDKIMfromFile(name, filenames)
        self.delDKIMpreserveFromFile(name, filenames)
| 34.047934 | 223 | 0.572455 |
from dnsuptools.dnsupdate import defaultDictList, MatchUpperLabels, DNSUpdate
from dnsuptools.tlsarecgen import tlsaRecordsFromCertFile, tlsaFromFile
from dnsuptools.dkimrecgen import dkimFromFile
from simpleloggerplus import simpleloggerplus as log
import re
import pycurl
from io import BytesIO
import socket
import dns.resolver
def dkimKeySplit(dkimDict):
if type(dkimDict) is list:
return [dkimKeySplit(e) for e in dkimDict]
keyL = dkimDict['keyname'].split('_')
dkimDict['keybasename'] = keyL[0]
if 1 < len(keyL):
dkimDict['keynbr'] = keyL[1]
return dkimDict
def parseNSentry(record):
return {'ns': record['content']}
def parseDKIMentry(record):
key = record['name']
keyList = key.split('.')
val = record['content'].replace(' ', '')
valList = val.split(';')
valDict = {e.split('=')[0]: e.split('=')[1] for e in valList if '=' in e}
dkim = {'name': '.'.join(keyList[2:]), 'keyname': keyList[0], 'dkimlabel': keyList[1]}
dkim.update(valDict)
dkimKeySplit(dkim)
return dkim
def formatDKIMentry(name, dkimDict):
if type(dkimDict) is list:
return [formatDKIMentry(name, e) for e in dkimDict]
dkim = {'keyname': 'key1', 'v': 'DKIM1', 'k': 'rsa'}
dkim.update(dkimDict)
return {'name': '{x[keyname]}._domainkey.{name}'.format(x=dkim, name=str(name)), 'type': 'TXT', 'content': 'v={x[v]}; k={x[k]}; p={x[p]}'.format(x=dkim)}
def parseTLSAentry(record):
key = record['name']
keyList = key.split('.')
log.debug(keyList)
val = record['content']
valList = val.split(' ')
tlsa = {'name': '.'.join(keyList[2:]), 'port': keyList[0], 'proto': keyList[1], 'usage': valList[0], 'selector': valList[1], 'matchingtype': valList[2], 'tlsa': valList[3]}
if '_' == tlsa['port'][0]:
tlsa['port'] = tlsa['port'][1:]
if '_' == tlsa['proto'][0]:
tlsa['proto'] = tlsa['proto'][1:]
tlsa['tlsa'] = tlsa['tlsa'].replace('\n','')
return tlsa
def formatTLSAentry(name, tlsaDict):
    """Build a TLSA resource record dict from TLSA parameters.

    Recurses over lists. Defaults: port '*', proto 'tcp'; a concrete port is
    prefixed with '_' in the record name.

    BUG FIX: the original required ``tlsa['tlsa']`` to be bytes and embedded
    a Python ``b'...'`` repr in the record content; the certificate data is
    now accepted as bytes or str and normalized to a newline-free str.
    """
    if type(tlsaDict) is list:
        return [formatTLSAentry(name, e) for e in tlsaDict]
    tlsa = {'port': '*', 'proto': 'tcp'}
    tlsa.update(tlsaDict)
    if '*' != tlsa['port']:
        tlsa['port'] = '_{}'.format(tlsa['port'])
    data = tlsa['tlsa']
    if isinstance(data, bytes):
        data = data.decode('ascii')
    tlsa['tlsa'] = data.replace('\n', '')
    return {'name': '{x[port]}._{x[proto]}.{name}'.format(x=tlsa, name=str(name)),
            'type': 'TLSA',
            'content': '{x[usage]} {x[selector]} {x[matchingtype]} {x[tlsa]}'.format(x=tlsa)}
def parseSRVentry(record):
key = record['name']
keyList = key.split('.')
val = record['content']
valList = val.split(' ')
srv = {'name': '.'.join(keyList[2:]), 'service': keyList[0][1:], 'proto': keyList[1][1:], 'weight': valList[0], 'port': valList[1], 'server': valList[2], 'prio': record['prio']}
return srv
def formatSRVentry(name, srvDict):
if type(srvDict) is list:
return [formatSRVentry(name, e) for e in srvDict]
srv = srvDict
for k in ['service', 'proto', 'prio', 'weight', 'port', 'server']:
if k not in srv:
log.warn('Missing member \"{}\" in SRV entry!'.format(k))
return {}
return {'name': '_{x[service]}._{x[proto]}.{name}'.format(x=srv, name=str(name)), 'type': 'SRV', 'prio': srv['prio'], 'content': '{x[weight]} {x[port]} {x[server]}'.format(x=srv)}
def isSubDict(subDict, contentDict):
for k, v in subDict.items():
if k not in contentDict:
return False
if str(v) != str(contentDict[k]):
return False
return True
def parseSPFentries(entryList):
    """Map SPF mechanisms to their qualifiers ('-mx' -> {'mx': '-'}, default '+').

    BUG FIX: empty entries (e.g. from double spaces in TXT content) are
    skipped instead of raising IndexError on ``e[0]``.
    """
    entryDict = {}
    for e in entryList:
        if not e:
            continue
        if e[0] in '+-~?':
            entryDict[e[1:]] = e[0]
        else:
            entryDict[e] = '+'
    return entryDict
def formatSPFentries(entryDict):
    """Render a qualifier dict back into SPF mechanism strings, 'all' last.

    BUG FIX: the caller's dict is no longer modified (the original deleted
    the 'all' key from the argument as a side effect).
    """
    entries = dict(entryDict)
    allVal = []
    if 'all' in entries:
        allVal = [str(entries['all']) + 'all']
        del entries['all']
    entryList = ['{v}{k}'.format(v=v, k=k) for k, v in entries.items()]
    entryList.extend(allVal)
    return entryList
def qryDNS(nsName, qryName, recType, ns=None):
resolver = dns.resolver.Resolver()
if ns is not None:
if type(ns) is not list:
ns = [ns]
if 0 < len(ns):
resolver.nameservers = ns
resolver.nameservers=[socket.gethostbyname(nsName)]
return [rdata for rdata in resolver.query(qryName, recType)]
def parseDMARC(dmarcStr):
    """Parse a DMARC TXT record ('v=DMARC1; p=none; ...') into a tag dict.

    BUG FIX: segments without '=' (e.g. a trailing ';') are ignored instead
    of raising IndexError; values containing '=' are kept whole.
    """
    result = {}
    for part in dmarcStr.split(';'):
        if '=' not in part:
            continue
        tag, value = part.split('=', 1)
        result[tag.replace(' ', '')] = value.replace(' ', '')
    return result
def formatDMARC(dmarcDict):
    """Serialize a DMARC tag dict to record text with the 'v' tag forced first.

    BUG FIX: the caller's dict is no longer modified (the original deleted
    the 'v' key from the argument as a side effect).
    """
    tags = dict(dmarcDict)
    version = 'v={v}'.format(v=tags.pop('v'))
    return ';'.join([version] + ['{k}={v}'.format(k=k, v=v) for k, v in tags.items()])
def sanIPv4(x):
return re.sub('[^0-9.]', '', x)
def sanIPv6(x):
return re.sub('[^0-9:a-fA-F]', '', x)
def curlGet(url):
    """Fetch `url` via pycurl (4s connect timeout) and return the body as text.

    BUG FIX: the curl handle is now closed even when perform() raises, so a
    failed transfer no longer leaks the handle.
    """
    buff = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.CONNECTTIMEOUT, 4)
        c.setopt(c.URL, str(url))
        c.setopt(c.WRITEDATA, buff)
        c.perform()
    finally:
        c.close()
    return buff.getvalue().decode()
def getIPv4(a = 'auto'):
if 'auto' != a:
return a
try:
ipv4Str = curlGet('ipv4.icanhazip.com')
except Exception as e:
return None
return sanIPv4(ipv4Str)
def getIPv6(aaaa = 'auto'):
if 'auto' != aaaa:
return aaaa
try:
ipv6Str = curlGet('ipv6.icanhazip.com')
log.debug(ipv6Str)
except Exception as e:
return None
return sanIPv6(ipv6Str)
def genSPF(spf, behavior = '?all', v = 'spf1'):
if type(spf) is str:
spf = [spf]
if type(spf) is set:
spf = list(spf)
if v is not None:
spf = ['v=' + v] + spf
if behavior is not None:
spf += [behavior]
return ' '.join(spf)
def genCAA(caaDict):
if type(caaDict) is dict:
caaDict = [caaDict]
caaList = []
for e in caaDict:
caa = {'flag': 0, 'tag': 'issue'}
caa.update(e)
caaStr = '{x[flag]} {x[tag]} "{x[url]}"'.format(x=caa)
caaList.append(caaStr)
return caaList
def parseCAA(caaRR):
caaStr = caaRR['content']
log.debug(caaStr)
caa = {}
caa['flag'], caa['tag'], caa['url'] = caaStr.split(' ')
caa['url'] = caa['url'][1:-1]
caa = {str(k): str(v) for k, v in caa.items()}
log.debug(caa)
return caa
def encDNSemail(x):
xSpl = x.split('@')
log.debug(xSpl)
if 1 == len(xSpl):
return x
elif 1 < len(xSpl):
return xSpl[0].replace('.', '\\.') + '.' + xSpl[1] + '.'
else:
raise(TypeError('No valid email address'))
def decDNSemail(x):
if 2 == len(x.split('@')):
return x
elif 2 < len(x.split('@')):
raise(TypeError('No valid email address'))
else:
xSpl = x.split('\\.')
y = '.'.join(xSpl[:-1]) + '.' + '@'.join(xSpl[-1].split('.', 1))
if '.' == y[0]:
y = y[1:]
if '.' == y[-1]:
return y[:-1]
else:
return y
def makeIP4(a):
if a is None:
a = 'auto'
if type(a) is not list:
a = [a]
a = [getIPv4(e) for e in a]
a = [e for e in a if e is not None]
return a
def makeIP6(aaaa):
if aaaa is None:
aaaa = 'auto'
if type(aaaa) is not list:
aaaa = [aaaa]
log.debug(aaaa)
aaaa = [getIPv6(e) for e in aaaa]
aaaa = [e for e in aaaa if e is not None]
log.debug(aaaa)
return aaaa
def soaUpdate(curSOAdict, updSOAdict):
soa = dict(curSOAdict)
soa.update(updSOAdict)
soa['serial'] += 1
soa['hostmaster'] = encDNSemail(soa['hostmaster'])
soaTXT = '{soa[primns]} {soa[hostmaster]} {soa[serial]} {soa[refresh]} {soa[retry]} {soa[expire]} {soa[ncttl]}'.format(soa = soa)
return {'content': soaTXT, 'id': soa['id']}
def soaQRYs2dict(soaNSqry, soaAPIqry):
soa = soaNSqry
return {'primns': soa.mname.to_text(), 'hostmaster': decDNSemail(soa.rname.to_text()), 'serial': soa.serial, 'refresh': soa.refresh, 'retry': soa.retry, 'expire': soa.expire, 'ncttl': soa.minimum, 'id': soaAPIqry['id']}
def recordFilter(entry, records, parser=None):
    """Return copies of the records in *records* that contain *entry*
    as a sub-dict, optionally augmenting each copy with parser(copy)
    before matching."""
    matches = []
    for record in records:
        candidate = dict(record)
        if parser is not None:
            candidate.update(parser(candidate))
        if isSubDict(entry, candidate):
            matches.append(candidate)
    return matches
class DNSUpTools(DNSUpdate):
def __init__(self):
DNSUpdate.__init__(self)
    def qrySOA(self, name):
        """Query the SOA of *name*: live values from one of the zone's
        own nameservers, record id from the provider API."""
        soaAPI = self.qry({'name': name, 'type': 'SOA'})[0]
        soaList = soaAPI['content'].split(' ')
        # ask an authoritative nameserver of the zone for the current SOA
        ns = [e['content'] for e in self.qryNS(name)[0]]
        soaNS = qryDNS(soaList[0], name, 'SOA', ns)[0]
        return soaQRYs2dict(soaNS, soaAPI)
def setSOAentry(self, name, updSOAdict):
soa = self.qrySOA(name)
soaRR = soaUpdate(soa, updSOAdict)
self.updOrAddDictList({'name': name, 'type': 'SOA'}, soaRR)
def addA(self, name, a = 'auto'):
a = makeIP4(a)
self.addList({'name': name, 'type': 'A'}, a)
def delA(self, name, aDelete = '*', aPreserve = []):
aPreserve = makeIP4(aPreserve)
self.delList({'name': name, 'type': 'A'}, aDelete, aPreserve)
def setA(self, name, a = 'auto'):
self.addA(name, a)
self.delA(name, '*', a)
def addAAAA(self, name, aaaa):
aaaa = makeIP6(aaaa)
self.addList({'name': name, 'type': 'AAAA'}, aaaa)
def delAAAA(self, name, aaaaDelete = '*', aaaaPreserve = []):
aaaaPreserve = makeIP6(aaaaPreserve)
self.delList({'name': name, 'type': 'AAAA'}, aaaaDelete, aaaaPreserve)
def setAAAA(self, name, aaaa = 'auto'):
self.addAAAA(name, aaaa)
self.delAAAA(name, '*', aaaa)
def addMX(self, name, mx):
self.addDictList({'name': name, 'type': 'MX', 'prio': 10}, mx)
def delMX(self, name, mxDelete = [{}], mxPreserve = []):
self.delDictList({'name': name, 'type': 'MX'}, mxDelete, mxPreserve)
def setMX(self, name, mx):
self.addMX(name, mx)
self.delMX(name, [{}], mx)
def addCNAME(self, name, cname):
self.addList({'name': name, 'type': 'CNAME'}, cname)
def delCNAME(self, name, cnameDelete = '*', cnamePreserve = []):
self.delList({'name': name, 'type': 'CNAME'}, cnameDelete, cnamePreserve)
def setCNAME(self, name, cname):
self.addCNAME(name, cname)
self.delCNAME(name, '*', cname)
def addTXT(self, name, txt):
self.addList({'name': name, 'type': 'TXT'}, txt)
def delTXT(self, name, txtDelete = '*', txtPreserve = []):
self.delList({'name': name, 'type': 'TXT'}, txtDelete, txtPreserve)
def setTXT(self, name, txt):
self.addTXT(name, txt)
self.delTXT(name, '*', txt)
def addNS(self, name, ns):
self.addList({'name': name, 'type': 'NS'}, ns)
def delNS(self, name, nsDelete = '*', nsPreserve = []):
self.delList({'name': name, 'type': 'NS'}, nsDelete, nsPreserve)
def qryNS(self, name):
return self.qryRR(name, 'NS')
def setNS(self, name, ns):
self.addNS(name, ns)
self.delNS(name, '*', ns)
def addTLSA(self, name, tlsaDict):
tlsaDictList = tlsaFromFile(tlsaDict)
tlsaRRdictList = formatTLSAentry(name, tlsaDictList)
self.addDictList({}, tlsaRRdictList)
def delTLSA(self, name, tlsaDelete={}, tlsaPreserve = []):
if type(tlsaDelete) is dict:
tlsaDelete = [tlsaDelete]
if type(tlsaPreserve) is dict:
tlsaPreserve = [tlsaPreserve]
tlsaFromFile(tlsaDelete)
tlsaFromFile(tlsaPreserve)
for i, e in enumerate(tlsaDelete):
if 'filename' in e:
del tlsaDelete[i]['filename']
if 'op' in e:
del tlsaDelete[i]['op']
for i, e in enumerate(tlsaPreserve):
if 'filename' in e:
del tlsaPreserve[i]['filename']
if 'op' in e:
del tlsaPreserve[i]['op']
deleteRv = self.qryTLSA(name, tlsaDelete)
preserveRv = self.qryTLSA(name, tlsaPreserve)
return self.deleteRv(deleteRv, preserveRv)
def setTLSA(self, name, tlsaDict):
self.addTLSA(name, tlsaDict)
self.delTLSA(name, {}, tlsaDict)
def addTLSAfromCert(self, name, certFilenames, tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]):
if 'auto' == str(tlsaTypes):
tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]
log.debug('name = %s' % name)
log.debug('certFilenames = %s' % certFilenames)
self.addTLSA(name, tlsaRecordsFromCertFile(certFilenames, tlsaTypes))
def delTLSApreserveFromCert(self, name, tlsaDelete = {}, certFilenamesPreserve = []):
self.delTLSA(name, tlsaDelete, tlsaRecordsFromCertFile(certFilenamesPreserve))
def setTLSAfromCert(self, name, certFilenames, tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]):
if 'auto' == str(tlsaTypes):
tlsaTypes = [[3,0,1], [3,0,2], [3,1,1], [3,1,2], [2,0,1], [2,0,2], [2,1,1], [2,1,2]]
self.setTLSA(name, tlsaRecordsFromCertFile(certFilenames, tlsaTypes))
def setSPFentry(self, name, spfADD, spfDEL = {}):
if 0 == len(spfADD) and 0 == len(spfDEL):
return
rrQ = self.qrySPF(name)
if 0 == len(rrQ):
self.setSPF(name, formatSPFentries(parseSPFentries(set(spfADD))))
return
spfQ = rrQ[0]['content'].split(' ')
spfID = rrQ[0]['id']
spfSqry = set(spfQ[1:])
spfSdel = set(spfDEL)
if '*' in spfSdel:
spfSqry = {}
spfS = {e for e in spfSqry if e not in spfSdel}
spfD = parseSPFentries(spfS)
spfD.update(parseSPFentries(set(spfADD)))
spfL = formatSPFentries(spfD)
self.setSPF(name, spfL, spfID, spfQ[0][2:])
def qrySPF(self, name):
rv = self.qry({'name': str(name), 'type': 'TXT'})
return [rr for rr in rv if 'v=spf1' in rr['content'].split(' ')]
def delSPF(self, name):
spf = self.qrySPF(name)
self.setSPF(name, [], spf['id'])
    def setSPF(self, name, spf, rrID = None, v = 'spf1'):
        """Write the SPF TXT record of *name* from mechanism list *spf*.

        An empty *spf* deletes the record (when rrID is known) or does
        nothing. Otherwise the mechanisms are normalized and stored as
        'v=<v> <mechanisms>' via update-or-add.
        """
        if 0 == len(spf):
            if rrID is None:
                return
            self.delete({'recordId': rrID})
            return
        spf = ' '.join(formatSPFentries(parseSPFentries(spf)))
        txt = genSPF(spf, None, v)
        updR = {'content': txt}
        if rrID is not None:
            # target the existing record instead of adding a new one
            updR['id'] = rrID
        self.updOrAddDictList({'name': str(name), 'type': 'TXT'}, updR)
def delDMARC(self, name):
self.delTXT('_dmarc.'+str(name))
def setDMARC(self, name, dmarcDict):
log.debug(dmarcDict)
if {} == dmarcDict:
self.delDMARC(name)
return
dmarc = {'v': 'DMARC1', 'p': 'none'}
dmarc.update(dmarcDict)
dmarc = {k: v for k, v in dmarc.items() if '' != v}
dmarcStr = formatDMARC(dmarc)
self.update({'name': '_dmarc.'+str(name), 'type': 'TXT'}, {'content': dmarcStr})
def qryDMARC(self, name):
dmarcRv = self.qry({'name': '_dmarc.'+str(name), 'type': 'TXT'})
dmarcQ = [parseDMARC(rr['content']) for rr in dmarcRv]
return dmarcQ
def setDMARCentry(self, name, dmarcDict):
q = self.qryDMARC(name)
dmarc = {}
for e in q:
dmarc.update(e)
if '' in dmarcDict:
dmarc = dict(dmarcDict)
del dmarc['']
else:
dmarc.update(dmarcDict)
self.setDMARC(name, dmarc)
def delADSP(self, name, adspDelete = '*', adspPreserve = []):
if '*' == adspDelete:
self.delTXT('_adsp._domainkey.' + str(name), '*', adspPreserve)
else:
self.delTXT('_adsp._domainkey.' + str(name), 'dkim=' + str(adspDelete), adspPreserve)
def setADSP(self, name, adsp):
if '' == adsp:
self.delADSP(name)
return
self.update({'name': '_adsp._domainkey.' + str(name), 'type': 'TXT'}, {'content': 'dkim=' + str(adsp)})
def setACME(self, name, challenge=''):
if '' == challenge:
self.delACME(name)
return
self.update({'name': '_acme-challenge.' + str(name), 'type': 'TXT'}, {'content': str(challenge)})
def delACME(self, name):
self.delTXT('_acme-challenge.' + str(name), '*')
def addCAA(self, name, caaDict):
try:
self.addList({'name': str(name), 'type': 'CAA'}, genCAA(caaDict))
except KeyError as e:
log.warn('Not adding CAA record!')
def setCAA(self, name, caaDict):
self.addCAA(name, caaDict)
self.delCAA(name, [{}], caaDict)
def qryCAA(self, name, caaDict = {}):
if type(caaDict) is dict:
caaDict = [caaDict]
for e in caaDict:
e['name'] = str(name)
return self.qryRR(str(name), 'CAA', parseCAA, caaDict, [])
def delCAA(self, name, caaDelete = [{}], caaPreserve = []):
deleteRv = self.qryCAA(name, caaDelete)
preserveRv = self.qryCAA(name, caaPreserve)
return self.deleteRv(deleteRv, preserveRv)
def addSRV(self, name, srvDict):
log.debug(srvDict)
srvDictList = defaultDictList({'prio': 10, 'weight' : 0}, srvDict)
srvRRdictList = formatSRVentry(name, srvDictList)
self.addDictList({}, srvRRdictList)
def qryRR(self, name, rrType, parser=None, rrDict = {}, qryFilters=[MatchUpperLabels]):
rrRv = self.qryWild({'name': name, 'type': rrType}, qryFilters)
if type(rrDict) is dict:
rrDict = [rrDict]
for i, e in enumerate(rrDict):
rrDict[i]['name'] = name
return [recordFilter(e, rrRv, parser) for e in rrDict]
def qryTLSA(self, name, tlsaDict = {}):
return self.qryRR(name, 'TLSA', parseTLSAentry, tlsaDict)
def qrySRV(self, name, srvDict = {}):
return self.qryRR(name, 'SRV', parseSRVentry, srvDict)
def delSRV(self, name, srvDelete, srvPreserve = []):
deleteRv = self.qrySRV(name, srvDelete)
preserveRv = self.qrySRV(name, srvPreserve)
return self.deleteRv(deleteRv, preserveRv)
def setSRV(self, name, srvDict):
self.addSRV(name, srvDict)
self.delSRV(name, {}, srvDict)
def addDKIM(self, name, dkimDict):
dkimDict = dkimFromFile(dkimDict)
dkimRRdictList = formatDKIMentry(name, dkimDict)
self.addDictList({}, dkimRRdictList)
def addDKIMfromFile(self, name, filenames):
if type(filenames) is str:
filenames = [filenames]
dkimDictList = [{'filename': e} for e in filenames]
self.addDKIM(name, dkimDictList)
def qryDKIM(self, name, dkimDict):
rv = self.qryRR(name, 'TXT', parseDKIMentry, dkimDict)
rv = [f for e in rv for f in e if f['keyname'] != '_adsp']
return rv
def delDKIM(self, name, dkimDelete = {}, dkimPreserve = []):
if type(dkimDelete) is dict:
dkimDelete = [dkimDelete]
if type(dkimPreserve) is dict:
dkimPreserve = [dkimPreserve]
dkimFromFile(dkimDelete)
dkimFromFile(dkimPreserve)
for i, e in enumerate(dkimDelete):
if 'filename' in e:
del dkimDelete[i]['filename']
for i, e in enumerate(dkimPreserve):
if 'filename' in e:
del dkimPreserve[i]['filename']
deleteRv = self.qryDKIM(name, dkimDelete)
preserveRv = self.qryDKIM(name, dkimPreserve)
return self.deleteRv(deleteRv, preserveRv)
def delDKIMpreserveFromFile(self, name, filenames):
if type(filenames) is str:
filenames = [filenames]
dkimPreserveList = [{'filename': e} for e in filenames]
self.delDKIM(name, {}, dkimPreserveList)
def setDKIM(self, name, dkimDict):
self.addDKIM(name, dkimDict)
self.delDKIM(name, {}, dkimDict)
def setDKIMfromFile(self, name, filenames):
self.addDKIMfromFile(name, filenames)
self.delDKIMpreserveFromFile(name, filenames)
| true | true |
f731fb916b3a3a9052f6cc548e01758b391e942f | 1,647 | py | Python | curriculum/03_functions_02_numbers/03_02_02_number_cruncher.py | google/teknowledge | aa55aa59c287f5fe3052e89d539f44252eee41a8 | [
"Apache-2.0"
] | 31 | 2017-11-11T09:10:57.000Z | 2021-10-13T22:53:57.000Z | curriculum/03_functions_02_numbers/03_02_02_number_cruncher.py | google/teknowledge | aa55aa59c287f5fe3052e89d539f44252eee41a8 | [
"Apache-2.0"
] | null | null | null | curriculum/03_functions_02_numbers/03_02_02_number_cruncher.py | google/teknowledge | aa55aa59c287f5fe3052e89d539f44252eee41a8 | [
"Apache-2.0"
] | 14 | 2017-11-10T02:19:42.000Z | 2021-10-13T22:53:47.000Z | def add(x, y):
return x + y
def crunchNumbers():
    # Interactive driver: asks for an operation name and two numbers,
    # then dispatches to the matching crunch function.
    print("How do you want me to crunch two numbers? ")
    crunchFunction = input("Type add or something else: ")
    # NOTE: input() returns str, so add() concatenates the digits —
    # this is the intentional bug of Challenge 2.1 (fix with int()).
    num1 = input('First number: ')
    num2 = input('Second number: ')
    if crunchFunction == "add":
        answer = add(num1, num2)
    elif crunchFunction == "subtract":
        # NOTE(review): subtract() is intentionally undefined — writing
        # it is Challenge 2.2; choosing it now raises NameError.
        answer = subtract(num1, num2)
    else:
        print("That's not a valid crunch method!")
        return
    print("The answer is", answer)
# Run the interactive loop once on import/execution of this script.
crunchNumbers()
# Challenge 2.1 - Run the code. The add function doesn't work right! Why is
# that? Fix it by using the built-in Python int() function.
# Hint: To see what the int() function can do, try these in Python:
# int("5")
# int(5.5)
# int(3)
# Challenge 2.2 - The subtract function is missing! Add it.
# Challenge 2.3 - Add a function called difference(x, y) that is like subtract
# but always returns the _positive_ difference between the numbers.
# Hint: You can use an if statement that uses the "greater than" comparison:
# if (x > y):
# BONUS Challenge 2.4 - Google search "Python math operators" and see if you
# can add these three new "crunch functions":
# - power(x, y) (which takes x to the power of y)
# - stringAdd(x, y) (which adds the numbers as strings, like add used to)
# Hint: You'll need the str() function, which turns str("5") -> 5
# - greatestValue(x, y, z) (which returns the greatest value of all 3)
# Hint: for greatestValue, you'll need to optionally take a third number
# as an input
| 35.042553 | 80 | 0.621736 | def add(x, y):
return x + y
def crunchNumbers():
print("How do you want me to crunch two numbers? ")
crunchFunction = input("Type add or something else: ")
num1 = input('First number: ')
num2 = input('Second number: ')
if crunchFunction == "add":
answer = add(num1, num2)
elif crunchFunction == "subtract":
answer = subtract(num1, num2)
else:
print("That's not a valid crunch method!")
return
print("The answer is", answer)
crunchNumbers()
# Challenge 2.1 - Run the code. The add function doesn't work right! Why is
# - greatestValue(x, y, z) (which returns the greatest value of all 3)
# Hint: for greatestValue, you'll need to optionally take a third number
| true | true |
f731fb9fe546857a3efba23ec45fc75883cfbe59 | 2,382 | py | Python | jogoteca/jogoteca.py | SkiereszDiego/Cursos_Alura | 8cebcfa317c47871a698e4328a3851c404d2267b | [
"MIT"
] | null | null | null | jogoteca/jogoteca.py | SkiereszDiego/Cursos_Alura | 8cebcfa317c47871a698e4328a3851c404d2267b | [
"MIT"
] | null | null | null | jogoteca/jogoteca.py | SkiereszDiego/Cursos_Alura | 8cebcfa317c47871a698e4328a3851c404d2267b | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, session, flash, url_for
app = Flask(__name__)
app.secret_key = 'alura'
class Jogo:
    """A game: its title, genre (categoria) and the console it runs on."""

    def __init__(self, nome, categoria, console):
        # store the constructor arguments as plain public attributes
        self.console = console
        self.categoria = categoria
        self.nome = nome
class Usuario:
    """An account holder: login id, display name and password."""

    def __init__(self, id, nome, senha):
        # store the constructor arguments as plain public attributes
        self.senha = senha
        self.nome = nome
        self.id = id
usuario1 = Usuario('luan', 'Luan Marques', '1234')
usuario2 = Usuario('nico', 'Nico Steppat', '7a1')
usuario3 = Usuario('flavio', 'Flávio', 'javascript')
usuarios = { usuario1.id: usuario1,
usuario2.id: usuario2,
usuario3.id: usuario3 }
jogo1 = Jogo('Super Mario', 'Acao', 'SNES')
jogo2 = Jogo('Pokemon Gold', 'RPG', 'GBA')
jogo3 = Jogo('Mortal Kombat', 'Luta', 'SNES')
lista = [jogo1, jogo2, jogo3]
@app.route('/')
def index():
    """Home page: render the list of all games."""
    return render_template('lista.html', titulo='Jogos',
                           jogos=lista)
@app.route('/novo')
def novo():
    """Render the new-game form; redirect to login when not authenticated.

    The login redirect carries 'proxima' so the user returns here after
    authenticating.
    """
    # idiom fix: 'is None' instead of '== None' (PEP 8); behavior identical
    if 'usuario_logado' not in session or session['usuario_logado'] is None:
        return redirect(url_for('login', proxima=url_for('novo')))
    return render_template('novo.html', titulo='Novo jogo')
@app.route('/criar', methods=['POST',])
def criar():
    """Create a game from the POSTed form fields and go back home."""
    nome = request.form['nome']
    categoria = request.form['categoria']
    console = request.form['console']
    jogo = Jogo(nome, categoria, console)
    # in-memory storage only: the list resets on every server restart
    lista.append(jogo)
    return redirect(url_for('index'))
@app.route('/login')
def login():
    """Render the login form; 'proxima' is the page to return to."""
    proxima = request.args.get('proxima')
    return render_template('login.html', proxima=proxima)
@app.route('/autenticar', methods=['POST',])
def autenticar():
    """Validate POSTed credentials; on success store the user id in the
    session and redirect to the originally requested page, otherwise
    flash an error and return to the login form.
    """
    if request.form['usuario'] in usuarios:
        usuario = usuarios[request.form['usuario']]
        # NOTE(review): plain-text password comparison — tutorial only;
        # real applications must store and compare hashes.
        if usuario.senha == request.form['senha']:
            session['usuario_logado'] = usuario.id
            flash(usuario.nome + ' logou com sucesso!')
            proxima_pagina = request.form['proxima']
            return redirect(proxima_pagina)
    # Bug fix: a known user with a wrong password previously fell through
    # and returned None (HTTP 500); now every failure path redirects.
    flash('Não logado, tente de novo!')
    return redirect(url_for('login'))
@app.route('/logout')
def logout():
    """Clear the logged-in user from the session and go back home."""
    session['usuario_logado'] = None
    flash('Nenhum usuario logado!')
    return redirect(url_for('index'))
app.run(debug=True)
| 29.775 | 85 | 0.612091 | from flask import Flask, render_template, request, redirect, session, flash, url_for
app = Flask(__name__)
app.secret_key = 'alura'
class Jogo:
def __init__(self, nome, categoria, console):
self.nome = nome
self.categoria = categoria
self.console = console
class Usuario:
def __init__(self, id, nome, senha):
self.id = id
self.nome = nome
self.senha = senha
usuario1 = Usuario('luan', 'Luan Marques', '1234')
usuario2 = Usuario('nico', 'Nico Steppat', '7a1')
usuario3 = Usuario('flavio', 'Flávio', 'javascript')
usuarios = { usuario1.id: usuario1,
usuario2.id: usuario2,
usuario3.id: usuario3 }
jogo1 = Jogo('Super Mario', 'Acao', 'SNES')
jogo2 = Jogo('Pokemon Gold', 'RPG', 'GBA')
jogo3 = Jogo('Mortal Kombat', 'Luta', 'SNES')
lista = [jogo1, jogo2, jogo3]
@app.route('/')
def index():
return render_template('lista.html', titulo='Jogos',
jogos=lista)
@app.route('/novo')
def novo():
if 'usuario_logado' not in session or session['usuario_logado'] == None:
return redirect(url_for('login', proxima=url_for('novo')))
return render_template('novo.html', titulo='Novo jogo')
@app.route('/criar', methods=['POST',])
def criar():
nome = request.form['nome']
categoria = request.form['categoria']
console = request.form['console']
jogo = Jogo(nome, categoria, console)
lista.append(jogo)
return redirect(url_for('index'))
@app.route('/login')
def login():
proxima = request.args.get('proxima')
return render_template('login.html', proxima=proxima)
@app.route('/autenticar', methods=['POST',])
def autenticar():
if request.form['usuario'] in usuarios:
usuario = usuarios[request.form['usuario']]
if usuario.senha == request.form['senha']:
session['usuario_logado'] = usuario.id
flash(usuario.nome + ' logou com sucesso!')
proxima_pagina = request.form['proxima']
return redirect(proxima_pagina)
else :
flash('Não logado, tente de novo!')
return redirect(url_for('login'))
@app.route('/logout')
def logout():
session['usuario_logado'] = None
flash('Nenhum usuario logado!')
return redirect(url_for('index'))
app.run(debug=True)
| true | true |
f731fc41c567abb5f04fc347849d441eeca55453 | 11,732 | py | Python | deepmap_cli/cli.py | yim-deepmap/cli | bf5fd3afe4d94c70f0b37111be2f749572b53ec7 | [
"Apache-2.0"
] | null | null | null | deepmap_cli/cli.py | yim-deepmap/cli | bf5fd3afe4d94c70f0b37111be2f749572b53ec7 | [
"Apache-2.0"
] | null | null | null | deepmap_cli/cli.py | yim-deepmap/cli | bf5fd3afe4d94c70f0b37111be2f749572b53ec7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
""" A command line interface for the Deepmap API. """
import argparse
import sys
import os
from deepmap_cli.constants import USER_CONFIG_PATH
from deepmap_cli.cli_requests import make_request
def init_cli():
    """ Initializes the CLI. """
    parser = argparse.ArgumentParser(
        prog='deepmap',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Possible commands are:"
        "\n"
        " login Receives and stores an authentication token for the api.\n"
        " reset_password Reset a password for an account.\n"
        " create Create a new access token, or session token from an access token.\n"
        " download Downloads the specified files and pipes output to stdout.\n"
        " list List valid users, maps, tokens, or tiles.\n"
        " invite Invite a user to join your account.\n"
        " get user Get a description of your account.\n"
        " edit user Edit the email or admin permissions of a user.\n"
        " delete Delete a user or token from your account.\n"
        "\n"
        "Use the -h flag for help information.\n"
        "For example, for general help, run \"deepmap -h\"\n"
        "For help on using a command, run: \"deepmap <command> -h\", replacing <command>\n"
        "with the specific command e.g. \"deepmap login -h\" for login command help.\n"
        "\n"
        "Also, prefix abbreviations are allowed for parameter names,\n"
        "as long as the abbreviation is unique e.g. --u or --user or --usern for\n"
        "--username in the login command.\n"
        "\n")
    subparsers = parser.add_subparsers(dest='command')
    # Register one sub-command parser per top-level command.
    init_login_parser(subparsers)
    init_reset_password_parser(subparsers)
    init_create_parser(subparsers)
    init_download_parser(subparsers)
    init_list_parser(subparsers)
    init_invite_parser(subparsers)
    init_get_parser(subparsers)
    init_edit_parser(subparsers)
    init_delete_parser(subparsers)
    args = parser.parse_args(sys.argv[1:])
    url_passed_in = False
    # Cast args to namespace for membership testing
    # (only the 'login' sub-command defines --server_url)
    if 'server_url' in vars(args).keys():
        # Check if args.server_url is not None
        if args.server_url:
            server_url = args.server_url
            url_passed_in = True
    if not url_passed_in:
        # Retrieve url if a previous url is stored.
        # The last-used url persists in USER_CONFIG_PATH between runs.
        if os.path.isfile(USER_CONFIG_PATH):
            with open(USER_CONFIG_PATH, mode='r') as config_file:
                server_url = config_file.readline()
        # Default url.
        else:
            server_url = 'https://api.deepmap.com'
    # Call the correct command if valid
    if args.command:
        make_request(args, server_url)
    else:
        parser.print_help()
def init_login_parser(subparsers):
    """ Sets up login parser args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    login = subparsers.add_parser(
        'login',
        description='Login to receive an authorization token using an API access token.')
    login.add_argument(
        '--server_url',
        help='The base url of the api server requested. '
        'Will persist if not reset with a new --server_url.')
    login.add_argument('token', help='An API access token.')
def init_reset_password_parser(subparsers):
    """ Sets up password reset args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    reset = subparsers.add_parser('reset_password',
                                  description='Trigger a password reset.')
    reset.add_argument('email',
                       help='The email of the account to reset password.')
def init_create_parser(subparsers):
    """ Sets up create args.

    Command grammar: create {token,session} {vehicle,api} <args>.

    Args:
        subparsers: subparsers object for the main parser.
    """
    create_parser = subparsers.add_parser(
        'create', description='Create an access token or session token.')
    create_subparser = create_parser.add_subparsers(dest='create_target')
    # Create an access token.
    create_token_parser = create_subparser.add_parser(
        'token', description='Create an access token.')
    create_token_subparsers = create_token_parser.add_subparsers(
        dest='create_token_target')
    # Create a vehicle access token
    create_vehicle_token_parser = create_token_subparsers.add_parser(
        'vehicle', description='Create a vehicle access token.')
    create_vehicle_token_parser.add_argument(
        'vehicle_id', help='User-provided id for the vehicle.')
    create_vehicle_token_parser.add_argument(
        'description', help='User-provided description for the vehicle.')
    # Create an API access token
    create_api_token_parser = create_token_subparsers.add_parser(
        'api', description='Create an API access token.')
    create_api_token_parser.add_argument(
        'description', help='User-provided description for the token user.')
    # Create a session token.
    create_session_parser = create_subparser.add_parser(
        'session', description='Create a session token.')
    create_session_subparsers = create_session_parser.add_subparsers(
        dest='create_session_target')
    # Create a vehicle session token
    create_vehicle_session_parser = create_session_subparsers.add_parser(
        'vehicle', description='Create a vehicle session token.')
    create_vehicle_session_parser.add_argument(
        'token', help='A valid vehicle access token.')
    # Create an API session token
    create_api_session_parser = create_session_subparsers.add_parser(
        'api', description='Create an API session token.')
    create_api_session_parser.add_argument('token',
                                           help='A valid API access token.')
def init_download_parser(subparsers):
    """ Sets up download parser args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    download = subparsers.add_parser('download', description='Download data.')
    targets = download.add_subparsers(dest='download_target')
    # download feature_tile <id>
    feature_tile = targets.add_parser(
        'feature_tile', help='Download a feature tile of a map.')
    feature_tile.add_argument('id',
                              help='The id of the feature_tile to download')
    # download distribution <id> [--format F] [--version V]
    distribution = targets.add_parser(
        'distribution', help='Download a map distribution.')
    distribution.add_argument(
        'id', help='The id of the map distribution to download')
    distribution.add_argument(
        '--format',
        help='Format of the distribution to download. Required if multiple formats are available.')
    distribution.add_argument(
        '--version',
        help='Optional: Version of the map to download. Otherwise latest version is downloaded.')
def init_invite_parser(subparsers):
    """ Sets up invite parser args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    invite = subparsers.add_parser(
        'invite', description='Invite a user to join your account.')
    invite.add_argument('email', help='The email of the user to invite.')
    invite.add_argument(
        '--admin',
        choices=['True', 'False'],
        help='Optional: True if the user should be an admin.')
def init_list_parser(subparsers):
    """ Sets up list parser args.

    Command grammar: list {maps,feature_tiles <id>,users,tokens {api,vehicle}}.

    Args:
        subparsers: subparsers object for the main parser.
    """
    list_parser = subparsers.add_parser('list',
                                        description='List the target objects.')
    list_subparsers = list_parser.add_subparsers(dest='list_target')
    # Maps are targets of list.
    list_subparsers.add_parser('maps', description='List maps.')
    # Feature tiles are targets of list.
    list_feature_tiles_parser = list_subparsers.add_parser(
        'feature_tiles', description='List feature tiles for a map.')
    list_feature_tiles_parser.add_argument('id', help='Id of the map.')
    # Users are targets of list.
    list_subparsers.add_parser('users', description='List users.')
    # Tokens are targets of list.
    list_tokens_parser = list_subparsers.add_parser('tokens',
                                                    description='List tokens.')
    list_tokens_subparsers = list_tokens_parser.add_subparsers(
        dest='list_tokens_target')
    # API token is target of list.
    list_tokens_subparsers.add_parser(
        'api', description='List issued API access tokens.')
    # Vehicle token is target of list.
    list_tokens_subparsers.add_parser(
        'vehicle', description='List issued vehicle access tokens.')
def init_get_parser(subparsers):
    """ Sets up get parser args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    get = subparsers.add_parser('get',
                                description='Get information about an object.')
    targets = get.add_subparsers(dest='get_target')
    # get user <id>
    user = targets.add_parser('user', description='Get user information.')
    user.add_argument('id', help='The id of the user.')
def init_delete_parser(subparsers):
    """ Sets up delete parser args.

    Command grammar: delete {user <id>, token {api,vehicle} <id>}.

    Args:
        subparsers: subparsers object for the main parser.
    """
    delete_parser = subparsers.add_parser('delete',
                                          description='Delete something.')
    delete_subparsers = delete_parser.add_subparsers(dest='del_target')
    # A user is target of delete.
    delete_user_parser = delete_subparsers.add_parser(
        'user', description='Delete a user.')
    delete_user_parser.add_argument('id', help='The id of the user.')
    # A token is target of delete.
    delete_token_parser = delete_subparsers.add_parser(
        'token', description='Delete a token.')
    delete_token_subparsers = delete_token_parser.add_subparsers(
        dest='del_token_target')
    # API token is target of delete.
    delete_api_token_parser = delete_token_subparsers.add_parser(
        'api', description='Delete an issued API access token.')
    delete_api_token_parser.add_argument('id', help='The id of the API token.')
    # Vehicle token is target of delete.
    delete_vehicle_token_parser = delete_token_subparsers.add_parser(
        'vehicle', description='Delete an issued vehicle access token.')
    delete_vehicle_token_parser.add_argument(
        'id', help='The id of the vehicle token.')
def init_edit_parser(subparsers):
    """ Sets up edit parser args.

    Args:
        subparsers: subparsers object for the main parser.
    """
    edit = subparsers.add_parser('edit', description='Edit something.')
    targets = edit.add_subparsers(dest='edit_target')
    # edit user <id> [--email E] [--admin True|False]
    user = targets.add_parser('user',
                              description="Edit a user's information.")
    user.add_argument('id', help='The target user to edit.')
    user.add_argument('--email', help="Optional: The user's new email.")
    user.add_argument(
        '--admin',
        choices=['True', 'False'],
        help='Optional: True or False, if the user is to be an admin.')
if __name__ == '__main__':
init_cli()
| 36.434783 | 96 | 0.670389 |
import argparse
import sys
import os
from deepmap_cli.constants import USER_CONFIG_PATH
from deepmap_cli.cli_requests import make_request
def init_cli():
parser = argparse.ArgumentParser(
prog='deepmap',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Possible commands are:"
"\n"
" login Receives and stores an authentication token for the api.\n"
" reset_password Reset a password for an account.\n"
" create Create a new access token, or session token from an access token.\n"
" download Downloads the specified files and pipes output to stdout.\n"
" list List valid users, maps, tokens, or tiles.\n"
" invite Invite a user to join your account.\n"
" get user Get a description of your account.\n"
" edit user Edit the email or admin permissions of a user.\n"
" delete Delete a user or token from your account.\n"
"\n"
"Use the -h flag for help information.\n"
"For example, for general help, run \"deepmap -h\"\n"
"For help on using a command, run: \"deepmap <command> -h\", replacing <command>\n"
"with the specific command e.g. \"deepmap login -h\" for login command help.\n"
"\n"
"Also, prefix abbreviations are allowed for parameter names,\n"
"as long as the abbreviation is unique e.g. --u or --user or --usern for\n"
"--username in the login command.\n"
"\n")
subparsers = parser.add_subparsers(dest='command')
init_login_parser(subparsers)
init_reset_password_parser(subparsers)
init_create_parser(subparsers)
init_download_parser(subparsers)
init_list_parser(subparsers)
init_invite_parser(subparsers)
init_get_parser(subparsers)
init_edit_parser(subparsers)
init_delete_parser(subparsers)
args = parser.parse_args(sys.argv[1:])
url_passed_in = False
if 'server_url' in vars(args).keys():
if args.server_url:
server_url = args.server_url
url_passed_in = True
if not url_passed_in:
if os.path.isfile(USER_CONFIG_PATH):
with open(USER_CONFIG_PATH, mode='r') as config_file:
server_url = config_file.readline()
else:
server_url = 'https://api.deepmap.com'
if args.command:
make_request(args, server_url)
else:
parser.print_help()
def init_login_parser(subparsers):
login_parser = subparsers.add_parser(
'login',
description=
'Login to receive an authorization token using an API access token.')
login_parser.add_argument(
'--server_url',
help="The base url of the api server requested. "
"Will persist if not reset with a new --server_url.")
login_parser.add_argument('token', help='An API access token.')
def init_reset_password_parser(subparsers):
reset_password_parser = subparsers.add_parser(
'reset_password', description='Trigger a password reset.')
reset_password_parser.add_argument(
'email', help='The email of the account to reset password.')
def init_create_parser(subparsers):
create_parser = subparsers.add_parser(
'create', description='Create an access token or session token.')
create_subparser = create_parser.add_subparsers(dest='create_target')
create_token_parser = create_subparser.add_parser(
'token', description='Create an access token.')
create_token_subparsers = create_token_parser.add_subparsers(
dest='create_token_target')
create_vehicle_token_parser = create_token_subparsers.add_parser(
'vehicle', description='Create a vehicle access token.')
create_vehicle_token_parser.add_argument(
'vehicle_id', help='User-provided id for the vehicle.')
create_vehicle_token_parser.add_argument(
'description', help='User-provided description for the vehicle.')
create_api_token_parser = create_token_subparsers.add_parser(
'api', description='Create an API access token.')
create_api_token_parser.add_argument(
'description', help='User-provided description for the token user.')
create_session_parser = create_subparser.add_parser(
'session', description='Create a session token.')
create_session_subparsers = create_session_parser.add_subparsers(
dest='create_session_target')
create_vehicle_session_parser = create_session_subparsers.add_parser(
'vehicle', description='Create a vehicle session token.')
create_vehicle_session_parser.add_argument(
'token', help='A valid vehicle access token.')
create_api_session_parser = create_session_subparsers.add_parser(
'api', description='Create an API session token.')
create_api_session_parser.add_argument('token',
help='A valid API access token.')
def init_download_parser(subparsers):
    """Register the 'download' subcommand: feature tiles and map distributions."""
    parser = subparsers.add_parser('download', description='Download data.')
    targets = parser.add_subparsers(dest='download_target')
    feature_tile = targets.add_parser(
        'feature_tile', help='Download a feature tile of a map.')
    feature_tile.add_argument(
        'id', help='The id of the feature_tile to download')
    distribution = targets.add_parser(
        'distribution', help='Download a map distribution.')
    distribution.add_argument(
        'id', help='The id of the map distribution to download')
    distribution.add_argument(
        '--format',
        help='Format of the distribution to download. Required if multiple formats are available.')
    distribution.add_argument(
        '--version',
        help='Optional: Version of the map to download. Otherwise latest version is downloaded.')
def init_invite_parser(subparsers):
    """Register the 'invite' subcommand."""
    parser = subparsers.add_parser(
        'invite', description='Invite a user to join your account.')
    parser.add_argument('email', help='The email of the user to invite.')
    parser.add_argument(
        '--admin',
        choices=['True', 'False'],
        help='Optional: True if the user should be an admin.')
def init_list_parser(subparsers):
    """Register the 'list' subcommand: maps, feature tiles, users, and tokens."""
    parser = subparsers.add_parser('list',
                                   description='List the target objects.')
    targets = parser.add_subparsers(dest='list_target')
    targets.add_parser('maps', description='List maps.')
    feature_tiles = targets.add_parser(
        'feature_tiles', description='List feature tiles for a map.')
    feature_tiles.add_argument('id', help='Id of the map.')
    targets.add_parser('users', description='List users.')
    tokens = targets.add_parser('tokens', description='List tokens.')
    token_targets = tokens.add_subparsers(dest='list_tokens_target')
    token_targets.add_parser('api', description='List issued API access tokens.')
    token_targets.add_parser(
        'vehicle', description='List issued vehicle access tokens.')
def init_get_parser(subparsers):
    """Register the 'get' subcommand."""
    parser = subparsers.add_parser(
        'get', description='Get information about an object.')
    targets = parser.add_subparsers(dest='get_target')
    user = targets.add_parser('user', description='Get user information.')
    user.add_argument('id', help='The id of the user.')
def init_delete_parser(subparsers):
    """Register the 'delete' subcommand: users and issued tokens."""
    parser = subparsers.add_parser('delete', description='Delete something.')
    targets = parser.add_subparsers(dest='del_target')
    user = targets.add_parser('user', description='Delete a user.')
    user.add_argument('id', help='The id of the user.')
    token = targets.add_parser('token', description='Delete a token.')
    token_targets = token.add_subparsers(dest='del_token_target')
    api_token = token_targets.add_parser(
        'api', description='Delete an issued API access token.')
    api_token.add_argument('id', help='The id of the API token.')
    vehicle_token = token_targets.add_parser(
        'vehicle', description='Delete an issued vehicle access token.')
    vehicle_token.add_argument('id', help='The id of the vehicle token.')
def init_edit_parser(subparsers):
    """Register the 'edit' subcommand."""
    parser = subparsers.add_parser('edit', description='Edit something.')
    targets = parser.add_subparsers(dest='edit_target')
    user = targets.add_parser('user', description="Edit a user's information.")
    user.add_argument('id', help='The target user to edit.')
    user.add_argument('--email', help="Optional: The user's new email.")
    user.add_argument(
        '--admin',
        choices=['True', 'False'],
        help='Optional: True or False, if the user is to be an admin.')
if __name__ == '__main__':
    # Script entry point: build the full CLI parser tree and dispatch.
    init_cli()
| true | true |
f731fcf55c0b961d14a2de8a323e7d7f817e7911 | 657 | py | Python | from_python_community/get_century.py | ZaytsevNS/python_practice | 109e14923a2ddeacc5360fd72947275afd2159e3 | [
"MIT"
] | null | null | null | from_python_community/get_century.py | ZaytsevNS/python_practice | 109e14923a2ddeacc5360fd72947275afd2159e3 | [
"MIT"
] | null | null | null | from_python_community/get_century.py | ZaytsevNS/python_practice | 109e14923a2ddeacc5360fd72947275afd2159e3 | [
"MIT"
] | null | null | null | # Условие:
# Write a simple function that returns the century for a given year.
# Examples:
# get_century(2021) -> 21
# get_century(1999) -> 20
# get_century(2000) -> 20
# get_century(101) -> 2
import unittest
def get_century(n: int) -> int:
    """Return the (1-based) century that year *n* falls in."""
    quotient, remainder = divmod(n, 100)
    # A year that is an exact multiple of 100 belongs to the previous century.
    return quotient if remainder == 0 else quotient + 1
class TestGetCentury(unittest.TestCase):
def test_one(self):
""" Should return century """
self.assertEqual(21, get_century(2021))
self.assertEqual(20, get_century(1999))
self.assertEqual(20, get_century(2000))
self.assertEqual(2, get_century(101))
if __name__ == '__main__':
    # Run the unit tests when executed as a script.
    unittest.main()
| 22.655172 | 73 | 0.649924 |
import unittest
def get_century(n: int) -> int:
a, b = divmod(n, 100)
return a + 1 if b > 0 else a
class TestGetCentury(unittest.TestCase):
def test_one(self):
self.assertEqual(21, get_century(2021))
self.assertEqual(20, get_century(1999))
self.assertEqual(20, get_century(2000))
self.assertEqual(2, get_century(101))
if __name__ == '__main__':
unittest.main()
| true | true |
f731fe0518ecc272120697d6ea77fda740bb2ada | 2,565 | py | Python | tests/test_app.py | mogul/github-issue-lifecycle | c31a753b904799c57a7468bf590a280e8be3bb6f | [
"CC0-1.0"
] | 1 | 2017-06-08T11:37:21.000Z | 2017-06-08T11:37:21.000Z | tests/test_app.py | mogul/github-issue-lifecycle | c31a753b904799c57a7468bf590a280e8be3bb6f | [
"CC0-1.0"
] | 2 | 2016-10-20T20:39:17.000Z | 2016-10-20T20:45:50.000Z | tests/test_app.py | mogul/github-issue-lifecycle | c31a753b904799c57a7468bf590a280e8be3bb6f | [
"CC0-1.0"
] | 3 | 2016-10-20T20:32:06.000Z | 2021-02-15T10:00:02.000Z | import unittest
from unittest import mock
import requests
from flask.ext.testing import TestCase
from app import db, models
from app.app import app
from config import config
from .mock_github import requests_get_stub
app.config.from_object(config['testing'])
class AppTestCase(TestCase):
def create_app(self):
app.config.from_object(config['testing'])
return app
def setUp(self):
requests.get = mock.MagicMock(side_effect=requests_get_stub)
db.init_app(app)
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_repo_retrieved(self):
url = '/api/real/repo/'
resp = self.client.get(url)
assert resp.json['owner'] == 'real'
assert resp.json['name'] == 'repo'
assert len(resp.json['issues']) == 2
def test_repo_includes_spans(self):
resp = self.client.get('/api/real/repo/')
assert 'spans' in resp.json['issues'][0]
assert 'milestones' in resp.json['issues'][0]['spans'][0]
def test_repo_persisted(self):
owner = '18f'
name = 'fictionalrepo1'
assert not models.Repo.query.filter_by(owner=owner, name=name).first()
resp = self.client.get('/api/{}/{}/'.format(owner, name))
repo = models.Repo.query.filter_by(owner=owner, name=name).first()
assert repo
assert len(repo.issues) == 2
def test_cached_data_used(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/api/{}/{}/'.format(owner, name))
calls_before = requests.get.call_count
resp = self.client.get('/api/{}/{}/?data_age=3600'.format(owner, name))
assert requests.get.call_count == calls_before
def test_cached_data_not_used(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/api/{}/{}/'.format(owner, name))
calls_before = requests.get.call_count
resp = self.client.get('/api/{}/{}/?data_age=0'.format(owner, name))
assert requests.get.call_count > calls_before
def test_nonexistent_repo(self):
resp = self.client.get('/api/doesnot/exist/')
assert resp.status_code == 404
def test_chart_served(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/{}/{}/'.format(owner, name))
assert resp.status_code == 200
assert 'text/html; charset=utf-8' in resp.headers.values()
assert b'Bokeh' in resp.data
if __name__ == '__main__':
    # Run the integration tests when executed as a script.
    unittest.main()
| 31.666667 | 79 | 0.62768 | import unittest
from unittest import mock
import requests
from flask.ext.testing import TestCase
from app import db, models
from app.app import app
from config import config
from .mock_github import requests_get_stub
app.config.from_object(config['testing'])
class AppTestCase(TestCase):
def create_app(self):
app.config.from_object(config['testing'])
return app
def setUp(self):
requests.get = mock.MagicMock(side_effect=requests_get_stub)
db.init_app(app)
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_repo_retrieved(self):
url = '/api/real/repo/'
resp = self.client.get(url)
assert resp.json['owner'] == 'real'
assert resp.json['name'] == 'repo'
assert len(resp.json['issues']) == 2
def test_repo_includes_spans(self):
resp = self.client.get('/api/real/repo/')
assert 'spans' in resp.json['issues'][0]
assert 'milestones' in resp.json['issues'][0]['spans'][0]
def test_repo_persisted(self):
owner = '18f'
name = 'fictionalrepo1'
assert not models.Repo.query.filter_by(owner=owner, name=name).first()
resp = self.client.get('/api/{}/{}/'.format(owner, name))
repo = models.Repo.query.filter_by(owner=owner, name=name).first()
assert repo
assert len(repo.issues) == 2
def test_cached_data_used(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/api/{}/{}/'.format(owner, name))
calls_before = requests.get.call_count
resp = self.client.get('/api/{}/{}/?data_age=3600'.format(owner, name))
assert requests.get.call_count == calls_before
def test_cached_data_not_used(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/api/{}/{}/'.format(owner, name))
calls_before = requests.get.call_count
resp = self.client.get('/api/{}/{}/?data_age=0'.format(owner, name))
assert requests.get.call_count > calls_before
def test_nonexistent_repo(self):
resp = self.client.get('/api/doesnot/exist/')
assert resp.status_code == 404
def test_chart_served(self):
owner = '18f'
name = 'fictionalrepo2'
resp = self.client.get('/{}/{}/'.format(owner, name))
assert resp.status_code == 200
assert 'text/html; charset=utf-8' in resp.headers.values()
assert b'Bokeh' in resp.data
if __name__ == '__main__':
unittest.main()
| true | true |
f731ff23a6b491175ed0b509bddbf9dfdcfb99b4 | 12,069 | py | Python | yolov3_tiny_deer_detection/evaluate_mAP.py | Pradeep-Gopal/yolo_deer_people_final_project | 2337e8cbb88f467a6d19ab9cdb14abbf2ba04bc2 | [
"MIT"
] | null | null | null | yolov3_tiny_deer_detection/evaluate_mAP.py | Pradeep-Gopal/yolo_deer_people_final_project | 2337e8cbb88f467a6d19ab9cdb14abbf2ba04bc2 | [
"MIT"
] | null | null | null | yolov3_tiny_deer_detection/evaluate_mAP.py | Pradeep-Gopal/yolo_deer_people_final_project | 2337e8cbb88f467a6d19ab9cdb14abbf2ba04bc2 | [
"MIT"
] | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.dataset import Dataset
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_image, image_preprocess, postprocess_boxes, nms, read_class_names
from yolov3.configs import *
import shutil
import json
import time
# Enable memory growth on the first GPU (if any) so TensorFlow allocates
# VRAM incrementally instead of grabbing it all up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
    try: tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
def voc_ap(rec, prec):
    """Compute PASCAL VOC2012 average precision from recall/precision lists.

    Pads the curves with sentinel points, makes the precision envelope
    monotonically decreasing from the right, then integrates precision
    over the recall steps.

    Note: *rec* and *prec* are modified in place (sentinels are inserted
    at both ends) — callers read the padded lists afterwards.

    Returns (ap, mrec, mpre).
    """
    # Pad recall with 0.0 at the start and 1.0 at the end.
    rec.insert(0, 0.0)
    rec.append(1.0)
    # Pad precision with 0.0 at both ends.
    prec.insert(0, 0.0)
    prec.append(0.0)
    mrec = list(rec)
    mpre = list(prec)
    # Right-to-left sweep: each precision becomes the max of itself and
    # everything to its right (the monotone precision envelope).
    for idx in reversed(range(len(mpre) - 1)):
        if mpre[idx] < mpre[idx + 1]:
            mpre[idx] = mpre[idx + 1]
    # Integrate the step function at every index where recall changes.
    ap = 0.0
    for idx in range(1, len(mrec)):
        if mrec[idx] != mrec[idx - 1]:
            ap += (mrec[idx] - mrec[idx - 1]) * mpre[idx]
    return ap, mrec, mpre
def get_mAP(Yolo, dataset, score_threshold=0.25, iou_threshold=0.50, TEST_INPUT_SIZE=TEST_INPUT_SIZE):
    """Evaluate *Yolo* on *dataset* and return the mean average precision (%).

    Ground-truth boxes and per-class predictions are dumped as JSON under
    ``mAP/ground-truth`` and a human-readable report is written to
    ``mAP/results.txt``.

    :param Yolo: detection model (tf.keras model, or a TensorRT signature
        when ``YOLO_FRAMEWORK == "trt"``).
    :param dataset: Dataset whose annotations supply images and ground truth.
    :param score_threshold: minimum confidence for a predicted box to count.
    :param iou_threshold: IoU threshold used by non-max suppression.
    :param TEST_INPUT_SIZE: square input resolution fed to the network.
    :returns: mAP over all ground-truth classes, in percent.
    """
    MINOVERLAP = 0.5  # IoU needed to match a ground truth (PASCAL VOC2012 default)
    NUM_CLASS = read_class_names(TRAIN_CLASSES)

    # Start from a clean scratch directory for the JSON dumps.
    ground_truth_dir_path = 'mAP/ground-truth'
    if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
    if not os.path.exists('mAP'): os.mkdir('mAP')
    os.mkdir(ground_truth_dir_path)

    print(f'\ncalculating mAP{int(iou_threshold*100)}...\n')

    # Pass 1: dump per-image ground-truth boxes and count objects per class.
    gt_counter_per_class = {}
    for index in range(dataset.num_samples):
        ann_dataset = dataset.annotations[index]
        _, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)

        if len(bbox_data_gt) == 0:
            bboxes_gt = []
            classes_gt = []
        else:
            bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]

        bounding_boxes = []
        for i in range(len(bboxes_gt)):
            class_name = NUM_CLASS[classes_gt[i]]
            xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
            bbox = xmin + " " + ymin + " " + xmax + " " + ymax
            bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False})
            # Tally this object for its class.
            if class_name in gt_counter_per_class:
                gt_counter_per_class[class_name] += 1
            else:
                gt_counter_per_class[class_name] = 1

        with open(f'{ground_truth_dir_path}/{str(index)}_ground_truth.json', 'w') as outfile:
            json.dump(bounding_boxes, outfile)

    gt_classes = sorted(gt_counter_per_class.keys())
    n_classes = len(gt_classes)

    # Pass 2: run inference on every image and bucket detections per class.
    times = []
    json_pred = [[] for _ in range(n_classes)]
    for index in range(dataset.num_samples):
        ann_dataset = dataset.annotations[index]
        original_image, _ = dataset.parse_annotation(ann_dataset, True)

        image = image_preprocess(np.copy(original_image), [TEST_INPUT_SIZE, TEST_INPUT_SIZE])
        image_data = image[np.newaxis, ...].astype(np.float32)

        t1 = time.time()
        if YOLO_FRAMEWORK == "tf":
            pred_bbox = Yolo.predict(image_data)
        elif YOLO_FRAMEWORK == "trt":
            batched_input = tf.constant(image_data)
            result = Yolo(batched_input)
            pred_bbox = [value.numpy() for value in result.values()]
        t2 = time.time()
        times.append(t2 - t1)

        # Flatten the per-scale outputs into one (N, features) tensor.
        pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
        pred_bbox = tf.concat(pred_bbox, axis=0)
        bboxes = postprocess_boxes(pred_bbox, original_image, TEST_INPUT_SIZE, score_threshold)
        bboxes = nms(bboxes, iou_threshold, method='nms')

        for bbox in bboxes:
            coor = np.array(bbox[:4], dtype=np.int32)
            score = '%.4f' % bbox[4]
            class_name = NUM_CLASS[int(bbox[5])]
            xmin, ymin, xmax, ymax = list(map(str, coor))
            json_pred[gt_classes.index(class_name)].append({
                "confidence": str(score),
                "file_id": str(index),
                "bbox": xmin + " " + ymin + " " + xmax + " " + ymax,
            })

    ms = sum(times) / len(times) * 1000  # mean inference latency per image
    fps = 1000 / ms

    # Sort each class's detections by descending confidence and dump them.
    for class_name in gt_classes:
        json_pred[gt_classes.index(class_name)].sort(key=lambda x: float(x['confidence']), reverse=True)
        with open(f'{ground_truth_dir_path}/{class_name}_predictions.json', 'w') as outfile:
            json.dump(json_pred[gt_classes.index(class_name)], outfile)

    # Pass 3: greedy VOC matching, AP per class, and the results report.
    sum_AP = 0.0
    ap_dictionary = {}
    with open("mAP/results.txt", 'w') as results_file:
        results_file.write("# AP and precision/recall per class\n")
        count_true_positives = {}
        for class_index, class_name in enumerate(gt_classes):
            count_true_positives[class_name] = 0
            # Load this class's predictions (already confidence-sorted).
            predictions_file = f'{ground_truth_dir_path}/{class_name}_predictions.json'
            with open(predictions_file) as pf:
                predictions_data = json.load(pf)

            nd = len(predictions_data)
            tp = [0] * nd
            fp = [0] * nd
            for idx, prediction in enumerate(predictions_data):
                file_id = prediction["file_id"]
                gt_file = f'{ground_truth_dir_path}/{str(file_id)}_ground_truth.json'
                with open(gt_file) as gf:
                    ground_truth_data = json.load(gf)
                ovmax = -1
                gt_match = -1
                bb = [float(x) for x in prediction["bbox"].split()]
                # Find the best-overlapping same-class ground truth in the image.
                for obj in ground_truth_data:
                    if obj["class_name"] == class_name:
                        bbgt = [float(x) for x in obj["bbox"].split()]
                        bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]), min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
                        iw = bi[2] - bi[0] + 1
                        ih = bi[3] - bi[1] + 1
                        if iw > 0 and ih > 0:
                            # IoU = intersection / union (pixel-inclusive boxes).
                            ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                            ov = iw * ih / ua
                            if ov > ovmax:
                                ovmax = ov
                                gt_match = obj
                # True positive only for the first sufficient-IoU match of a
                # ground truth; repeats and misses are false positives.
                if ovmax >= MINOVERLAP:
                    if not bool(gt_match["used"]):
                        tp[idx] = 1
                        gt_match["used"] = True
                        count_true_positives[class_name] += 1
                        # Persist the "used" flag so later detections of the
                        # same object count as duplicates.
                        with open(gt_file, 'w') as f:
                            f.write(json.dumps(ground_truth_data))
                    else:
                        fp[idx] = 1
                else:
                    fp[idx] = 1

            # Cumulative sums turn per-detection flags into running counts.
            cumsum = 0
            for idx, val in enumerate(fp):
                fp[idx] += cumsum
                cumsum += val
            cumsum = 0
            for idx, val in enumerate(tp):
                tp[idx] += cumsum
                cumsum += val
            rec = tp[:]
            for idx, val in enumerate(tp):
                rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
            prec = tp[:]
            for idx, val in enumerate(tp):
                prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])

            ap, mrec, mprec = voc_ap(rec, prec)
            sum_AP += ap
            text = "{0:.3f}%".format(ap * 100) + " = " + class_name + " AP "
            # NOTE: voc_ap pads rec/prec in place, so the rounded lists below
            # include the inserted sentinel points (existing report format).
            rounded_prec = ['%.3f' % elem for elem in prec]
            rounded_rec = ['%.3f' % elem for elem in rec]
            results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
            print(text)
            ap_dictionary[class_name] = ap

        results_file.write("\n# mAP of all classes\n")
        mAP = sum_AP / n_classes
        text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP * 100, fps)
        results_file.write(text + "\n")
        print(text)

    return mAP * 100
if __name__ == '__main__':
    # Build the detector for the configured framework, then evaluate mAP
    # on the test split.
    if YOLO_FRAMEWORK == "tf": # TensorFlow detection
        # Pick the Darknet weight file matching the configured model family.
        if YOLO_TYPE == "yolov4":
            Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
        if YOLO_TYPE == "yolov3":
            Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
        if YOLO_CUSTOM_WEIGHTS == False:
            # COCO classes with pretrained Darknet weights.
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
            load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
        else:
            # Custom-trained classes with checkpoint weights.
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
            yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
    elif YOLO_FRAMEWORK == "trt": # TensorRT detection
        saved_model_loaded = tf.saved_model.load(f"./checkpoints/{TRAIN_MODEL_NAME}", tags=[tag_constants.SERVING])
        signature_keys = list(saved_model_loaded.signatures.keys())
        yolo = saved_model_loaded.signatures['serving_default']
    testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
    get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
| 41.761246 | 128 | 0.567653 |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.dataset import Dataset
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_image, image_preprocess, postprocess_boxes, nms, read_class_names
from yolov3.configs import *
import shutil
import json
import time
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
def voc_ap(rec, prec):
rec.insert(0, 0.0)
rec.append(1.0)
mrec = rec[:]
prec.insert(0, 0.0)
prec.append(0.0)
mpre = prec[:]
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i)
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
def get_mAP(Yolo, dataset, score_threshold=0.25, iou_threshold=0.50, TEST_INPUT_SIZE=TEST_INPUT_SIZE):
MINOVERLAP = 0.5
NUM_CLASS = read_class_names(TRAIN_CLASSES)
ground_truth_dir_path = 'mAP/ground-truth'
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if not os.path.exists('mAP'): os.mkdir('mAP')
os.mkdir(ground_truth_dir_path)
print(f'\ncalculating mAP{int(iou_threshold*100)}...\n')
gt_counter_per_class = {}
for index in range(dataset.num_samples):
ann_dataset = dataset.annotations[index]
original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)
if len(bbox_data_gt) == 0:
bboxes_gt = []
classes_gt = []
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(index) + '.txt')
num_bbox_gt = len(bboxes_gt)
bounding_boxes = []
for i in range(num_bbox_gt):
class_name = NUM_CLASS[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox = xmin + " " + ymin + " " + xmax + " " +ymax
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
gt_counter_per_class[class_name] = 1
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
with open(f'{ground_truth_dir_path}/{str(index)}_ground_truth.json', 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
times = []
json_pred = [[] for i in range(n_classes)]
for index in range(dataset.num_samples):
ann_dataset = dataset.annotations[index]
image_name = ann_dataset[0].split('/')[-1]
original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)
image = image_preprocess(np.copy(original_image), [TEST_INPUT_SIZE, TEST_INPUT_SIZE])
image_data = image[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
times.append(t2-t1)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, TEST_INPUT_SIZE, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
for bbox in bboxes:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = NUM_CLASS[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox = xmin + " " + ymin + " " + xmax + " " +ymax
json_pred[gt_classes.index(class_name)].append({"confidence": str(score), "file_id": str(index), "bbox": str(bbox)})
ms = sum(times)/len(times)*1000
fps = 1000 / ms
for class_name in gt_classes:
json_pred[gt_classes.index(class_name)].sort(key=lambda x:float(x['confidence']), reverse=True)
with open(f'{ground_truth_dir_path}/{class_name}_predictions.json', 'w') as outfile:
json.dump(json_pred[gt_classes.index(class_name)], outfile)
# Calculate the AP for each class
sum_AP = 0.0
ap_dictionary = {}
# open file to store the results
with open("mAP/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
# Load predictions of that class
predictions_file = f'{ground_truth_dir_path}/{class_name}_predictions.json'
predictions_data = json.load(open(predictions_file))
# Assign predictions to ground truth objects
nd = len(predictions_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, prediction in enumerate(predictions_data):
file_id = prediction["file_id"]
# assign prediction to ground truth object if any
# open ground-truth with that file_id
gt_file = f'{ground_truth_dir_path}/{str(file_id)}_ground_truth.json'
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load prediction bounding-box
bb = [ float(x) for x in prediction["bbox"].split() ] # bounding box of prediction
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ] # bounding box of ground truth
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign prediction as true positive/don't care/false positive
if ovmax >= MINOVERLAP:
if not bool(gt_match["used"]):
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
else:
fp[idx] = 1
else:
fp[idx] = 1
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
ap, mrec, mprec = voc_ap(rec, prec)
sum_AP += ap
text = "{0:.3f}%".format(ap*100) + " = " + class_name + " AP "
rounded_prec = [ '%.3f' % elem for elem in prec ]
rounded_rec = [ '%.3f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
print(text)
ap_dictionary[class_name] = ap
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP*100, fps)
results_file.write(text + "\n")
print(text)
return mAP*100
if __name__ == '__main__':
if YOLO_FRAMEWORK == "tf":
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights)
else:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}")
elif YOLO_FRAMEWORK == "trt":
saved_model_loaded = tf.saved_model.load(f"./checkpoints/{TRAIN_MODEL_NAME}", tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
| true | true |
f731ff9471a2cbbe6e265123faf63ee9b93f92f6 | 33,241 | py | Python | frappe/database/database.py | rizkiheryandi/frappe | 1767d87dfd90be4f8b62e85af53f1ebc74dec370 | [
"MIT"
] | 1 | 2021-04-02T15:23:12.000Z | 2021-04-02T15:23:12.000Z | frappe/database/database.py | rizkiheryandi/frappe | 1767d87dfd90be4f8b62e85af53f1ebc74dec370 | [
"MIT"
] | null | null | null | frappe/database/database.py | rizkiheryandi/frappe | 1767d87dfd90be4f8b62e85af53f1ebc74dec370 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import re
import time
import frappe
import datetime
import frappe.defaults
import frappe.model.meta
from frappe import _
from time import time
from frappe.utils import now, getdate, cast_fieldtype, get_datetime
from frappe.model.utils.link_count import flush_local_link_count
from frappe.utils import cint
# imports - compatibility imports
from six import (
integer_types,
string_types,
text_type,
iteritems
)
class Database(object):
	"""
	Open a database connection with the given parameters; if use_default is True, use the
	login details from `conf.py`. This is called by the request handler and is accessible using
	the `db` global variable. The `sql` method is also global to run queries.
	"""
	VARCHAR_LEN = 140
	MAX_COLUMN_LENGTH = 64
	# Columns that may be absent from a table without it being an error.
	OPTIONAL_COLUMNS = ["_user_tags", "_comments", "_assign", "_liked_by"]
	DEFAULT_SHORTCUTS = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
	STANDARD_VARCHAR_COLUMNS = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')
	# Columns present on every DocType table.
	DEFAULT_COLUMNS = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',
		 'parentfield', 'parenttype', 'idx']
	class InvalidColumnName(frappe.ValidationError): pass
	def __init__(self, host=None, user=None, password=None, ac_name=None, use_default=0, port=None):
		# Resolve connection parameters, falling back to site-config values
		# (frappe.conf). The connection itself is opened later by `connect`.
		self.setup_type_map()
		self.host = host or frappe.conf.db_host or '127.0.0.1'
		self.port = port or frappe.conf.db_port or ''
		self.user = user or frappe.conf.db_name
		self.db_name = frappe.conf.db_name
		self._conn = None
		if ac_name:
			self.user = ac_name or frappe.conf.db_name
		if use_default:
			self.user = frappe.conf.db_name
		# Bookkeeping for write counting / auto-commit during bulk writes.
		self.transaction_writes = 0
		self.auto_commit_on_many_writes = 0
		self.password = password or frappe.conf.db_password
		self.value_cache = {}
	def setup_type_map(self):
		# No-op in the base class; presumably overridden by backend-specific
		# subclasses to map Frappe fieldtypes to column types — TODO confirm.
		pass
	def connect(self):
		"""Connects to a database as set in `site_config.json`."""
		self.cur_db_name = self.user
		self._conn = self.get_connection()
		self._cursor = self._conn.cursor()
		# A fresh connection starts with no rollback observers registered.
		frappe.local.rollback_observers = []
	def use(self, db_name):
		"""Switch the active database (`USE db_name`)."""
		self._conn.select_db(db_name)
	def get_connection(self):
		# Unimplemented in the base class; presumably returns a live DB-API
		# connection in backend-specific subclasses — TODO confirm.
		pass
	def get_database_size(self):
		# Unimplemented in the base class; presumably reports the database
		# size in backend-specific subclasses — TODO confirm.
		pass
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
	debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None, explain=False):
	"""Execute a SQL query and fetch all rows.

	:param query: SQL query.
	:param values: List / dict of values to be escaped and substituted in the query.
	:param as_dict: Return as a dictionary.
	:param as_list: Always return as a list.
	:param formatted: Format values like date etc. (forwarded to fetch_as_dict).
	:param debug: Print query and `EXPLAIN` in debug log.
	:param ignore_ddl: Catch exception if table, column missing.
	:param as_utf8: Encode values as UTF 8.
	:param auto_commit: Commit after executing the query.
	:param update: Update this dict to all rows (if returned `as_dict`).
	:param explain: With `debug`, also log the query plan for SELECT queries.

	Examples:

		# return customer names as dicts
		frappe.db.sql("select name from tabCustomer", as_dict=True)

		# return names beginning with a
		frappe.db.sql("select name from tabCustomer where name like %s", "a%")

		# values as dict
		frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
			{"name": "a%", "owner":"test@example.com"})
	"""
	if re.search(r'ifnull\(', query, flags=re.IGNORECASE):
		# replaces ifnull in query with coalesce (portable across backends)
		query = re.sub(r'ifnull\(', 'coalesce(', query, flags=re.IGNORECASE)

	if not self._conn:
		# lazy connection on first query
		self.connect()

	# in transaction validations
	self.check_transaction_status(query)
	self.clear_db_table_cache(query)

	# autocommit
	if auto_commit: self.commit()

	# execute
	try:
		if debug:
			time_start = time()

		self.log_query(query, values, debug, explain)
		if values!=():
			if isinstance(values, dict):
				values = dict(values)

			# MySQL-python==1.2.5 hack!
			# scalar values must be wrapped in a tuple for the driver
			if not isinstance(values, (dict, tuple, list)):
				values = (values,)

			self._cursor.execute(query, values)

			if frappe.flags.in_migrate:
				self.log_touched_tables(query, values)

		else:
			self._cursor.execute(query)

			if frappe.flags.in_migrate:
				self.log_touched_tables(query)

		if debug:
			time_end = time()
			frappe.errprint(("Execution time: {0} sec").format(round(time_end - time_start, 2)))

	except Exception as e:
		if frappe.conf.db_type == 'postgres':
			# Postgres aborts the transaction on error; roll back so the
			# connection remains usable
			self.rollback()
		elif self.is_syntax_error(e):
			# only for mariadb
			frappe.errprint('Syntax error in query:')
			frappe.errprint(query)

		if ignore_ddl and (self.is_missing_column(e) or self.is_missing_table(e) or self.cant_drop_field_or_key(e)):
			# caller opted in to tolerate missing schema objects
			pass
		else:
			raise

	if auto_commit: self.commit()

	if not self._cursor.description:
		# statement produced no result set (e.g. DML/DDL)
		return ()

	# scrub output if required
	if as_dict:
		ret = self.fetch_as_dict(formatted, as_utf8)
		if update:
			for r in ret:
				r.update(update)
		return ret
	elif as_list:
		return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
	elif as_utf8:
		return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
	else:
		return self._cursor.fetchall()
def log_query(self, query, values, debug, explain):
	"""Emit the query to test output, the debug channel and the site log,
	as configured."""
	# test harness: print every SQL statement when the flag is set
	if frappe.conf.get('allow_tests') and frappe.cache().get_value('flag_print_sql'):
		print(self.mogrify(query, values))

	# developer debug channel
	if debug:
		is_select = query.strip().lower().startswith('select')
		if explain and is_select:
			self.explain_query(query, values)
		frappe.errprint(self.mogrify(query, values))

	# site-level verbose logging (logging == 2)
	if (frappe.conf.get("logging") or False)==2:
		frappe.log("<<<< query")
		frappe.log(self.mogrify(query, values))
		frappe.log(">>>>")
def mogrify(self, query, values):
	"""Build and return the query string with `values` interpolated.

	Falls back to returning the ``(query, values)`` pair when the driver's
	cursor does not implement ``mogrify`` or interpolation fails.

	Fix: the original used a bare ``except:``, which also swallowed
	``KeyboardInterrupt``/``SystemExit``; narrowed to ``Exception``.
	"""
	if not values:
		return query
	try:
		return self._cursor.mogrify(query, values)
	except Exception:
		# best-effort: not every driver supports mogrify
		return (query, values)
def explain_query(self, query, values=None):
	"""Run `EXPLAIN` on `query` and print the plan to the error log.

	Any failure is reported but never propagated — this is a debugging aid.
	"""
	import json
	try:
		frappe.errprint("--- query explain ---")
		explain_sql = "explain " + query
		if values is None:
			self._cursor.execute(explain_sql)
		else:
			self._cursor.execute(explain_sql, values)
		frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
		frappe.errprint("--- query explain end ---")
	except Exception:
		frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
	"""Run `query` and return the first column of each row as a flat list.

	Example:

		# doctypes = ["DocType", "DocField", "User", ...]
		doctypes = frappe.db.sql_list("select name from DocType")
	"""
	rows = self.sql(query, values, debug=debug)
	return [row[0] for row in rows]
def sql_ddl(self, query, values=(), debug=False):
	"""Commit the current transaction, then execute `query`.

	DDL (Data Definition Language) statements that alter schema autocommit
	implicitly in MariaDB, so pending writes are committed first to keep
	transaction boundaries explicit.

	:param query: DDL statement to run.
	:param values: Values to be escaped and substituted. Fix: previously
		this parameter was accepted but silently dropped; it is now
		forwarded to `sql` (default `()` keeps old behaviour for existing
		callers).
	:param debug: Print the query in the debug log.
	"""
	self.commit()
	self.sql(query, values, debug=debug)
def check_transaction_status(self, query):
	"""Guard the current transaction before a statement runs.

	* Raises if a statement that causes an implicit commit (START/ALTER/
	  DROP/CREATE/BEGIN/TRUNCATE) would run while writes are pending.
	* Resets the write counter on COMMIT/ROLLBACK.
	* Counts INSERT/UPDATE/DELETE statements; after 200,000 writes in one
	  transaction either auto-commits (if `auto_commit_on_many_writes`) or
	  raises, so writes are always flushed and the system cannot hang.
	  (The old docstring said 20,000 — the code checks 200,000.)

	Fix: the original unconditionally evaluated ``query[:6]``, which raised
	``TypeError`` when `query` was None; now guarded.
	"""
	if not query:
		return

	if self.transaction_writes and \
		query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
		raise Exception('This statement can cause implicit commit')

	if query.strip().lower() in ('commit', 'rollback'):
		self.transaction_writes = 0

	if query[:6].lower() in ('update', 'insert', 'delete'):
		self.transaction_writes += 1
		if self.transaction_writes > 200000:
			if self.auto_commit_on_many_writes:
				self.commit()
			else:
				frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
	"""Internal. Fetch all rows from the open cursor as `frappe._dict`s
	keyed by column name.

	NOTE(review): `formatted` is accepted for interface compatibility but
	is not used in this implementation.
	"""
	result = self._cursor.fetchall()
	ret = []
	if result:
		# column names come from the cursor metadata
		keys = [column[0] for column in self._cursor.description]
		for r in result:
			values = []
			for value in r:
				if as_utf8 and isinstance(value, text_type):
					value = value.encode('utf-8')
				values.append(value)

			ret.append(frappe._dict(zip(keys, values)))

	return ret
@staticmethod
def clear_db_table_cache(query):
	"""Invalidate the cached table list whenever a table is created or dropped."""
	if not query:
		return
	first_word = query.strip().split()[0].lower()
	if first_word in {'drop', 'create'}:
		frappe.cache().delete_key('db_tables')
@staticmethod
def needs_formatting(result, formatted):
	"""Returns True if the first row in the result has a Date, Datetime,
	Long Int — or, when `formatted` is set, any int/float."""
	if result and result[0]:
		# only the first row is inspected; assumes homogeneous columns
		for v in result[0]:
			if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
				return True
			if formatted and isinstance(v, (int, float)):
				return True

	return False
def get_description(self):
	"""Returns result metadata (the DB-API cursor description tuple)."""
	return self._cursor.description
@staticmethod
def convert_to_lists(res, formatted=0, as_utf8=0):
	"""Convert a tuple-of-tuples result set to list-of-lists (internal).

	`formatted` is accepted for interface compatibility but not used here.
	"""
	def _convert(val):
		if as_utf8 and isinstance(val, text_type):
			return val.encode('utf-8')
		return val

	return [[_convert(val) for val in row] for row in res]
def build_conditions(self, filters):
	"""Convert filters sent as dict, lists to SQL conditions. filter's key
	is passed by map function, build conditions like:

	* coalesce(`fieldname`, default_value) = %(fieldname)s
	* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s

	Returns a ``(conditions_sql, values_dict)`` pair suitable for
	substitution by the driver.
	"""
	conditions = []
	values = {}

	def _build_condition(key):
		"""
		filter's key is passed by map function
		build conditions like:
			* coalesce(`fieldname`, default_value) = %(fieldname)s
			* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s

		NOTE: closes over and mutates `conditions`/`values` above.
		"""
		_operator = "="
		_rhs = " %(" + key + ")s"
		value = filters.get(key)
		values[key] = value
		if isinstance(value, (list, tuple)):
			# value is a tuple like ("!=", 0)
			_operator = value[0]
			values[key] = value[1]
			if isinstance(value[1], (tuple, list)):
				# value is a list in tuple ("in", ("A", "B"))
				# list operand is inlined (escaped), not parameterized
				_rhs = " ({0})".format(", ".join([self.escape(v) for v in value[1]]))
				del values[key]

		if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
			# unknown operators silently fall back to equality
			_operator = "="

		if "[" in key:
			# "field[default]" syntax -> coalesce(`field`, default)
			split_key = key.split("[")
			condition = "coalesce(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
				+ _operator + _rhs
		else:
			condition = "`" + key + "` " + _operator + _rhs

		conditions.append(condition)

	if isinstance(filters, int):
		# docname is a number, convert to string
		filters = str(filters)

	if isinstance(filters, string_types):
		# a bare name means filter on the `name` column
		filters = { "name": filters }

	for f in filters:
		_build_condition(f)

	return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
	"""Returns `get_value` with fieldname='*' (i.e. the whole row)."""
	return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
	debug=False, order_by=None, cache=False, for_update=False):
	"""Returns a document property or list of properties.

	:param doctype: DocType name.
	:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
	:param fieldname: Column name.
	:param ignore: Don't raise exception if table, column is missing.
	:param as_dict: Return values as dict.
	:param debug: Print query in error log.
	:param order_by: Column to order by
	:param cache: Cache the result locally (string filters only).
	:param for_update: Add a row-level lock (SELECT ... FOR UPDATE).

	Example:

		# return first customer starting with a
		frappe.db.get_value("Customer", {"name": ("like a%")})

		# return last login of **User** `test@example.com`
		frappe.db.get_value("User", "test@example.com", "last_login")

		last_login, last_ip = frappe.db.get_value("User", "test@example.com",
			["last_login", "last_ip"])

		# returns default date_format
		frappe.db.get_value("System Settings", None, "date_format")
	"""
	ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
		order_by, cache=cache, for_update=for_update)

	# single field requested -> unwrap to the scalar; multiple fields or
	# as_dict -> return the first row as-is; no rows -> None.
	# NOTE(review): the and/or chain returns ret[0][0] when as_dict is set
	# but ret[0] is empty/falsy — presumably rows are never empty here;
	# verify before relying on it.
	return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
	debug=False, order_by=None, update=None, cache=False, for_update=False):
	"""Returns multiple document properties.

	:param doctype: DocType name.
	:param filters: Filters like `{"x":"y"}` or name of the document.
	:param fieldname: Column name (str) or list of column names.
	:param ignore: Don't raise exception if table, column is missing.
	:param as_dict: Return values as dict.
	:param debug: Print query in error log.
	:param order_by: Column to order by (defaults to 'modified desc').
	:param update: Dict merged into every returned row (as_dict only).
	:param cache: Cache the result locally (string filters only).
	:param for_update: Add a row-level lock (SELECT ... FOR UPDATE).

	Example:

		# return first customer starting with a
		customers = frappe.db.get_values("Customer", {"name": ("like a%")})

		# return last login of **User** `test@example.com`
		user = frappe.db.get_values("User", "test@example.com", "*")[0]
	"""
	out = None
	if cache and isinstance(filters, string_types) and \
		(doctype, filters, fieldname) in self.value_cache:
		return self.value_cache[(doctype, filters, fieldname)]

	if not order_by: order_by = 'modified desc'

	if isinstance(filters, list):
		# list of names -> bulk lookup
		out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)

	else:
		fields = fieldname
		if fieldname!="*":
			if isinstance(fieldname, string_types):
				fields = [fieldname]
			else:
				fields = fieldname

		if (filters is not None) and (filters!=doctype or doctype=="DocType"):
			try:
				out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update, for_update=for_update)
			except Exception as e:
				if ignore and (frappe.db.is_missing_column(e) or frappe.db.is_table_missing(e)):
					# table or column not found, return None
					out = None
				elif (not ignore) and frappe.db.is_table_missing(e):
					# table not found, look in singles
					out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

					if not out and frappe.get_meta(doctype).get('is_virtual'):
						# check for virtual doctype
						out = self.get_value_from_virtual_doctype(fields, filters, doctype, as_dict, debug, update)

				else:
					raise
		else:
			# filters == doctype (and not "DocType") means a Single DocType
			out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

	if cache and isinstance(filters, string_types):
		self.value_cache[(doctype, filters, fieldname)] = out

	return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
	"""Get values from `tabSingles` (Single DocTypes) (internal).

	:param fields: List of fields, or "*" for all.
	:param filters: Filters (dict) to match against the single's values.
	:param doctype: DocType name.
	:param as_dict: Return a one-element list of dicts.
	:param update: Dict merged into the returned dict (as_dict only).
	"""
	# TODO
	# if not frappe.model.meta.is_single(doctype):
	# 	raise frappe.DoesNotExistError("DocType", doctype)

	if fields=="*" or isinstance(filters, dict):
		# check if single doc matches with filters
		values = self.get_singles_dict(doctype)
		if isinstance(filters, dict):
			for key, value in filters.items():
				if values.get(key) != value:
					return []

		if as_dict:
			return values and [values] or []

		if isinstance(fields, list):
			return [map(values.get, fields)]

	else:
		# fetch only the requested fields
		r = self.sql("""select field, value
			from `tabSingles` where field in (%s) and doctype=%s"""
				% (', '.join(['%s'] * len(fields)), '%s'),
				tuple(fields) + (doctype,), as_dict=False, debug=debug)

		if as_dict:
			if r:
				r = frappe._dict(r)
				if update:
					r.update(update)
				return [r]
			else:
				return []
		else:
			return r and [[i[1] for i in r]] or []
def get_value_from_virtual_doctype(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
	"""Return a single value from a virtual doctype (delegates to the
	doctype controller's own get_value)."""
	return frappe.get_doc(doctype).get_value(fields, filters, as_dict=as_dict, debug=debug, update=update)
def get_singles_dict(self, doctype, debug = False):
	"""Return all field/value pairs of a Single DocType as a frappe._dict.

	:param doctype: DocType of the single object whose values are requested.
	(`debug` is accepted for interface compatibility but not forwarded.)

	Example:

		# Get column and value of the single doctype Accounts Settings
		account_settings = frappe.db.get_singles_dict("Accounts Settings")
	"""
	rows = self.sql("""
		SELECT field, value
		FROM `tabSingles`
		WHERE doctype = %s
	""", doctype)
	return frappe._dict(rows)
@staticmethod
def get_all(*args, **kwargs):
	"""Alias for `frappe.get_all` (no permission checks)."""
	return frappe.get_all(*args, **kwargs)
@staticmethod
def get_list(*args, **kwargs):
	"""Alias for `frappe.get_list` (applies permission checks)."""
	return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
	"""Get property of Single DocType. Cached locally by default.

	:param doctype: DocType of the single object whose value is requested
	:param fieldname: `fieldname` of the property whose value is requested
	:param cache: accepted for interface compatibility; caching is always on.

	Example:

		# Get the default value of the company from the Global Defaults doctype.
		company = frappe.db.get_single_value('Global Defaults', 'default_company')
	"""
	if doctype not in self.value_cache:
		# Bug fix: the previous chained assignment
		# `self.value_cache = self.value_cache[doctype] = {}` replaced the
		# WHOLE cache with a fresh self-referential dict, discarding the
		# cached values of every other doctype on each miss.
		self.value_cache[doctype] = {}

	if fieldname in self.value_cache[doctype]:
		return self.value_cache[doctype][fieldname]

	val = self.sql("""select `value` from
		`tabSingles` where `doctype`=%s and `field`=%s""", (doctype, fieldname))
	val = val[0][0] if val else None

	df = frappe.get_meta(doctype).get_field(fieldname)

	if not df:
		frappe.throw(_('Invalid field name: {0}').format(frappe.bold(fieldname)), self.InvalidColumnName)

	if df.fieldtype in frappe.model.numeric_fieldtypes:
		# numeric fields are stored as text in tabSingles; coerce back
		val = cint(val)

	self.value_cache[doctype][fieldname] = val

	return val
def get_singles_value(self, *args, **kwargs):
	"""Alias for get_single_value"""
	return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None, for_update=False):
	"""Internal. Build and run the SELECT for `get_values` against a
	regular (non-single) doctype table."""
	fl = []
	if isinstance(fields, (list, tuple)):
		for f in fields:
			if "(" in f or " as " in f: # function
				# expressions/aliases are passed through unquoted
				fl.append(f)
			else:
				fl.append("`" + f + "`")
		fl = ", ".join(fl)
	else:
		fl = fields
		if fields=="*":
			as_dict = True

	conditions, values = self.build_conditions(filters)

	order_by = ("order by " + order_by) if order_by else ""

	r = self.sql("select {fields} from `tab{doctype}` {where} {conditions} {order_by} {for_update}"
		.format(
			for_update = 'for update' if for_update else '',
			fields = fl,
			doctype = doctype,
			where = "where" if conditions else "",
			conditions = conditions,
			order_by = order_by),
		values, as_dict=as_dict, debug=debug, update=update)

	return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
	"""Internal. Fetch (name, field) pairs for a list of document names.

	NOTE(review): returns a list of rows when names are given but an empty
	dict when the (filtered) list is empty — callers appear to rely only on
	truthiness; confirm before changing.
	"""
	# drop falsy entries (None, '') before querying
	names = list(filter(None, names))
	if names:
		return self.get_all(doctype,
			fields=['name', field],
			filters=[['name', 'in', names]],
			debug=debug, as_list=1)
	else:
		return {}
def update(self, *args, **kwargs):
	"""Update multiple values. Alias for `set_value`."""
	return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val=None, modified=None, modified_by=None,
	update_modified=True, debug=False, for_update=True):
	"""Set a single value in the database, do not call the ORM triggers
	but update the modified timestamp (unless specified not to).

	**Warning:** this function will not call Document events and should be avoided in normal cases.

	:param dt: DocType name.
	:param dn: Document name. If equal to `dt` the value is stored in `tabSingles`.
	:param field: Property / field name or dictionary of values to be updated
	:param val: Value to be updated.
	:param modified: Use this as the `modified` timestamp.
	:param modified_by: Set this user as `modified_by`.
	:param update_modified: default True. Set as false, if you don't want to update the timestamp.
	:param debug: Print the query in the developer / js console.
	:param for_update: Will add a row-level lock to the value that is being set so that it can be released on commit.
	"""
	if not modified:
		modified = now()
	if not modified_by:
		modified_by = frappe.session.user

	to_update = {}
	if update_modified:
		to_update = {"modified": modified, "modified_by": modified_by}

	if isinstance(field, dict):
		to_update.update(field)
	else:
		to_update.update({field: val})

	if dn and dt!=dn:
		# with table
		set_values = []
		for key in to_update:
			set_values.append('`{0}`=%({0})s'.format(key))

		# get_values locks the matched rows when for_update is set
		for name in self.get_values(dt, dn, 'name', for_update=for_update):
			values = dict(name=name[0])
			values.update(to_update)

			self.sql("""update `tab{0}`
				set {1} where name=%(name)s""".format(dt, ', '.join(set_values)),
				values, debug=debug)

	else:
		# for singles: delete-then-insert each field in tabSingles
		keys = list(to_update)
		self.sql('''
			delete from `tabSingles`
			where field in ({0}) and
				doctype=%s'''.format(', '.join(['%s']*len(keys))),
				list(keys) + [dt], debug=debug)
		for key, value in iteritems(to_update):
			self.sql('''insert into `tabSingles` (doctype, field, value) values (%s, %s, %s)''',
				(dt, key, value), debug=debug)

	# invalidate local and shared caches for this document
	if dt in self.value_cache:
		del self.value_cache[dt]

	frappe.clear_document_cache(dt, dn)
@staticmethod
def set(doc, field, val):
	"""Set value in document (calls `doc.db_set`). **Avoid**"""
	doc.db_set(field, val)
def touch(self, doctype, docname):
	"""Update the `modified` timestamp of the given document and return
	the timestamp used."""
	timestamp = now()
	query = """update `tab{doctype}` set `modified`=%s
		where name=%s""".format(doctype=doctype)
	self.sql(query, (timestamp, docname))
	return timestamp
@staticmethod
def set_temp(value):
	"""Set a temporary value in the cache and return its generated key."""
	key = frappe.generate_hash()
	frappe.cache().hset("temp", key, value)
	return key
@staticmethod
def get_temp(key):
	"""Return the temporary value stored under `key`.

	NOTE(review): despite the original docstring, `hget` does NOT delete
	the value — it remains in the cache.
	"""
	return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
	"""Save a global key value. Global values will be automatically set if they match fieldname."""
	self.set_default(key, val, user)
def get_global(self, key, user='__global'):
	"""Returns a global key value."""
	return self.get_default(key, user)
def get_default(self, key, parent="__default"):
	"""Return the default value for `key` under `parent`; if multiple
	defaults exist, return the first one.

	Fixes two pitfalls of the old ``isinstance(d, list) and d[0] or d``
	expression: an empty list raised IndexError, and a falsy first element
	(0, '', None) made it return the whole list instead of that element.
	"""
	d = self.get_defaults(key, parent)
	if isinstance(d, list):
		return d[0] if d else None
	return d
@staticmethod
def set_default(key, val, parent="__default", parenttype=None):
	"""Sets a global / user default value."""
	frappe.defaults.set_default(key, val, parent, parenttype)
@staticmethod
def add_default(key, val, parent="__default", parenttype=None):
	"""Append a default value for a key, there can be multiple default values for a particular key."""
	frappe.defaults.add_default(key, val, parent, parenttype)
@staticmethod
def get_defaults(key=None, parent="__default"):
	"""Return all defaults for `parent`, or only the default for `key`.

	When the raw key is absent, falls back to the scrubbed (snake_case)
	form of the key.
	"""
	all_defaults = frappe.defaults.get_defaults(parent)
	if not key:
		return all_defaults

	value = all_defaults.get(key, None)
	scrubbed = frappe.scrub(key)
	if not value and key != scrubbed:
		value = all_defaults.get(scrubbed, None)
	return value
def begin(self):
	"""Start a new transaction (`START TRANSACTION`)."""
	self.sql("START TRANSACTION")
def commit(self):
	"""Commit current transaction. Calls SQL `COMMIT`.

	Runs registered before-commit hooks first, then flushes realtime
	events, queued background jobs and link counts.
	"""
	for method in frappe.local.before_commit:
		frappe.call(method[0], *(method[1] or []), **(method[2] or {}))

	self.sql("commit")

	# a committed transaction can no longer be rolled back
	frappe.local.rollback_observers = []
	self.flush_realtime_log()
	enqueue_jobs_after_commit()
	flush_local_link_count()
def add_before_commit(self, method, args=None, kwargs=None):
	"""Register `method(*args, **kwargs)` to be called just before the next commit."""
	frappe.local.before_commit.append([method, args, kwargs])
@staticmethod
def flush_realtime_log():
	"""Emit all buffered realtime events via redis and clear the buffer."""
	for args in frappe.local.realtime_log:
		frappe.realtime.emit_via_redis(*args)

	frappe.local.realtime_log = []
def rollback(self):
	"""`ROLLBACK` current transaction, start a fresh one, and notify
	registered rollback observers."""
	self.sql("rollback")
	self.begin()
	for obj in frappe.local.rollback_observers:
		if hasattr(obj, "on_rollback"):
			obj.on_rollback()
	frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
	"""Return truthy if field `fn` exists on doctype `dt` (via DocField)."""
	return self.exists('DocField', {
		'fieldname': fn,
		'parent': dt
	})
def table_exists(self, doctype):
	"""Returns True if the backing table for `doctype` exists."""
	table_name = "tab" + doctype
	return table_name in self.get_tables()
def has_table(self, doctype):
	"""Alias for `table_exists`."""
	return self.table_exists(doctype)
def get_tables(self):
	"""Return the set of table names in the current database, cached under
	'db_tables' (invalidated by `clear_db_table_cache` on CREATE/DROP)."""
	tables = frappe.cache().get_value('db_tables')
	if not tables:
		table_rows = self.sql("""
			SELECT table_name
			FROM information_schema.tables
			WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
		""")
		tables = {d[0] for d in table_rows}
		frappe.cache().set_value('db_tables', tables)
	return tables
def a_row_exists(self, doctype):
	"""Returns truthy (the first row) if at least one row exists for `doctype`."""
	return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None, cache=False):
	"""Returns truthy if document exists, else None.

	:param dt: DocType name, or a filter dict containing a 'doctype' key.
	:param dn: Document name or filter dict (ignored when `dt` is a dict).

	NOTE(review): falls through and implicitly returns None when `dt` is
	neither a string nor a dict with 'doctype'.
	"""
	if isinstance(dt, string_types):
		if dt!="DocType" and dt==dn:
			return True # single always exists (!)
		try:
			return self.get_value(dt, dn, "name", cache=cache)
		except Exception:
			# missing table/column etc. is treated as "does not exist"
			return None

	elif isinstance(dt, dict) and dt.get('doctype'):
		try:
			conditions = []
			for d in dt:
				if d == 'doctype': continue
				conditions.append([d, '=', dt[d]])
			return self.get_all(dt['doctype'], filters=conditions, as_list=1)
		except Exception:
			return None
def count(self, dt, filters=None, debug=False, cache=False):
	"""Returns `COUNT(*)` for given DocType and filters.

	Unfiltered counts can be cached for a day when `cache` is set;
	filtered counts are never cached.
	"""
	if cache and not filters:
		cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
		if cache_count is not None:
			return cache_count
	if filters:
		conditions, filters = self.build_conditions(filters)
		count = self.sql("""select count(*)
			from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
		return count
	else:
		count = self.sql("""select count(*)
			from `tab%s`""" % (dt,))[0][0]

		if cache:
			frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)

		return count
@staticmethod
def format_date(date):
	"""Return `date` formatted as 'YYYY-MM-DD' (parses strings via getdate)."""
	return getdate(date).strftime("%Y-%m-%d")
@staticmethod
def format_datetime(datetime):
	"""Normalize a datetime (object or string) to 'YYYY-MM-DD HH:MM:SS.ffffff'.

	NOTE(review): the parameter shadows the module-level `datetime` import
	inside this function; renaming it would break keyword callers, so it is
	documented rather than changed.
	"""
	if not datetime:
		# sentinel for "no datetime"
		return '0001-01-01 00:00:00.000000'

	if isinstance(datetime, frappe.string_types):
		if ':' not in datetime:
			# date-only string: append midnight time
			datetime = datetime + ' 00:00:00.000000'
	else:
		datetime = datetime.strftime("%Y-%m-%d %H:%M:%S.%f")

	return datetime
def get_creation_count(self, doctype, minutes):
	"""Get count of records created in the last x minutes"""
	from frappe.utils import now_datetime
	from dateutil.relativedelta import relativedelta

	return self.sql("""select count(name) from `tab{doctype}`
		where creation >= %s""".format(doctype=doctype),
		now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_db_table_columns(self, table):
	"""Returns list of column names from given table, cached in redis
	under 'table_columns'."""
	columns = frappe.cache().hget('table_columns', table)
	if columns is None:
		columns = [r[0] for r in self.sql('''
			select column_name
			from information_schema.columns
			where table_name = %s ''', table)]

		if columns:
			# only cache non-empty results (missing tables stay uncached)
			frappe.cache().hset('table_columns', table, columns)

	return columns
def get_table_columns(self, doctype):
	"""Returns list of column names from given doctype.

	NOTE(review): `self.TableMissingError` is not defined in this class —
	presumably provided by a backend subclass; verify before relying on it.
	"""
	columns = self.get_db_table_columns('tab' + doctype)
	if not columns:
		raise self.TableMissingError('DocType', doctype)
	return columns
def has_column(self, doctype, column):
	"""Returns True if column exists in database."""
	return column in self.get_table_columns(doctype)
def get_column_type(self, doctype, column):
	"""Return the database column type of `column` on `tab{doctype}`.

	Fix: uses driver parameter substitution instead of string formatting so
	quote characters in the arguments cannot break (or inject into) the SQL.
	"""
	return self.sql('''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
		WHERE table_name = %s AND column_name = %s''',
		('tab' + doctype, column))[0][0]
def has_index(self, table_name, index_name):
	"""Hook: subclasses report whether `index_name` exists on `table_name`."""
	pass
def add_index(self, doctype, fields, index_name=None):
	"""Hook: subclasses create an index on `fields` for `doctype`."""
	pass
def add_unique(self, doctype, fields, constraint_name=None):
	"""Hook: subclasses create a unique constraint on `fields` for `doctype`."""
	pass
@staticmethod
def get_index_name(fields):
	"""Build an index name from the field names, dropping any index length
	specifiers such as `(10)`."""
	joined = "_".join(fields) + "_index"
	# remove index length if present e.g. (10) from index name
	return re.sub(r"\s*\([^)]+\)\s*", r"", joined)
def get_system_setting(self, key):
	"""Return the System Settings value for `key`, loading (and caching)
	the whole singles dict on first use."""
	def _load_system_settings():
		return self.get_singles_dict("System Settings")
	return frappe.cache().get_value("system_settings", _load_system_settings).get(key)
def close(self):
	"""Close database connection and drop the cursor reference."""
	if self._conn:
		# self._cursor.close()
		# NOTE(review): only the connection is closed; the cursor close is
		# deliberately commented out upstream
		self._conn.close()
		self._cursor = None
		self._conn = None
@staticmethod
def escape(s, percent=True):
	"""Escape quotes and percent in given string."""
	# implemented in specific class
	pass
@staticmethod
def is_column_missing(e):
	"""Alias: delegate to the backend's `is_missing_column` check."""
	return frappe.db.is_missing_column(e)
def get_descendants(self, doctype, name):
	'''Return descendants of the current record, using the nested-set
	(lft/rgt) columns of the tree doctype.'''
	node_location_indexes = self.get_value(doctype, name, ('lft', 'rgt'))
	if node_location_indexes:
		lft, rgt = node_location_indexes
		return self.sql_list('''select name from `tab{doctype}`
			where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
	else:
		# when document does not exist
		return []
def is_missing_table_or_column(self, e):
	"""Return True if exception `e` is a missing-table or missing-column error."""
	return self.is_missing_column(e) or self.is_missing_table(e)
def multisql(self, sql_dict, values=(), **kwargs):
	"""Run the variant of a query matching the current backend.

	:param sql_dict: dict mapping dialect name ('mariadb'/'postgres') to SQL.
	"""
	current_dialect = frappe.db.db_type or 'mariadb'
	query = sql_dict.get(current_dialect)
	return self.sql(query, values, **kwargs)
def delete(self, doctype, conditions, debug=False):
	"""Delete rows of `doctype` matching `conditions` (dict/str filters).

	Raises a validation error when no conditions are given — a guard
	against accidentally deleting the whole table.
	"""
	if conditions:
		conditions, values = self.build_conditions(conditions)
		return self.sql("DELETE FROM `tab{doctype}` where {conditions}".format(
			doctype=doctype,
			conditions=conditions
		), values, debug=debug)
	else:
		frappe.throw(_('No conditions provided'))
def get_last_created(self, doctype):
	"""Return the `creation` timestamp of the newest record, or None.

	NOTE(review): `('creation')` is just the string 'creation' (not a
	tuple) — presumably get_all accepts a bare field string; verify.
	"""
	last_record = self.get_all(doctype, ('creation'), limit=1, order_by='creation desc')
	if last_record:
		return get_datetime(last_record[0].creation)
	else:
		return None
def clear_table(self, doctype):
	"""TRUNCATE the doctype's table (implicitly commits on MariaDB)."""
	self.sql('truncate `tab{}`'.format(doctype))
def log_touched_tables(self, query, values=None):
	"""Record table names touched by a write query in
	`frappe.flags.touched_tables` (used during migrate)."""
	if values:
		query = frappe.safe_decode(self._cursor.mogrify(query, values))
	if query.strip().lower().split()[0] in ('insert', 'delete', 'update', 'alter'):

		# single_word_regex is designed to match following patterns
		# `tabXxx`, tabXxx and "tabXxx"

		# multi_word_regex is designed to match following patterns
		# `tabXxx Xxx` and "tabXxx Xxx"

		# ([`"]?) Captures " or ` at the begining of the table name (if provided)
		# \1 matches the first captured group (quote character) at the end of the table name
		# multi word table name must have surrounding quotes.

		# (tab([A-Z]\w+)( [A-Z]\w+)*) Captures table names that start with "tab"
		# and are continued with multiple words that start with a captital letter
		# e.g. 'tabXxx' or 'tabXxx Xxx' or 'tabXxx Xxx Xxx' and so on

		single_word_regex = r'([`"]?)(tab([A-Z]\w+))\1'
		multi_word_regex = r'([`"])(tab([A-Z]\w+)( [A-Z]\w+)+)\1'
		tables = []
		for regex in (single_word_regex, multi_word_regex):
			tables += [groups[1] for groups in re.findall(regex, query)]

		if frappe.flags.touched_tables is None:
			frappe.flags.touched_tables = set()
		frappe.flags.touched_tables.update(tables)
def bulk_insert(self, doctype, fields, values, ignore_duplicates=False):
	"""
	Insert multiple records at a time, in chunks of 10,000 rows.

	:param doctype: Doctype name
	:param fields: list of field names
	:param values: list of rows; each row is a sequence ordered as `fields`
	:param ignore_duplicates: use INSERT IGNORE to skip duplicate keys

	Bug fix: the old flush condition
	``if idx and (idx%10000 == 0 or idx < len(values)-1)`` issued a separate
	INSERT for almost every row (defeating the purpose of bulk insert) and
	never flushed the final row(s), silently dropping data.
	"""
	column_list = ", ".join("`" + field + "`" for field in fields)
	for start in range(0, len(values), 10000):
		chunk = [tuple(value) for value in values[start:start + 10000]]
		if not chunk:
			continue
		self.sql("""INSERT {ignore_duplicates} INTO `tab{doctype}` ({fields}) VALUES {values}""".format(
			ignore_duplicates="IGNORE" if ignore_duplicates else "",
			doctype=doctype,
			fields=column_list,
			values=", ".join(['%s'] * len(chunk))
		), tuple(chunk))
def enqueue_jobs_after_commit():
	"""Push jobs that were queued during the transaction
	(`frappe.flags.enqueue_after_commit`) onto their background queues,
	then clear the list."""
	from frappe.utils.background_jobs import execute_job, get_queue

	pending = frappe.flags.enqueue_after_commit
	if pending and len(pending) > 0:
		for job in pending:
			queue = get_queue(job.get("queue"), is_async=job.get("is_async"))
			queue.enqueue_call(execute_job, timeout=job.get("timeout"),
				kwargs=job.get("queue_args"))
		frappe.flags.enqueue_after_commit = []
# Helpers
def _cast_result(doctype, result):
	"""Cast each (field, value) pair in `result` to its fieldtype according
	to the doctype's meta; return `result` unchanged if the doctype does
	not exist."""
	casted = []
	try:
		for field, value in result:
			df = frappe.get_meta(doctype).get_field(field)
			if df:
				value = cast_fieldtype(df.fieldtype, value)
			casted.append((field, value))
	except frappe.exceptions.DoesNotExistError:
		return result
	return tuple(casted)
| 31.182927 | 122 | 0.686682 |
from __future__ import unicode_literals
import re
import time
import frappe
import datetime
import frappe.defaults
import frappe.model.meta
from frappe import _
from time import time
from frappe.utils import now, getdate, cast_fieldtype, get_datetime
from frappe.model.utils.link_count import flush_local_link_count
from frappe.utils import cint
from six import (
integer_types,
string_types,
text_type,
iteritems
)
class Database(object):
VARCHAR_LEN = 140
MAX_COLUMN_LENGTH = 64
OPTIONAL_COLUMNS = ["_user_tags", "_comments", "_assign", "_liked_by"]
DEFAULT_SHORTCUTS = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
STANDARD_VARCHAR_COLUMNS = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')
DEFAULT_COLUMNS = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',
'parentfield', 'parenttype', 'idx']
class InvalidColumnName(frappe.ValidationError): pass
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default=0, port=None):
self.setup_type_map()
self.host = host or frappe.conf.db_host or '127.0.0.1'
self.port = port or frappe.conf.db_port or ''
self.user = user or frappe.conf.db_name
self.db_name = frappe.conf.db_name
self._conn = None
if ac_name:
self.user = ac_name or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
def setup_type_map(self):
pass
def connect(self):
self.cur_db_name = self.user
self._conn = self.get_connection()
self._cursor = self._conn.cursor()
frappe.local.rollback_observers = []
def use(self, db_name):
self._conn.select_db(db_name)
def get_connection(self):
pass
def get_database_size(self):
pass
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None, explain=False):
if re.search(r'ifnull\(', query, flags=re.IGNORECASE):
query = re.sub(r'ifnull\(', 'coalesce(', query, flags=re.IGNORECASE)
if not self._conn:
self.connect()
self.check_transaction_status(query)
self.clear_db_table_cache(query)
if auto_commit: self.commit()
try:
if debug:
time_start = time()
self.log_query(query, values, debug, explain)
if values!=():
if isinstance(values, dict):
values = dict(values)
if not isinstance(values, (dict, tuple, list)):
values = (values,)
self._cursor.execute(query, values)
if frappe.flags.in_migrate:
self.log_touched_tables(query, values)
else:
self._cursor.execute(query)
if frappe.flags.in_migrate:
self.log_touched_tables(query)
if debug:
time_end = time()
frappe.errprint(("Execution time: {0} sec").format(round(time_end - time_start, 2)))
except Exception as e:
if frappe.conf.db_type == 'postgres':
self.rollback()
elif self.is_syntax_error(e):
frappe.errprint('Syntax error in query:')
frappe.errprint(query)
if ignore_ddl and (self.is_missing_column(e) or self.is_missing_table(e) or self.cant_drop_field_or_key(e)):
pass
else:
raise
if auto_commit: self.commit()
if not self._cursor.description:
return ()
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def log_query(self, query, values, debug, explain):
    """Emit the query to whichever debug/log channels are enabled."""
    # For unit tests: print the interpolated query when the flag is set.
    if frappe.conf.get('allow_tests') and frappe.cache().get_value('flag_print_sql'):
        print(self.mogrify(query, values))

    if debug:
        if explain and query.strip().lower().startswith('select'):
            self.explain_query(query, values)
        frappe.errprint(self.mogrify(query, values))

    # Verbose query logging, enabled with `logging = 2` in site config.
    if (frappe.conf.get("logging") or False)==2:
        frappe.log("<<<< query")
        frappe.log(self.mogrify(query, values))
        frappe.log(">>>>")
def mogrify(self, query, values):
    """Return *query* with *values* interpolated, for logging/debugging.

    Falls back to the raw (query, values) pair when the cursor does not
    support mogrify or when interpolation fails for any reason.
    """
    if not values:
        return query
    try:
        return self._cursor.mogrify(query, values)
    except Exception:
        # Was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; catch Exception so those propagate.
        return (query, values)
def explain_query(self, query, values=None):
    """Print the database's EXPLAIN plan for *query* to stderr.

    Any failure is reported but never propagated, since this is a
    debugging aid only.
    """
    try:
        frappe.errprint("--- query explain ---")
        explain_sql = "explain " + query
        if values is None:
            self._cursor.execute(explain_sql)
        else:
            self._cursor.execute(explain_sql, values)
        import json
        frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
        frappe.errprint("--- query explain end ---")
    except Exception:
        frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
    """Run *query* and return a flat list of the first column of each row."""
    rows = self.sql(query, values, debug=debug)
    return [row[0] for row in rows]
def sql_ddl(self, query, values=(), debug=False):
    """Commit the open transaction, then run a DDL statement.

    NOTE: *values* is accepted for signature compatibility but is not
    passed through to the query.
    """
    self.commit()
    self.sql(query, debug=debug)
def check_transaction_status(self, query):
    """Guard against statements that would implicitly commit and track writes.

    Raises when DDL is attempted while uncommitted writes exist, resets
    the write counter on commit/rollback, and counts DML statements so
    that oversized requests can be rejected (or auto-committed).
    """
    if self.transaction_writes and \
        query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
        raise Exception('This statement can cause implicit commit')

    if query and query.strip().lower() in ('commit', 'rollback'):
        self.transaction_writes = 0

    # BUGFIX: previously `query[:6]` was evaluated without a None guard,
    # so a None query raised TypeError here.
    if query and query[:6].lower() in ('update', 'insert', 'delete'):
        self.transaction_writes += 1
        if self.transaction_writes > 200000:
            if self.auto_commit_on_many_writes:
                self.commit()
            else:
                frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
    """Return all rows from the cursor as a list of frappe._dict objects."""
    result = self._cursor.fetchall()
    ret = []
    if result:
        # Column names come from the cursor description of the last query.
        keys = [column[0] for column in self._cursor.description]

    for r in result:
        values = []
        for value in r:
            if as_utf8 and isinstance(value, text_type):
                value = value.encode('utf-8')
            values.append(value)

        ret.append(frappe._dict(zip(keys, values)))
    return ret

@staticmethod
def clear_db_table_cache(query):
    # DDL changes the set of tables, so drop the cached table list.
    if query and query.strip().split()[0].lower() in {'drop', 'create'}:
        frappe.cache().delete_key('db_tables')

@staticmethod
def needs_formatting(result, formatted):
    """Check (from the first row) whether values need display formatting."""
    if result and result[0]:
        for v in result[0]:
            if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
                return True
            if formatted and isinstance(v, (int, float)):
                return True

    return False

def get_description(self):
    """Return the DB-API cursor description for the last result set."""
    return self._cursor.description
@staticmethod
def convert_to_lists(res, formatted=0, as_utf8=0):
nres = []
for r in res:
nr = []
for val in r:
if as_utf8 and isinstance(val, text_type):
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def build_conditions(self, filters):
    """Convert a filters dict (or a name string/int) into a WHERE fragment.

    Returns (conditions_sql, values): the SQL uses %(key)s placeholders
    resolved from the values dict.
    """
    conditions = []
    values = {}

    def _build_condition(key):
        # Append one "`column` operator value" condition for *key*.
        _operator = "="
        _rhs = " %(" + key + ")s"
        value = filters.get(key)
        values[key] = value
        if isinstance(value, (list, tuple)):
            # (operator, operand) form, e.g. (">", 5) or ("in", [...]).
            _operator = value[0]
            values[key] = value[1]
            if isinstance(value[1], (tuple, list)):
                # Inline the escaped list for IN / NOT IN conditions.
                _rhs = " ({0})".format(", ".join([self.escape(v) for v in value[1]]))
                del values[key]

        if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
            # Unknown operators fall back to equality (defensive).
            _operator = "="

        if "[" in key:
            # "col[default]" syntax -> coalesce(`col`, default).
            split_key = key.split("[")
            condition = "coalesce(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
                + _operator + _rhs
        else:
            condition = "`" + key + "` " + _operator + _rhs

        conditions.append(condition)

    if isinstance(filters, int):
        filters = str(filters)

    if isinstance(filters, string_types):
        # A bare document name means {"name": name}.
        filters = { "name": filters }

    for f in filters:
        _build_condition(f)

    return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
    """Return the first matching document with all columns."""
    return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)

def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
    debug=False, order_by=None, cache=False, for_update=False):
    """Return a single value (or row) for the first matching document.

    Returns None when nothing matches; a bare scalar when a single plain
    field was requested; otherwise the first row from get_values().
    """
    ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
        order_by, cache=cache, for_update=for_update)

    # and/or chain: multi-column or dict results are returned whole,
    # single-column rows are unwrapped. NOTE(review): a falsy single
    # value still unwraps via the final or-branch — kept for fidelity.
    return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
    debug=False, order_by=None, update=None, cache=False, for_update=False):
    """Return matching rows for *doctype* as a list.

    Dispatches to a many-names lookup, a regular table query, or the
    Singles table depending on the shape of *filters*. Results for
    string filters can be memoized in self.value_cache when *cache* is
    set.
    """
    out = None
    if cache and isinstance(filters, string_types) and \
        (doctype, filters, fieldname) in self.value_cache:
        return self.value_cache[(doctype, filters, fieldname)]

    if not order_by: order_by = 'modified desc'

    if isinstance(filters, list):
        # A list of document names -> one query for all of them.
        out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)

    else:
        fields = fieldname
        if fieldname!="*":
            if isinstance(fieldname, string_types):
                fields = [fieldname]
            else:
                # NOTE(review): re-assigns the same value; kept for fidelity.
                fields = fieldname

        if (filters is not None) and (filters!=doctype or doctype=="DocType"):
            try:
                out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update, for_update=for_update)
            except Exception as e:
                if ignore and (frappe.db.is_missing_column(e) or frappe.db.is_table_missing(e)):
                    # Table/column not created yet and caller opted in.
                    out = None

                elif (not ignore) and frappe.db.is_table_missing(e):
                    # Singles have no table of their own.
                    out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

                    if not out and frappe.get_meta(doctype).get('is_virtual'):
                        # Virtual doctypes resolve values via their controller.
                        out = self.get_value_from_virtual_doctype(fields, filters, doctype, as_dict, debug, update)

                else:
                    raise

        else:
            out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

    if cache and isinstance(filters, string_types):
        self.value_cache[(doctype, filters, fieldname)] = out

    return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
    """Fetch values for a Single doctype from `tabSingles`.

    Singles store one row per (doctype, field, value). "*" or dict
    filters are answered from the full singles dict; otherwise only the
    requested fields are queried. Returns a list with at most one entry
    (dict or list of values), or [].
    """
    if fields=="*" or isinstance(filters, dict):
        # Load every field, then verify any dict filters against it.
        values = self.get_singles_dict(doctype)
        if isinstance(filters, dict):
            for key, value in filters.items():
                if values.get(key) != value:
                    return []

        if as_dict:
            return values and [values] or []

        if isinstance(fields, list):
            # BUGFIX: was `[map(values.get, fields)]`, which under
            # Python 3 returns a lazy map object instead of the values.
            return [[values.get(d) for d in fields]]

    else:
        r = self.sql("""select field, value
            from `tabSingles` where field in (%s) and doctype=%s"""
                % (', '.join(['%s'] * len(fields)), '%s'),
                tuple(fields) + (doctype,), as_dict=False, debug=debug)

        if as_dict:
            if r:
                r = frappe._dict(r)
                if update:
                    r.update(update)
                return [r]
            else:
                return []
        else:
            return r and [[i[1] for i in r]] or []
def get_value_from_virtual_doctype(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
    """Delegate value lookup to the virtual doctype's own controller."""
    return frappe.get_doc(doctype).get_value(fields, filters, as_dict=as_dict, debug=debug, update=update)

def get_singles_dict(self, doctype, debug = False):
    """Return all field/value pairs of a Single doctype as a frappe._dict."""
    result = self.sql("""
        SELECT field, value
        FROM `tabSingles`
        WHERE doctype = %s
    """, doctype)
    dict_ = frappe._dict(result)

    return dict_

@staticmethod
def get_all(*args, **kwargs):
    # Thin passthrough so frappe.get_all is reachable from the db object.
    return frappe.get_all(*args, **kwargs)

@staticmethod
def get_list(*args, **kwargs):
    # Thin passthrough to frappe.get_list (the permission-checked variant).
    return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
    """Return the value of *fieldname* stored in the Single doctype *doctype*.

    Values are memoized per (doctype, fieldname) in ``self.value_cache``.
    NOTE: the ``cache`` flag is accepted for API compatibility but the
    cache is always consulted and populated.

    :raises InvalidColumnName: if *fieldname* is not a field of *doctype*.
    """
    if doctype not in self.value_cache:
        # BUGFIX: was `self.value_cache = self.value_cache[doctype] = {}`,
        # which replaced the whole cache (wiping every other doctype's
        # entries and creating a self-referencing dict). Only create this
        # doctype's bucket.
        self.value_cache[doctype] = {}

    if fieldname in self.value_cache[doctype]:
        return self.value_cache[doctype][fieldname]

    val = self.sql("""select `value` from
        `tabSingles` where `doctype`=%s and `field`=%s""", (doctype, fieldname))
    val = val[0][0] if val else None

    df = frappe.get_meta(doctype).get_field(fieldname)
    if not df:
        frappe.throw(_('Invalid field name: {0}').format(frappe.bold(fieldname)), self.InvalidColumnName)
    if df.fieldtype in frappe.model.numeric_fieldtypes:
        val = cint(val)

    self.value_cache[doctype][fieldname] = val

    return val
def get_singles_value(self, *args, **kwargs):
    """Alias for get_single_value."""
    return self.get_single_value(*args, **kwargs)

def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None, for_update=False):
    """Build and run a SELECT over `tab{doctype}` for *fields*/*filters*."""
    fl = []
    if isinstance(fields, (list, tuple)):
        for f in fields:
            if "(" in f or " as " in f:
                # Expression or aliased column: use verbatim.
                fl.append(f)
            else:
                fl.append("`" + f + "`")
        fl = ", ".join(fl)
    else:
        fl = fields
        if fields=="*":
            # Star select: rows only make sense keyed by column name.
            as_dict = True

    conditions, values = self.build_conditions(filters)

    order_by = ("order by " + order_by) if order_by else ""

    r = self.sql("select {fields} from `tab{doctype}` {where} {conditions} {order_by} {for_update}"
        .format(
            for_update = 'for update' if for_update else '',
            fields = fl,
            doctype = doctype,
            where = "where" if conditions else "",
            conditions = conditions,
            order_by = order_by),
        values, as_dict=as_dict, debug=debug, update=update)

    return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
names = list(filter(None, names))
if names:
return self.get_all(doctype,
fields=['name', field],
filters=[['name', 'in', names]],
debug=debug, as_list=1)
else:
return {}
def update(self, *args, **kwargs):
    """Alias for set_value."""
    return self.set_value(*args, **kwargs)

def set_value(self, dt, dn, field, val=None, modified=None, modified_by=None,
    update_modified=True, debug=False, for_update=True):
    """Set field(s) of document(s) directly in the database.

    *field* may be a single fieldname (paired with *val*) or a dict of
    {fieldname: value}. When *dn* equals *dt* the document is a Single
    and values are stored in `tabSingles`. modified/modified_by are
    stamped unless *update_modified* is False.
    """
    if not modified:
        modified = now()
    if not modified_by:
        modified_by = frappe.session.user

    to_update = {}
    if update_modified:
        to_update = {"modified": modified, "modified_by": modified_by}

    if isinstance(field, dict):
        to_update.update(field)
    else:
        to_update.update({field: val})

    if dn and dt!=dn:
        # Regular doctype: one UPDATE per matching document name.
        set_values = []
        for key in to_update:
            set_values.append('`{0}`=%({0})s'.format(key))

        for name in self.get_values(dt, dn, 'name', for_update=for_update):
            values = dict(name=name[0])
            values.update(to_update)

            self.sql("""update `tab{0}`
                set {1} where name=%(name)s""".format(dt, ', '.join(set_values)),
                values, debug=debug)

    else:
        # Single doctype: delete then re-insert the affected rows.
        keys = list(to_update)
        self.sql('''
            delete from `tabSingles`
            where field in ({0}) and
                doctype=%s'''.format(', '.join(['%s']*len(keys))),
            list(keys) + [dt], debug=debug)
        for key, value in iteritems(to_update):
            self.sql('''insert into `tabSingles` (doctype, field, value) values (%s, %s, %s)''',
                (dt, key, value), debug=debug)

    # Writing directly invalidates any cached values and documents.
    if dt in self.value_cache:
        del self.value_cache[dt]

    frappe.clear_document_cache(dt, dn)

@staticmethod
def set(doc, field, val):
    """Set a value on *doc* and persist it (delegates to doc.db_set)."""
    doc.db_set(field, val)
def touch(self, doctype, docname):
    """Update the modified timestamp of a document and return it."""
    modified = now()
    self.sql("""update `tab{doctype}` set `modified`=%s
        where name=%s""".format(doctype=doctype), (modified, docname))
    return modified

@staticmethod
def set_temp(value):
    """Stash *value* in the cache and return a generated key for it."""
    key = frappe.generate_hash()
    frappe.cache().hset("temp", key, value)
    return key

@staticmethod
def get_temp(key):
    """Fetch a value previously stored with set_temp."""
    return frappe.cache().hget("temp", key)

def set_global(self, key, val, user='__global'):
    """Set a default value shared by all users."""
    self.set_default(key, val, user)

def get_global(self, key, user='__global'):
    """Get a default value shared by all users."""
    return self.get_default(key, user)

def get_default(self, key, parent="__default"):
    """Return a single default value for *key*.

    NOTE(review): the and/or chain returns the whole list when its first
    element is falsy (e.g. "") — confirm callers rely on that before
    simplifying to a ternary.
    """
    d = self.get_defaults(key, parent)
    return isinstance(d, list) and d[0] or d

@staticmethod
def set_default(key, val, parent="__default", parenttype=None):
    """Set a default value (delegates to frappe.defaults)."""
    frappe.defaults.set_default(key, val, parent, parenttype)

@staticmethod
def add_default(key, val, parent="__default", parenttype=None):
    """Append a default value (delegates to frappe.defaults)."""
    frappe.defaults.add_default(key, val, parent, parenttype)

@staticmethod
def get_defaults(key=None, parent="__default"):
    """Return defaults for *parent*; a specific key's value when given."""
    if key:
        defaults = frappe.defaults.get_defaults(parent)
        d = defaults.get(key, None)
        if(not d and key != frappe.scrub(key)):
            # Retry with the scrubbed (snake_case) form of the key.
            d = defaults.get(frappe.scrub(key), None)
        return d
    else:
        return frappe.defaults.get_defaults(parent)
def begin(self):
    """Start an explicit transaction."""
    self.sql("START TRANSACTION")

def commit(self):
    """Commit the current transaction and run post-commit hooks."""
    # Callbacks registered via add_before_commit run first.
    for method in frappe.local.before_commit:
        frappe.call(method[0], *(method[1] or []), **(method[2] or {}))

    self.sql("commit")

    frappe.local.rollback_observers = []
    self.flush_realtime_log()
    enqueue_jobs_after_commit()
    flush_local_link_count()

def add_before_commit(self, method, args=None, kwargs=None):
    """Register *method* to be called just before the next commit."""
    frappe.local.before_commit.append([method, args, kwargs])

@staticmethod
def flush_realtime_log():
    """Emit buffered realtime events via redis and clear the buffer."""
    for args in frappe.local.realtime_log:
        frappe.realtime.emit_via_redis(*args)

    frappe.local.realtime_log = []

def rollback(self):
    """Roll back the transaction, restart it and notify observers."""
    self.sql("rollback")
    self.begin()
    for obj in frappe.local.rollback_observers:
        if hasattr(obj, "on_rollback"):
            obj.on_rollback()
    frappe.local.rollback_observers = []

def field_exists(self, dt, fn):
    """Return truthy when doctype *dt* declares a field named *fn*."""
    return self.exists('DocField', {
        'fieldname': fn,
        'parent': dt
    })
def table_exists(self, doctype):
    """Return True when the backing table `tab{doctype}` exists."""
    return ("tab" + doctype) in self.get_tables()

def has_table(self, doctype):
    """Alias for table_exists."""
    return self.table_exists(doctype)

def get_tables(self):
    """Return the set of table names, cached across requests."""
    tables = frappe.cache().get_value('db_tables')
    if not tables:
        table_rows = self.sql("""
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
        """)
        tables = {d[0] for d in table_rows}
        frappe.cache().set_value('db_tables', tables)
    return tables
def a_row_exists(self, doctype):
    """Return a truthy result when `tab{doctype}` has at least one row."""
    return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))

def exists(self, dt, dn=None, cache=False):
    """Check whether a document exists.

    *dt* may be a doctype name (with *dn* as the document name) or a
    filters dict containing a 'doctype' key. Returns a truthy value
    (name / matching rows) when found, otherwise None or falsy.
    """
    if isinstance(dt, string_types):
        if dt!="DocType" and dt==dn:
            # Singles always "exist": their name equals their doctype.
            return True
        try:
            return self.get_value(dt, dn, "name", cache=cache)
        except Exception:
            return None
    elif isinstance(dt, dict) and dt.get('doctype'):
        try:
            conditions = []
            for d in dt:
                if d == 'doctype': continue
                conditions.append([d, '=', dt[d]])
            return self.get_all(dt['doctype'], filters=conditions, as_list=1)
        except Exception:
            return None
def count(self, dt, filters=None, debug=False, cache=False):
    """Return the number of rows in `tab{dt}` matching *filters*.

    Unfiltered counts may be served from / stored in the cache for a day.
    """
    if cache and not filters:
        cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
        if cache_count is not None:
            return cache_count
    if filters:
        conditions, filters = self.build_conditions(filters)
        count = self.sql("""select count(*)
            from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
        return count
    else:
        count = self.sql("""select count(*)
            from `tab%s`""" % (dt,))[0][0]

        if cache:
            frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)

        return count
@staticmethod
def format_date(date):
    """Return *date* (coerced via getdate) formatted as 'YYYY-MM-DD'."""
    return getdate(date).strftime("%Y-%m-%d")
@staticmethod
def format_datetime(datetime):
if not datetime:
return '0001-01-01 00:00:00.000000'
if isinstance(datetime, frappe.string_types):
if ':' not in datetime:
datetime = datetime + ' 00:00:00.000000'
else:
datetime = datetime.strftime("%Y-%m-%d %H:%M:%S.%f")
return datetime
def get_creation_count(self, doctype, minutes):
    """Return how many *doctype* documents were created in the last *minutes*."""
    from frappe.utils import now_datetime
    from dateutil.relativedelta import relativedelta

    return self.sql("""select count(name) from `tab{doctype}`
        where creation >= %s""".format(doctype=doctype),
        now_datetime() - relativedelta(minutes=minutes))[0][0]

def get_db_table_columns(self, table):
    """Return the column names of *table*, cached per table."""
    columns = frappe.cache().hget('table_columns', table)
    if columns is None:
        columns = [r[0] for r in self.sql('''
            select column_name
            from information_schema.columns
            where table_name = %s ''', table)]

        if columns:
            frappe.cache().hset('table_columns', table, columns)

    return columns

def get_table_columns(self, doctype):
    """Return the columns of `tab{doctype}`; raise when the table is missing."""
    columns = self.get_db_table_columns('tab' + doctype)
    if not columns:
        raise self.TableMissingError('DocType', doctype)
    return columns

def has_column(self, doctype, column):
    """Return True when `tab{doctype}` has *column*."""
    return column in self.get_table_columns(doctype)

def get_column_type(self, doctype, column):
    """Return the database column type of *column* in `tab{doctype}`.

    SECURITY NOTE(review): *doctype* and *column* are interpolated into
    the SQL string — only trusted identifiers must be passed here.
    """
    return self.sql('''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'tab{0}' AND column_name = '{1}' '''.format(doctype, column))[0][0]

def has_index(self, table_name, index_name):
    # Abstract: implemented by database-specific subclasses.
    pass

def add_index(self, doctype, fields, index_name=None):
    # Abstract: implemented by database-specific subclasses.
    pass

def add_unique(self, doctype, fields, constraint_name=None):
    # Abstract: implemented by database-specific subclasses.
    pass
@staticmethod
def get_index_name(fields):
index_name = "_".join(fields) + "_index"
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
return index_name
def get_system_setting(self, key):
    """Return *key* from System Settings, loading them lazily via the cache."""
    def _load_system_settings():
        return self.get_singles_dict("System Settings")
    return frappe.cache().get_value("system_settings", _load_system_settings).get(key)

def close(self):
    """Close the connection and forget the cursor/connection handles."""
    if self._conn:
        self._conn.close()
        self._cursor = None
        self._conn = None

@staticmethod
def escape(s, percent=True):
    # Abstract: subclasses return *s* escaped for safe SQL inlining.
    pass

@staticmethod
def is_column_missing(e):
    """Alias for frappe.db.is_missing_column."""
    return frappe.db.is_missing_column(e)

def get_descendants(self, doctype, name):
    """Return names of all tree descendants of *name* (nested-set lft/rgt)."""
    node_location_indexes = self.get_value(doctype, name, ('lft', 'rgt'))
    if node_location_indexes:
        lft, rgt = node_location_indexes
        # lft/rgt come back from the database, not from user strings.
        return self.sql_list('''select name from `tab{doctype}`
            where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
    else:
        # Document does not exist (or is not part of a tree).
        return []

def is_missing_table_or_column(self, e):
    """True when *e* indicates a missing column or a missing table."""
    return self.is_missing_column(e) or self.is_missing_table(e)

def multisql(self, sql_dict, values=(), **kwargs):
    """Run the dialect-appropriate query from *sql_dict* (mariadb/postgres)."""
    current_dialect = frappe.db.db_type or 'mariadb'
    query = sql_dict.get(current_dialect)
    return self.sql(query, values, **kwargs)
def delete(self, doctype, conditions, debug=False):
    """Delete rows of `tab{doctype}` matching *conditions* (required)."""
    if conditions:
        conditions, values = self.build_conditions(conditions)
        return self.sql("DELETE FROM `tab{doctype}` where {conditions}".format(
            doctype=doctype,
            conditions=conditions
        ), values, debug=debug)
    else:
        # Refuse unconditional deletes; clear_table exists for that intent.
        frappe.throw(_('No conditions provided'))

def get_last_created(self, doctype):
    """Return the creation timestamp of the newest *doctype* document."""
    last_record = self.get_all(doctype, ('creation'), limit=1, order_by='creation desc')
    if last_record:
        return get_datetime(last_record[0].creation)
    else:
        return None

def clear_table(self, doctype):
    """Remove all rows from `tab{doctype}` (TRUNCATE — implicit commit)."""
    self.sql('truncate `tab{}`'.format(doctype))

def log_touched_tables(self, query, values=None):
    """Record which tab* tables a write query touches (used during migrate).

    Table names are extracted with two regexes covering single-word
    (`tabNote`) and quoted multi-word (`tabSales Order`) forms.
    """
    if values:
        query = frappe.safe_decode(self._cursor.mogrify(query, values))
    if query.strip().lower().split()[0] in ('insert', 'delete', 'update', 'alter'):
        # matches `tabXxx`, tabXxx and "tabXxx"
        single_word_regex = r'([`"]?)(tab([A-Z]\w+))\1'
        multi_word_regex = r'([`"])(tab([A-Z]\w+)( [A-Z]\w+)+)\1'
        tables = []
        for regex in (single_word_regex, multi_word_regex):
            tables += [groups[1] for groups in re.findall(regex, query)]

        if frappe.flags.touched_tables is None:
            frappe.flags.touched_tables = set()
        frappe.flags.touched_tables.update(tables)
def bulk_insert(self, doctype, fields, values, ignore_duplicates=False):
    """Insert many rows into `tab{doctype}` in batches of 10,000.

    :param fields: column names (backtick-quoted into the statement).
    :param values: iterable of row value sequences.
    :param ignore_duplicates: use INSERT IGNORE to skip duplicate keys.

    BUGFIX: the previous flush condition `idx and (idx%10000 == 0 or
    idx < len(values)-1)` never flushed the final rows when the last
    index was not a multiple of 10,000, silently dropping them. Rows are
    now flushed every 10,000 and once more after the loop.
    """
    column_sql = ", ".join("`" + field + "`" for field in fields)
    insert_list = []

    def _flush():
        # Emit one INSERT for the accumulated rows, then reset the batch.
        if not insert_list:
            return
        self.sql("""INSERT {ignore_duplicates} INTO `tab{doctype}` ({fields}) VALUES {values}""".format(
            ignore_duplicates="IGNORE" if ignore_duplicates else "",
            doctype=doctype,
            fields=column_sql,
            values=", ".join(['%s'] * len(insert_list))
        ), tuple(insert_list))
        del insert_list[:]

    for value in values:
        insert_list.append(tuple(value))
        if len(insert_list) >= 10000:
            _flush()
    _flush()
def enqueue_jobs_after_commit():
    """Flush jobs queued during the transaction to their background queues."""
    from frappe.utils.background_jobs import execute_job, get_queue

    pending = frappe.flags.enqueue_after_commit
    if pending:
        for job in pending:
            queue = get_queue(job.get("queue"), is_async=job.get("is_async"))
            queue.enqueue_call(execute_job, timeout=job.get("timeout"),
                kwargs=job.get("queue_args"))
        frappe.flags.enqueue_after_commit = []
def _cast_result(doctype, result):
    """Cast each (field, value) pair in *result* to the field's Python type.

    *result* is an iterable of 2-tuples as returned for Singles queries.
    Unknown doctypes are returned unchanged. Returns a tuple of tuples.
    """
    try:
        # Hoisted out of the loop: the meta lookup is loop-invariant and
        # was previously repeated for every single field.
        meta = frappe.get_meta(doctype)
    except frappe.exceptions.DoesNotExistError:
        return result

    batch = []
    for field, value in result:
        df = meta.get_field(field)
        if df:
            value = cast_fieldtype(df.fieldtype, value)
        batch.append((field, value))
    return tuple(batch)
| true | true |
f731ffc418c409ea5c8ec121e5505721921146e2 | 164 | py | Python | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | natwork/chats/admin.py | Potisin/Natwork | a42b89f18fdd8f8ac69e56cb7184696d6883a9f7 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import Chat
@admin.register(Chat)
class ChatAdmin(admin.ModelAdmin):
    """Django admin for Chat objects; the list view shows the primary key."""

    list_display = ("pk",)
| 12.615385 | 36 | 0.737805 | from django.contrib import admin
from .models import Chat
class ChatAdmin(admin.ModelAdmin):
list_display = ("pk",)
admin.site.register(Chat, ChatAdmin)
| true | true |
f73201674c64269afedc778a05e242056dcf0449 | 2,282 | py | Python | tests/models/symbol/ioc_dump_retrieve_start_details_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | tests/models/symbol/ioc_dump_retrieve_start_details_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | tests/models/symbol/ioc_dump_retrieve_start_details_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | #!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.symbol.ioc_dump_retrieve_start_details import IOCDumpRetrieveStartDetails
class IOCDumpRetrieveStartDetailsTest(unittest.TestCase):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # Try instantiating the model
    def test_ioc_dump_retrieve_start_details(self):
        # Smoke test: the generated model must be constructible with no args.
        ioc_dump_retrieve_start_details_obj = IOCDumpRetrieveStartDetails()
        self.assertNotEqual(ioc_dump_retrieve_start_details_obj, None)
| 60.052632 | 845 | 0.783523 |
import unittest
from netapp.santricity.models.symbol.ioc_dump_retrieve_start_details import IOCDumpRetrieveStartDetails
class IOCDumpRetrieveStartDetailsTest(unittest.TestCase):
def test_ioc_dump_retrieve_start_details(self):
ioc_dump_retrieve_start_details_obj = IOCDumpRetrieveStartDetails()
self.assertNotEqual(ioc_dump_retrieve_start_details_obj, None)
| true | true |
f73201bccc5b600843b87458b521f0313fadcc75 | 355 | py | Python | tests/mssql_test.py | technetbytes/ModelService | 5268d53b4bedb400d8ba4a326297fa7f6b8bc666 | [
"Apache-2.0"
] | 2 | 2020-04-06T07:59:02.000Z | 2020-04-06T07:59:04.000Z | tests/mssql_test.py | technetbytes/ModelService | 5268d53b4bedb400d8ba4a326297fa7f6b8bc666 | [
"Apache-2.0"
] | 2 | 2020-04-06T12:13:34.000Z | 2020-04-06T13:49:29.000Z | tests/mssql_test.py | technetbytes/Pythonic-Template | 78b565359d640b208ed6189ebefc5760751c16d7 | [
"Apache-2.0"
] | null | null | null | import unittest
from db import mssql
def connect_mssql():
    """Return True when an MSSQL connection object can be obtained."""
    db = mssql.MsSqlDb(None)
    # NOTE(review): `get_connection` is accessed as an attribute, not
    # called — confirm it is a property rather than a method missing `()`.
    connection = db.get_connection
    return connection is not None
class MyTest(unittest.TestCase):
    """Smoke test: the MSSQL helper should report a usable connection."""

    def test(self):
        # BUGFIX: assertTrue's second positional argument is `msg`, so the
        # old `assertTrue(connect_mssql(), True)` passed True as the
        # failure message instead of asserting anything extra.
        self.assertTrue(connect_mssql())
# Allow running this test module directly: `python mssql_test.py`.
if __name__ == '__main__':
    unittest.main()
from db import mssql
def connect_mssql():
obj_sql = mssql.MsSqlDb(None)
conn = obj_sql.get_connection
if conn is None:
return False
else:
return True
class MyTest(unittest.TestCase):
def test(self):
self.assertTrue(connect_mssql(), True)
if __name__ == '__main__':
unittest.main() | true | true |
f732029a686f8b8f4cf7e9bb74a3d607318b643a | 1,396 | py | Python | client/main.py | naoki-sawada/m5stack-ble | 57b464cfbbf60bf232ac3a8480499890a07a2d8e | [
"MIT"
] | 37 | 2018-08-08T11:10:45.000Z | 2022-02-21T12:19:13.000Z | client/main.py | naoki-sawada/m5stack-ble | 57b464cfbbf60bf232ac3a8480499890a07a2d8e | [
"MIT"
] | 2 | 2020-08-30T02:44:16.000Z | 2021-11-29T10:12:57.000Z | client/main.py | naoki-sawada/m5stack-ble | 57b464cfbbf60bf232ac3a8480499890a07a2d8e | [
"MIT"
] | 7 | 2019-08-17T15:37:30.000Z | 2021-03-14T15:43:22.000Z | #!/usr/bin/env python3
import asyncio
import logging
import uuid
from bleak import BleakScanner, BleakClient
# Enable debug output
# logging.basicConfig(level=logging.DEBUG)
DEVICE_NAME = "m5-stack"
SERVICE_UUID = uuid.UUID("4fafc201-1fb5-459e-8fcc-c5c9c331914b")
CHAR_UUID = uuid.UUID("beb5483e-36e1-4688-b7f5-ea07361b26a8")
async def run(loop):
    """Scan for the M5Stack BLE peripheral, exchange messages, then listen.

    Discovers a device advertising DEVICE_NAME, reads and writes the
    characteristic CHAR_UUID, and subscribes to its notifications for
    60 seconds. Raises RuntimeError when no matching device is found.
    """
    print("Searching devices...")
    devices = await BleakScanner.discover()
    # Match by advertised name; discovery returns all nearby peripherals.
    device = list(filter(lambda d: d.name == DEVICE_NAME, devices))
    if len(device) == 0:
        raise RuntimeError(f"Failed to find a device name '{DEVICE_NAME}'")
    address = device[0].address
    print(f"Connecting to the device... (address: {address})")
    # The connection closes automatically when this block exits.
    async with BleakClient(address, loop=loop) as client:
        print("Message from the device...")
        value = await client.read_gatt_char(CHAR_UUID)
        print(value.decode())
        print("Sending message to the device...")
        message = bytearray(b"hi!")
        # True requests a write-with-response (acknowledged write).
        await client.write_gatt_char(CHAR_UUID, message, True)

        def callback(sender, data):
            # Invoked by bleak for every notification on CHAR_UUID.
            print(f"Received: {data}")

        print("Subscribing to characteristic changes...")
        await client.start_notify(CHAR_UUID, callback)
        print("Waiting 60 seconds to receive data from the device...")
        await asyncio.sleep(60)
# Drive the one-shot BLE session to completion.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use since
# Python 3.10 — asyncio.run(run(...)) is the modern equivalent.
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
| 29.702128 | 75 | 0.68553 |
import asyncio
import logging
import uuid
from bleak import BleakScanner, BleakClient
DEVICE_NAME = "m5-stack"
SERVICE_UUID = uuid.UUID("4fafc201-1fb5-459e-8fcc-c5c9c331914b")
CHAR_UUID = uuid.UUID("beb5483e-36e1-4688-b7f5-ea07361b26a8")
async def run(loop):
print("Searching devices...")
devices = await BleakScanner.discover()
device = list(filter(lambda d: d.name == DEVICE_NAME, devices))
if len(device) == 0:
raise RuntimeError(f"Failed to find a device name '{DEVICE_NAME}'")
address = device[0].address
print(f"Connecting to the device... (address: {address})")
async with BleakClient(address, loop=loop) as client:
print("Message from the device...")
value = await client.read_gatt_char(CHAR_UUID)
print(value.decode())
print("Sending message to the device...")
message = bytearray(b"hi!")
await client.write_gatt_char(CHAR_UUID, message, True)
def callback(sender, data):
print(f"Received: {data}")
print("Subscribing to characteristic changes...")
await client.start_notify(CHAR_UUID, callback)
print("Waiting 60 seconds to receive data from the device...")
await asyncio.sleep(60)
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
| true | true |
f732040accb451226761ed261b82111127e933b2 | 1,220 | py | Python | 4. Data Warehousing with AWS Redshift/redshift_cluster_teardown.py | jrderek/Data-Engineering-projects | c4903b28fcf6ec2d78e8543ec490b9be6d0c35ad | [
"MIT"
] | null | null | null | 4. Data Warehousing with AWS Redshift/redshift_cluster_teardown.py | jrderek/Data-Engineering-projects | c4903b28fcf6ec2d78e8543ec490b9be6d0c35ad | [
"MIT"
] | null | null | null | 4. Data Warehousing with AWS Redshift/redshift_cluster_teardown.py | jrderek/Data-Engineering-projects | c4903b28fcf6ec2d78e8543ec490b9be6d0c35ad | [
"MIT"
] | null | null | null | import boto3
import configparser
def main():
    """Tear down the project's Redshift cluster and its IAM role.

    Reads AWS credentials and resource names from `dwh.cfg`, requests
    cluster deletion (skipping the final snapshot), then detaches the
    S3 read-only policy and deletes the IAM role. (The old docstring
    claimed this *sets up* a cluster — it deletes one.)

    Returns:
        None
    """
    # BUGFIX: `config` was used without ever being created, which raised
    # NameError at runtime. Load it from the project config file first.
    config = configparser.ConfigParser()
    config.read('dwh.cfg')

    KEY = config.get('AWS', 'KEY')
    SECRET = config.get('AWS', 'SECRET')
    DWH_CLUSTER_IDENTIFIER = config.get("DWH", "DWH_CLUSTER_IDENTIFIER")
    DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")

    redshift = boto3.client('redshift',
                            region_name="us-west-2",
                            aws_access_key_id=KEY,
                            aws_secret_access_key=SECRET)

    iam = boto3.client('iam',
                       region_name='us-west-2',
                       aws_access_key_id=KEY,
                       aws_secret_access_key=SECRET)

    # Request deletion without a final snapshot (cluster data is lost).
    redshift.delete_cluster(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
                            SkipFinalClusterSnapshot=True)

    # Remove role: a managed policy must be detached before role deletion.
    iam.detach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
                           PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
    iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)

    print("Cluster and IAM role has been deleted")
# Script entry point: run the teardown only when executed directly.
if __name__ == "__main__":
    main()
import configparser
def main():
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
DWH_CLUSTER_IDENTIFIER = config.get("DWH","DWH_CLUSTER_IDENTIFIER")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
redshift = boto3.client('redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
iam = boto3.client('iam',
region_name='us-west-2',
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
redshift.delete_cluster(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
SkipFinalClusterSnapshot=True)
iam.detach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)
print("Cluster and IAM role has been deleted")
if __name__ == "__main__":
main() | true | true |
f7320490c051b51caf2f1913b631c004f6196e85 | 5,301 | py | Python | create_json.py | rakaar/Narmada-server | 2d241bf6205332534abd2bd75f3283781564106a | [
"MIT"
] | 2 | 2021-05-28T01:37:04.000Z | 2021-09-20T14:51:42.000Z | create_json.py | rakaar/Narmada-server | 2d241bf6205332534abd2bd75f3283781564106a | [
"MIT"
] | 3 | 2021-05-12T18:15:54.000Z | 2022-03-12T00:58:06.000Z | create_json.py | rakaar/Narmada-server | 2d241bf6205332534abd2bd75f3283781564106a | [
"MIT"
] | 2 | 2021-05-12T18:41:02.000Z | 2021-12-08T22:13:11.000Z | import pickle
import sys
import ast
import re
import json
from word2number import w2n
import os, sys
try:
location=sys.argv[1]
except Exception as e:
location='roma'
try:
type_=sys.argv[2]
except Exception as e:
type_='needs'
with open('OUTPUT/'+location+'_'+type_+'.p','rb') as handle:
need_dict=pickle.load(handle)
need_json=[]
for elem in need_dict:
sample_dict={}
elem_id=elem
tweet_text=need_dict[elem][0]
resource_class_dict= need_dict[elem][-1]
sample_dict['_id']=elem_id
sample_dict['lang']='en'
sample_dict['text']=tweet_text
sample_dict['Classification']='Need'
sample_dict['isCompleted']=False
sample_dict['username']='@Username'
sample_dict['Matched']=-1
sample_dict['Locations']={}
sample_dict['Sources']=[]
sample_dict['Resources']=[]
sample_dict['Contact']={}
sample_dict['Contact']['Email']=[]
sample_dict['Contact']['Phone Number']=[]
source_list= list(set(need_dict[elem][1]))
for i in source_list:
sample_dict['Sources'].append(i)
for i in list(set(need_dict[elem][3])):
loc_name=i[0]
lat=i[1][0]
long_=i[1][1]
sample_dict['Locations'][loc_name]={}
sample_dict['Locations'][loc_name]['lat']=lat
sample_dict['Locations'][loc_name]['long']=long_
for i in list(set(need_dict[elem][4][0])):
sample_dict['Contact']['Phone Number'].append(i)
for i in list(set(need_dict[elem][4][1])):
sample_dict['Contact']['Email'].append(i[0])
resources=list(set(need_dict[elem][-2]))
print(resources)
print(resource_class_dict)
# resource_list=",".join(list(set(need_dict[elem][-1])))
split_text=tweet_text.split()
quantity_list=[]
class_list={}
for resource in resources:
s={}
try:
res_class = resource_class_dict[resource][1]
except Exception as e:
res_class = 'ERROR'
continue
if res_class not in class_list:
class_list[res_class]={}
# s['resource']=resource
prev_words=[ split_text[i-1] for i in range(0,len(split_text)) if resource.startswith(split_text[i]) ]
# prev_words_2=[ str(split_text[i-2])+' '+ str(split_text[i-1]) for i in range(0,len(split_text)) if i == resource ]
qt='None'
try:
for word in prev_words:
word=word.replace(',','')
if word.isnumeric()==True:
qt=str(word)
break
else:
try:
qt=str(w2n.word_to_num(word))
break
except Exception as e:
continue
if qt=='None':
elems=resource.strip().split()
word=elems[0]
resource2=" ".join(elems[1:])
word=word.replace(',','')
if word.isnumeric()==True:
qt=str(word)
else:
try:
qt=str(w2n.word_to_num(word))
except Exception as e:
pass
if qt!='None' and qt in resource:
print(resource, qt)
continue
if resource not in class_list[res_class]:
class_list[res_class][resource]=qt
else:
continue
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
qt='None'
class_list[res_class][resource]= qt
# sample_dict['Resources'].append(s)
sample_dict['Resources']= class_list
need_json.append(sample_dict)
with open(location+'_'+type_+'.json','w') as fp:
json.dump(need_json,fp, indent= 3)
# offer_csv=open(location+'_offers.csv','w')
# with open('OUTPUT/'+location+'_offers.p','rb') as handle:
# need_dict=pickle.load(handle)
# offer_csv.write('Tweet ID Tweet text Source List Location list Resource list Phone number Email Url Quantity Dict\n')
# for elem in need_dict:
# elem_id=elem
# tweet_text=need_dict[elem][0]
# source_list= ",".join(list(set(need_dict[elem][1])))
# loc_list=",".join(list(set([i[0] for i in need_dict[elem][3]])))
# resources=list(set(need_dict[elem][-1]))
# resource_list=",".join(list(set(need_dict[elem][-1])))
# contact_list_0=','.join(list(set(need_dict[elem][4][0])))
# contact_list_1=','.join([i[0] for i in list(set(need_dict[elem][4][1]))])
# contact_list_2=','.join(list(set(need_dict[elem][4][2])))
# split_text=tweet_text.split()
# quantity_list=[]
# for resource in resources:
# prev_words=[ split_text[i-1] for i in range(0,len(split_text)) if resource.startswith(split_text[i])]
# for word in prev_words:
# try:
# word=word.replace(',','')
# if word.isnumeric()==True:
# quantity_list.append(str(resource)+'-'+str(word))
# # quantity_dict[resource]=word
# else:
# quantity_list.append(str(resource)+'-'+str(w2n.word_to_num(word)))
# # quantity_dict[resource]=w2n.word_to_num(word)
# except Exception as e:
# continue
# elems=resource.split()
# word=elems[0]
# resource=" ".join(elems[1:-1])
# try:
# word=word.replace(',','')
# if word.isnumeric()==True:
# quantity_list.append(str(resource)+'-'+str(word))
# # quantity_dict[resource]=word
# else:
# quantity_list.append(str(resource)+'-'+str(w2n.word_to_num(word)))
# # quantity_dict[resource]=w2n.word_to_num(word)
# except Exception as e:
# continue
# quantity_list=','.join(list(set(quantity_list)))
# offer_csv.write(str(elem_id)+' '+tweet_text+' '+source_list+' '+loc_list+' '+ resource_list+' '+ contact_list_0+' '+ contact_list_1+' '+ contact_list_2+" "+ quantity_list+'\n') | 23.7713 | 179 | 0.660253 | import pickle
import sys
import ast
import re
import json
from word2number import w2n
import os, sys
try:
location=sys.argv[1]
except Exception as e:
location='roma'
try:
type_=sys.argv[2]
except Exception as e:
type_='needs'
with open('OUTPUT/'+location+'_'+type_+'.p','rb') as handle:
need_dict=pickle.load(handle)
need_json=[]
for elem in need_dict:
sample_dict={}
elem_id=elem
tweet_text=need_dict[elem][0]
resource_class_dict= need_dict[elem][-1]
sample_dict['_id']=elem_id
sample_dict['lang']='en'
sample_dict['text']=tweet_text
sample_dict['Classification']='Need'
sample_dict['isCompleted']=False
sample_dict['username']='@Username'
sample_dict['Matched']=-1
sample_dict['Locations']={}
sample_dict['Sources']=[]
sample_dict['Resources']=[]
sample_dict['Contact']={}
sample_dict['Contact']['Email']=[]
sample_dict['Contact']['Phone Number']=[]
source_list= list(set(need_dict[elem][1]))
for i in source_list:
sample_dict['Sources'].append(i)
for i in list(set(need_dict[elem][3])):
loc_name=i[0]
lat=i[1][0]
long_=i[1][1]
sample_dict['Locations'][loc_name]={}
sample_dict['Locations'][loc_name]['lat']=lat
sample_dict['Locations'][loc_name]['long']=long_
for i in list(set(need_dict[elem][4][0])):
sample_dict['Contact']['Phone Number'].append(i)
for i in list(set(need_dict[elem][4][1])):
sample_dict['Contact']['Email'].append(i[0])
resources=list(set(need_dict[elem][-2]))
print(resources)
print(resource_class_dict)
split_text=tweet_text.split()
quantity_list=[]
class_list={}
for resource in resources:
s={}
try:
res_class = resource_class_dict[resource][1]
except Exception as e:
res_class = 'ERROR'
continue
if res_class not in class_list:
class_list[res_class]={}
prev_words=[ split_text[i-1] for i in range(0,len(split_text)) if resource.startswith(split_text[i]) ]
qt='None'
try:
for word in prev_words:
word=word.replace(',','')
if word.isnumeric()==True:
qt=str(word)
break
else:
try:
qt=str(w2n.word_to_num(word))
break
except Exception as e:
continue
if qt=='None':
elems=resource.strip().split()
word=elems[0]
resource2=" ".join(elems[1:])
word=word.replace(',','')
if word.isnumeric()==True:
qt=str(word)
else:
try:
qt=str(w2n.word_to_num(word))
except Exception as e:
pass
if qt!='None' and qt in resource:
print(resource, qt)
continue
if resource not in class_list[res_class]:
class_list[res_class][resource]=qt
else:
continue
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
qt='None'
class_list[res_class][resource]= qt
sample_dict['Resources']= class_list
need_json.append(sample_dict)
with open(location+'_'+type_+'.json','w') as fp:
json.dump(need_json,fp, indent= 3)
| true | true |
f73207fe8f7010c1550988d7636a3f322d128758 | 20,293 | py | Python | svca_limix/limix/core/mean/mean.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 65 | 2015-01-20T20:46:26.000Z | 2021-06-27T14:40:35.000Z | svca_limix/limix/core/mean/mean.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 29 | 2015-02-01T22:35:17.000Z | 2017-08-07T08:18:23.000Z | svca_limix/limix/core/mean/mean.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 35 | 2015-02-01T17:26:50.000Z | 2019-09-13T07:06:16.000Z | import sys
from limix.core.old.cobj import *
from limix.utils.preprocess import regressOut
import numpy as np
import scipy.linalg as LA
import copy
def compute_X1KX2(Y, D, X1, X2, A1=None, A2=None):
R,C = Y.shape
if A1 is None:
nW_A1 = Y.shape[1]
#A1 = np.eye(Y.shape[1]) #for now this creates A1 and A2
else:
nW_A1 = A1.shape[0]
if A2 is None:
nW_A2 = Y.shape[1]
#A2 = np.eye(Y.shape[1]) #for now this creates A1 and A2
else:
nW_A2 = A2.shape[0]
nW_X1 = X1.shape[1]
rows_block = nW_A1 * nW_X1
if 0:#independentX2:
nW_X2 = 1
else:
nW_X2 = X2.shape[1]
cols_block = nW_A2 * nW_X2
block = np.zeros((rows_block,cols_block))
if (R>C) or (A1 is None) or (A2 is None):
for c in range(C):
X1D = X1 * D[:,c:c+1]
X1X2 = X1D.T.dot(X2)
if (A1 is None) and (A2 is None):
block[c*X1.shape[1]:(c+1)*X1.shape[1], c*X2.shape[1]:(c+1)*X2.shape[1]] += X1X2
elif (A1 is None):
block[c*X1.shape[1]:(c+1)*X1.shape[1],:] += np.kron(A2[:,c:c+1].T,X1X2)
elif (A2 is None):
block[:,c*X2.shape[1]:(c+1)*X2.shape[1]] += np.kron(A1[:,c:c+1],X1X2)
else:
A1A2 = np.outer(A1[:,c],A2[:,c])
block += np.kron(A1A2,X1X2)
else:
for r in range(R):
A1D = A1 * D[r:r+1,:]
A1A2 = A1D.dot(A2.T)
X1X2 = X1[r,:][:,np.newaxis].dot(X2[r,:][np.newaxis,:])
block += np.kron(A1A2,X1X2)
return block
class mean(cObject):
def __init__(self,Y, identity_trick=False):
""" init data term """
self.Y = Y
self.identity_trick=identity_trick
self.clearFixedEffect()
#########################################
# Properties
#########################################
@property
def A(self):
return self._A
@property
def B(self):
return self._B
@property
def F(self):
return self._F
@property
def A_identity(self):
return self._A_identity
@property
def REML_term(self):
return self._REML_term
@property
def Y(self):
return self._Y
@property
def N(self):
return self._N
@property
def P(self):
return self._P
@property
def n_fixed_effs(self):
return self._n_fixed_effs
@property
def n_terms(self):
return self._n_terms
@property
def Lr(self):
return self._Lr
@property
def Lc(self):
return self._Lc
@property
def d(self):
return self._d
@property
def D(self):
return np.reshape(self.d,(self.N,self.P), order='F')
@property
def LRLdiag(self):
return self._LRLdiag
@property
def LCL(self):
return self._LCL
#########################################
# Setters
#########################################
def use_identity_trick(self,identity_trick=True):
self.identity_trick=identity_trick
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
def clearFixedEffect(self):
""" erase all fixed effects """
self._A = []
self._F = []
self._B = []
self._A_identity = []
self._REML_term = []
self._n_terms = 0
self._n_fixed_effs = 0
self._n_fixed_effs_REML = 0
self.indicator = {'term':np.array([]),
'row':np.array([]),
'col':np.array([])}
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
def addFixedEffect(self,F=None,A=None, REML=True, index=None):
"""
set sample and trait designs
F: NxK sample design
A: LxP sample design
REML: REML for this term?
index: index of which fixed effect to replace. If None, just append.
"""
if F is None: F = np.ones((self.N,1))
if A is None:
A = np.eye(self.P)
A_identity = True
elif (A.shape == (self.P,self.P)) & (A==np.eye(self.P)).all():
A_identity = True
else:
A_identity = False
assert F.shape[0]==self.N, "F dimension mismatch"
assert A.shape[1]==self.P, "A dimension mismatch"
if index is None or index==self.n_terms:
self.F.append(F)
self.A.append(A)
self.A_identity.append(A_identity)
self.REML_term.append(REML)
# build B matrix and indicator
self.B.append(np.zeros((F.shape[1],A.shape[0])))
self._n_terms+=1
self._update_indicator(F.shape[1],A.shape[0])
elif index >self.n_terms:
raise Exception("index exceeds max index of terms")
else:
self._n_fixed_effs-=self.F[index].shape[1]*self.A[index].shape[0]
if self.REML_term[index]:
self._n_fixed_effs_REML-=self.F[index].shape[1]*self.A[index].shape[0]
self.F[index] = F
self.A[index] = A
self.A_identity[index] = A_identity
self.REML_term[index]=REML
self.B[index] = np.zeros((F.shape[1],A.shape[0]))
self._rebuild_indicator()
self._n_fixed_effs+=F.shape[1]*A.shape[0]
if REML:
self._n_fixed_effs_REML+=F.shape[1]*A.shape[0]
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
def removeFixedEffect(self, index=None):
"""
set sample and trait designs
F: NxK sample design
A: LxP sample design
REML: REML for this term?
index: index of which fixed effect to replace. If None, remove last term.
"""
if self._n_terms==0:
pass
if index is None or index==(self._n_terms-1):
self._n_terms-=1
F = self._F.pop() #= self.F[:-1]
A = self._A.pop() #= self.A[:-1]
self._A_identity.pop() #= self.A_identity[:-1]
REML_term = self._REML_term.pop()# = self.REML_term[:-1]
self._B.pop()# = self.B[:-1]
self._n_fixed_effs-=F.shape[1]*A.shape[0]
if REML_term:
self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]
pass
elif index >= self.n_terms:
raise Exception("index exceeds max index of terms")
else:
raise NotImplementedError("currently only last term can be removed")
pass
self._rebuild_indicator()
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Y.setter
def Y(self,value):
""" set phenotype """
self._N,self._P = value.shape
self._Y = value
self.clear_cache('Ystar1','Ystar','Yhat','LRLdiag_Yhat',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Lr.setter
def Lr(self,value):
""" set row rotation """
assert value.shape[0]==self._N, 'dimension mismatch'
assert value.shape[1]==self._N, 'dimension mismatch'
self._Lr = value
self.clear_cache('Fstar','Ystar1','Ystar','Yhat','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Lc.setter
def Lc(self,value):
""" set col rotation """
assert value.shape[0]==self._P, 'Lc dimension mismatch'
assert value.shape[1]==self._P, 'Lc dimension mismatch'
self._Lc = value
self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@d.setter
def d(self,value):
""" set anisotropic scaling """
assert value.shape[0]==self._P*self._N, 'd dimension mismatch'
self._d = value
self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@LRLdiag.setter
def LRLdiag(self,value):
""" set anisotropic scaling """
self._LRLdiag = value
self.clear_cache('LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad')
@LCL.setter
def LCL(self,value):
""" set anisotropic scaling """
self._LCL = value
self.clear_cache('Areml_grad','beta_grad','Xstar_beta_grad')
#########################################
# Getters (caching)
#########################################
@cached
def Astar(self):
RV = []
for term_i in range(self.n_terms):
RV.append(np.dot(self.A[term_i],self.Lc.T))
return RV
@cached
def Fstar(self):
RV = []
for term_i in range(self.n_terms):
RV.append(np.dot(self.Lr,self.F[term_i]))
return RV
def Ystar1(self):
return np.dot(self.Lr,self.Y)
@cached
def Ystar(self):
return np.dot(self.Ystar1(),self.Lc.T)
@cached
def Yhat(self):
return self.D*self.Ystar()
@cached
def Xstar(self):
RV = np.zeros((self.N*self.P,self.n_fixed_effs))
ip = 0
for i in range(self.n_terms):
Ki = self.A[i].shape[0]*self.F[i].shape[1]
RV[:,ip:ip+Ki] = np.kron(self.Astar()[i].T,self.Fstar()[i])
ip += Ki
return RV
def var_total(self):
return (self.Yhat()*self.Ystar()).sum()
def var_explained(self):
XKY = self.compute_XKY(M=self.Yhat())
beta_hat = self.Areml_solve(XKY)
return (XKY*beta_hat).sum(), beta_hat
@cached
def Xhat(self):
RV = self.d[:,np.newaxis]*self.Xstar()
return RV
@cached
def Areml(self):
#A1 = self.XstarT_dot(self.Xhat())
A2 = self.compute_XKX()
return A2
@cached
def Areml_chol(self):
return LA.cholesky(self.Areml()).T
@cached
def Areml_REML_chol(self):
return LA.cholesky(self.Areml()).T
@cached
def Areml_inv(self):
return LA.cho_solve((self.Areml_chol(),True),np.eye(self.n_fixed_effs))
#caching bug:
#@cached
def beta_hat(self):
XKY = self.compute_XKY(M=self.Yhat())
beta_hat = self.Areml_solve(XKY)
return beta_hat
@cached
def B_hat(self):
RV = []
ip = 0
for term_i in range(self.n_terms):
RV.append(np.reshape(self.beta_hat()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F'))
ip += self.B[term_i].size
return RV
@cached
def LRLdiag_Xhat_tens(self):
RV = np.reshape(self.Xhat(),(self.N,self.P,self.n_fixed_effs),order='F').copy()
RV *= self.LRLdiag[:,np.newaxis,np.newaxis]
return RV
@cached
def LRLdiag_Yhat(self):
return self.LRLdiag[:,np.newaxis]*self.Yhat()
@cached
def Areml_grad(self):
RV = np.einsum('jpk,lp->jlk',self.LRLdiag_Xhat_tens(),self.LCL)
RV = RV.reshape((self.N*self.P,self.n_fixed_effs),order='F')
RV*= self.d[:,np.newaxis]
RV = -self.XstarT_dot(RV)
return RV
@cached
def beta_grad(self):
RV = np.reshape(np.dot(self.LRLdiag_Yhat(),self.LCL.T),(self.N*self.P),order='F')
RV *= self.d
RV = self.XstarT_dot(RV)
RV += np.dot(self.Areml_grad(),self.beta_hat())
RV = -np.dot(self.Areml_inv(),RV)
return RV
@cached
def Xstar_beta_grad(self):
RV = np.zeros((self.N,self.P))
ip = 0
for term_i in range(self.n_terms):
_Bgrad = np.reshape(self.beta_grad()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F')
RV+=np.dot(self.Fstar()[term_i],np.dot(_Bgrad,self.Astar()[term_i]))
ip += self.B[term_i].size
return RV
@cached
def Zstar(self):
""" predict the value of the fixed effect """
RV = self.Ystar().copy()
for term_i in range(self.n_terms):
if self.identity_trick and self.A_identity[term_i]:
RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i])
else:
RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i]))
self.clear_cache('DLZ')
return RV
@cached
def Areml_eigh(self):
"""compute the eigenvalue decomposition of Astar"""
s,U = LA.eigh(self.Areml(),lower=True)
i_pos = (s>1e-10)
s = s[i_pos]
U = U[:,i_pos]
return s,U
@cached
def DLZ(self):
return self.Zstar()*np.reshape(self.D,(self.N,self.P), order='F')
###############################################
# Other getters with no caching, should not they have caching somehow?
###############################################
def Areml_solve(self, b):
try:
res = LA.cho_solve((self.Areml_chol(),True),b)
except LA.LinAlgError:
s,U = self.Areml_eigh()
res = U.T.dot(b)
res /= s[:,np.newaxis]
res = U.dot(res)
return res
def compute_XKY(self, M=None):
if M is None:
M = self.Yhat()
assert M.shape==(self.N,self.P)
XKY = np.zeros((self.n_fixed_effs))
n_weights = 0
for term in range(self.n_terms):
if self.identity_trick and self.A_identity[term]:
XKY_block = compute_XYA(DY=M, X=self.Fstar()[term], A=None)
else:
XKY_block = compute_XYA(DY=M, X=self.Fstar()[term], A=self.Astar()[term])
XKY[n_weights:n_weights + self.A[term].shape[0] * self.F[term].shape[1]] = XKY_block.ravel(order='F')
n_weights += self.A[term].shape[0] * self.F[term].shape[1]
return XKY
def compute_XKX(self):
#n_weights1 = 0
#
#for term1 in xrange(self.n_terms):
# n_weights1+=self.Astar()[term1].shape[0] * self.Fstar()[term1].shape[1]
#cov_beta = np.zeros((n_weights1,n_weights1))
cov_beta = np.zeros((self.n_fixed_effs,self.n_fixed_effs))
n_weights1 = 0
for term1 in range(self.n_terms):
if self.identity_trick and self.A_identity[term1]:
A_term1 = None
else:
A_term1 = self.Astar()[term1]
n_weights2 = n_weights1
for term2 in range(term1,self.n_terms):
if self.identity_trick and self.A_identity[term2]:
A_term2 = None
else:
A_term2 = self.Astar()[term2]
block = compute_X1KX2(Y=self.Ystar(), D=self.D, X1=self.Fstar()[term1], X2=self.Fstar()[term2], A1=A_term1, A2=A_term2)
cov_beta[n_weights1:n_weights1 + self.A[term1].shape[0] * self.F[term1].shape[1], n_weights2:n_weights2 + self.A[term2].shape[0] * self.F[term2].shape[1]] = block
if term1!=term2:
cov_beta[n_weights2:n_weights2 + self.A[term2].shape[0] * self.F[term2].shape[1], n_weights1:n_weights1 + self.A[term1].shape[0] * self.F[term1].shape[1]] = block.T
n_weights2+=self.A[term2].shape[0] * self.F[term2].shape[1]
n_weights1+=self.A[term1].shape[0] * self.F[term1].shape[1]
return cov_beta
def predict(self):
""" predict the value of the fixed effect """
RV = np.zeros((self.N,self.P))
for term_i in range(self.n_terms):
RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))
return RV
def evaluate(self):
""" predict the value of """
RV = -self.predict()
RV += self.Ystar()
return RV
def getGradient(self,j):
""" get rotated gradient for fixed effect i """
i = int(self.indicator['term'][j])
r = int(self.indicator['row'][j])
c = int(self.indicator['col'][j])
rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])
return rv
def XstarT_dot(self,M):
""" get dot product of Xhat and M """
if 0:
#TODO: implement this properly
pass
else:
RV = np.dot(self.Xstar().T,M)
return RV
def getResiduals(self):
""" regress out fixed effects and results residuals """
X = np.zeros((self.N*self.P,self.n_fixed_effs))
ip = 0
for i in range(self.n_terms):
Ki = self.A[i].shape[0]*self.F[i].shape[1]
X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])
ip += Ki
y = np.reshape(self.Y,(self.Y.size,1),order='F')
RV = regressOut(y,X)
RV = np.reshape(RV,self.Y.shape,order='F')
return RV
#########################################
# Params manipulation
#########################################
def getParams(self):
""" get params """
rv = np.array([])
if self.n_terms>0:
rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)])
return rv
def setParams(self,params):
""" set params """
start = 0
for i in range(self.n_terms):
n_effects = self.B[i].size
self.B[i] = np.reshape(params[start:start+n_effects],self.B[i].shape, order='F')
start += n_effects
#########################################
# Utility functions
#########################################
def getDimensions(self):
""" get phenotype dimensions """
return self.N,self.P
def _set_toChange(x):
""" set variables in list x toChange """
for key in list(x.keys()):
self.toChange[key] = True
def _update_indicator(self,K,L):
""" update the indicator """
_update = {'term': self.n_terms*np.ones((K,L)).T.ravel(),
'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(),
'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()}
for key in list(_update.keys()):
self.indicator[key] = np.concatenate([self.indicator[key],_update[key]])
def _rebuild_indicator(self):
""" update the indicator """
indicator = {'term':np.array([]),
'row':np.array([]),
'col':np.array([])}
for term in range(self.n_terms):
L = self.A[term].shape[0]
K = self.F[term].shape[1]
_update = {'term': (term+1)*np.ones((K,L)).T.ravel(),
'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(),
'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()}
for key in list(_update.keys()):
indicator[key] = np.concatenate([indicator[key],_update[key]])
self.indicator = indicator
| 33.158497 | 184 | 0.525206 | import sys
from limix.core.old.cobj import *
from limix.utils.preprocess import regressOut
import numpy as np
import scipy.linalg as LA
import copy
def compute_X1KX2(Y, D, X1, X2, A1=None, A2=None):
R,C = Y.shape
if A1 is None:
nW_A1 = Y.shape[1]
hape[0]
if A2 is None:
nW_A2 = Y.shape[1]
hape[0]
nW_X1 = X1.shape[1]
rows_block = nW_A1 * nW_X1
if 0:
nW_X2 = 1
else:
nW_X2 = X2.shape[1]
cols_block = nW_A2 * nW_X2
block = np.zeros((rows_block,cols_block))
if (R>C) or (A1 is None) or (A2 is None):
for c in range(C):
X1D = X1 * D[:,c:c+1]
X1X2 = X1D.T.dot(X2)
if (A1 is None) and (A2 is None):
block[c*X1.shape[1]:(c+1)*X1.shape[1], c*X2.shape[1]:(c+1)*X2.shape[1]] += X1X2
elif (A1 is None):
block[c*X1.shape[1]:(c+1)*X1.shape[1],:] += np.kron(A2[:,c:c+1].T,X1X2)
elif (A2 is None):
block[:,c*X2.shape[1]:(c+1)*X2.shape[1]] += np.kron(A1[:,c:c+1],X1X2)
else:
A1A2 = np.outer(A1[:,c],A2[:,c])
block += np.kron(A1A2,X1X2)
else:
for r in range(R):
A1D = A1 * D[r:r+1,:]
A1A2 = A1D.dot(A2.T)
X1X2 = X1[r,:][:,np.newaxis].dot(X2[r,:][np.newaxis,:])
block += np.kron(A1A2,X1X2)
return block
class mean(cObject):
def __init__(self,Y, identity_trick=False):
self.Y = Y
self.identity_trick=identity_trick
self.clearFixedEffect()
self.A_identity[index] = A_identity
self.REML_term[index]=REML
self.B[index] = np.zeros((F.shape[1],A.shape[0]))
self._rebuild_indicator()
self._n_fixed_effs+=F.shape[1]*A.shape[0]
if REML:
self._n_fixed_effs_REML+=F.shape[1]*A.shape[0]
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
def removeFixedEffect(self, index=None):
if self._n_terms==0:
pass
if index is None or index==(self._n_terms-1):
self._n_terms-=1
F = self._F.pop()
A = self._A.pop()
self._A_identity.pop()
REML_term = self._REML_term.pop()
self._B.pop()
self._n_fixed_effs-=F.shape[1]*A.shape[0]
if REML_term:
self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]
pass
elif index >= self.n_terms:
raise Exception("index exceeds max index of terms")
else:
raise NotImplementedError("currently only last term can be removed")
pass
self._rebuild_indicator()
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Y.setter
def Y(self,value):
self._N,self._P = value.shape
self._Y = value
self.clear_cache('Ystar1','Ystar','Yhat','LRLdiag_Yhat',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Lr.setter
def Lr(self,value):
assert value.shape[0]==self._N, 'dimension mismatch'
assert value.shape[1]==self._N, 'dimension mismatch'
self._Lr = value
self.clear_cache('Fstar','Ystar1','Ystar','Yhat','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@Lc.setter
def Lc(self,value):
assert value.shape[0]==self._P, 'Lc dimension mismatch'
assert value.shape[1]==self._P, 'Lc dimension mismatch'
self._Lc = value
self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@d.setter
def d(self,value):
assert value.shape[0]==self._P*self._N, 'd dimension mismatch'
self._d = value
self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ')
@LRLdiag.setter
def LRLdiag(self,value):
self._LRLdiag = value
self.clear_cache('LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad')
@LCL.setter
def LCL(self,value):
self._LCL = value
self.clear_cache('Areml_grad','beta_grad','Xstar_beta_grad')
te_XKY(M=self.Yhat())
beta_hat = self.Areml_solve(XKY)
return beta_hat
@cached
def B_hat(self):
RV = []
ip = 0
for term_i in range(self.n_terms):
RV.append(np.reshape(self.beta_hat()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F'))
ip += self.B[term_i].size
return RV
@cached
def LRLdiag_Xhat_tens(self):
RV = np.reshape(self.Xhat(),(self.N,self.P,self.n_fixed_effs),order='F').copy()
RV *= self.LRLdiag[:,np.newaxis,np.newaxis]
return RV
@cached
def LRLdiag_Yhat(self):
return self.LRLdiag[:,np.newaxis]*self.Yhat()
@cached
def Areml_grad(self):
RV = np.einsum('jpk,lp->jlk',self.LRLdiag_Xhat_tens(),self.LCL)
RV = RV.reshape((self.N*self.P,self.n_fixed_effs),order='F')
RV*= self.d[:,np.newaxis]
RV = -self.XstarT_dot(RV)
return RV
@cached
def beta_grad(self):
RV = np.reshape(np.dot(self.LRLdiag_Yhat(),self.LCL.T),(self.N*self.P),order='F')
RV *= self.d
RV = self.XstarT_dot(RV)
RV += np.dot(self.Areml_grad(),self.beta_hat())
RV = -np.dot(self.Areml_inv(),RV)
return RV
@cached
def Xstar_beta_grad(self):
RV = np.zeros((self.N,self.P))
ip = 0
for term_i in range(self.n_terms):
_Bgrad = np.reshape(self.beta_grad()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F')
RV+=np.dot(self.Fstar()[term_i],np.dot(_Bgrad,self.Astar()[term_i]))
ip += self.B[term_i].size
return RV
@cached
def Zstar(self):
RV = self.Ystar().copy()
for term_i in range(self.n_terms):
if self.identity_trick and self.A_identity[term_i]:
RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i])
else:
RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i]))
self.clear_cache('DLZ')
return RV
@cached
def Areml_eigh(self):
s,U = LA.eigh(self.Areml(),lower=True)
i_pos = (s>1e-10)
s = s[i_pos]
U = U[:,i_pos]
return s,U
@cached
def DLZ(self):
return self.Zstar()*np.reshape(self.D,(self.N,self.P), order='F')
s2+=self.A[term2].shape[0] * self.F[term2].shape[1]
n_weights1+=self.A[term1].shape[0] * self.F[term1].shape[1]
return cov_beta
def predict(self):
RV = np.zeros((self.N,self.P))
for term_i in range(self.n_terms):
RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))
return RV
def evaluate(self):
RV = -self.predict()
RV += self.Ystar()
return RV
def getGradient(self,j):
i = int(self.indicator['term'][j])
r = int(self.indicator['row'][j])
c = int(self.indicator['col'][j])
rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])
return rv
def XstarT_dot(self,M):
if 0:
pass
else:
RV = np.dot(self.Xstar().T,M)
return RV
def getResiduals(self):
X = np.zeros((self.N*self.P,self.n_fixed_effs))
ip = 0
for i in range(self.n_terms):
Ki = self.A[i].shape[0]*self.F[i].shape[1]
X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])
ip += Ki
y = np.reshape(self.Y,(self.Y.size,1),order='F')
RV = regressOut(y,X)
RV = np.reshape(RV,self.Y.shape,order='F')
return RV
| true | true |
f73208d87a7c1b01380a02572131d5fffaf76eab | 3,888 | py | Python | app/capture.py | karanveersingh5623/EdgeRealtimeVideoAnalytics | 0765ff9145c2163f9e361495fbb0bda147e536cc | [
"Apache-2.0"
] | 22 | 2020-10-31T05:13:37.000Z | 2021-11-17T23:18:29.000Z | app/capture.py | karanveersingh5623/EdgeRealtimeVideoAnalytics | 0765ff9145c2163f9e361495fbb0bda147e536cc | [
"Apache-2.0"
] | null | null | null | app/capture.py | karanveersingh5623/EdgeRealtimeVideoAnalytics | 0765ff9145c2163f9e361495fbb0bda147e536cc | [
"Apache-2.0"
] | 4 | 2021-02-06T11:14:29.000Z | 2021-07-21T23:40:06.000Z | # RedisEdge realtime video analytics video capture script
import argparse
import cv2
import redis
import time
from urllib.parse import urlparse
class SimpleMovingAverage(object):
''' Simple moving average '''
def __init__(self, value=0.0, count=7):
self.count = int(count)
self.current = float(value)
self.samples = [self.current] * self.count
def __str__(self):
return str(round(self.current, 3))
def add(self, value):
v = float(value)
self.samples.insert(0, v)
o = self.samples.pop()
self.current = self.current + (v-o)/self.count
class Video:
def __init__(self, infile=0, fps=30.0):
self.isFile = not str(infile).isdecimal()
self.ts = time.time()
self.infile = infile
self.cam = cv2.VideoCapture(self.infile)
if not self.isFile:
self.cam.set(cv2.CAP_PROP_FPS, fps)
self.fps = fps
# TODO: some cameras don't respect the fps directive
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
else:
self.fps = self.cam.get(cv2.CAP_PROP_FPS)
self.sma = SimpleMovingAverage(value=0.1, count=19)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
# Respect FPS for files
if self.isFile:
delta = time.time() - self.ts
self.sma.add(delta)
time.sleep(max(0,(1 - self.sma.current*self.fps)/self.fps))
self.ts = time.time()
# Read image
ret_val, img0 = self.cam.read()
if not ret_val and self.isFile:
self.cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret_val, img0 = self.cam.read()
assert ret_val, 'Video Error'
# Preprocess
img = img0
if not self.isFile:
img = cv2.flip(img, 1)
return self.count, img
def __len__(self):
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='Input file (leave empty to use webcam)', nargs='?', type=str, default=None)
parser.add_argument('-o', '--output', help='Output stream key name', type=str, default='camera:0')
parser.add_argument('-u', '--url', help='Redis URL', type=str, default='redis://127.0.0.1:6379')
parser.add_argument('-w', '--webcam', help='Webcam device number', type=int, default=0)
parser.add_argument('-v', '--verbose', help='Verbose output', type=bool, default=False)
parser.add_argument('--count', help='Count of frames to capture', type=int, default=None)
parser.add_argument('--fmt', help='Frame storage format', type=str, default='.jpg')
parser.add_argument('--fps', help='Frames per second (webcam)', type=float, default=15.0)
parser.add_argument('--maxlen', help='Maximum length of output stream', type=int, default=10000)
args = parser.parse_args()
# Set up Redis connection
url = urlparse(args.url)
conn = redis.Redis(host=url.hostname, port=url.port)
if not conn.ping():
raise Exception('Redis unavailable')
# Choose video source
if args.infile is None:
loader = Video(infile=args.webcam, fps=args.fps) # Default to webcam
else:
loader = Video(infile=args.infile, fps=args.fps) # Unless an input file (image or video) was specified
for (count, img) in loader:
_, data = cv2.imencode(args.fmt, img)
msg = {
'count': count,
'image': data.tobytes()
}
_id = conn.xadd(args.output, msg, maxlen=args.maxlen)
if args.verbose:
print('frame: {} id: {}'.format(count, _id))
if args.count is not None and count+1 == args.count:
print('Stopping after {} frames.'.format(count))
break
| 36 | 115 | 0.608282 |
import argparse
import cv2
import redis
import time
from urllib.parse import urlparse
class SimpleMovingAverage(object):
def __init__(self, value=0.0, count=7):
self.count = int(count)
self.current = float(value)
self.samples = [self.current] * self.count
def __str__(self):
return str(round(self.current, 3))
def add(self, value):
v = float(value)
self.samples.insert(0, v)
o = self.samples.pop()
self.current = self.current + (v-o)/self.count
class Video:
def __init__(self, infile=0, fps=30.0):
self.isFile = not str(infile).isdecimal()
self.ts = time.time()
self.infile = infile
self.cam = cv2.VideoCapture(self.infile)
if not self.isFile:
self.cam.set(cv2.CAP_PROP_FPS, fps)
self.fps = fps
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
else:
self.fps = self.cam.get(cv2.CAP_PROP_FPS)
self.sma = SimpleMovingAverage(value=0.1, count=19)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
# Respect FPS for files
if self.isFile:
delta = time.time() - self.ts
self.sma.add(delta)
time.sleep(max(0,(1 - self.sma.current*self.fps)/self.fps))
self.ts = time.time()
# Read image
ret_val, img0 = self.cam.read()
if not ret_val and self.isFile:
self.cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret_val, img0 = self.cam.read()
assert ret_val, 'Video Error'
# Preprocess
img = img0
if not self.isFile:
img = cv2.flip(img, 1)
return self.count, img
def __len__(self):
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='Input file (leave empty to use webcam)', nargs='?', type=str, default=None)
parser.add_argument('-o', '--output', help='Output stream key name', type=str, default='camera:0')
parser.add_argument('-u', '--url', help='Redis URL', type=str, default='redis://127.0.0.1:6379')
parser.add_argument('-w', '--webcam', help='Webcam device number', type=int, default=0)
parser.add_argument('-v', '--verbose', help='Verbose output', type=bool, default=False)
parser.add_argument('--count', help='Count of frames to capture', type=int, default=None)
parser.add_argument('--fmt', help='Frame storage format', type=str, default='.jpg')
parser.add_argument('--fps', help='Frames per second (webcam)', type=float, default=15.0)
parser.add_argument('--maxlen', help='Maximum length of output stream', type=int, default=10000)
args = parser.parse_args()
# Set up Redis connection
url = urlparse(args.url)
conn = redis.Redis(host=url.hostname, port=url.port)
if not conn.ping():
raise Exception('Redis unavailable')
# Choose video source
if args.infile is None:
loader = Video(infile=args.webcam, fps=args.fps) # Default to webcam
else:
loader = Video(infile=args.infile, fps=args.fps) # Unless an input file (image or video) was specified
for (count, img) in loader:
_, data = cv2.imencode(args.fmt, img)
msg = {
'count': count,
'image': data.tobytes()
}
_id = conn.xadd(args.output, msg, maxlen=args.maxlen)
if args.verbose:
print('frame: {} id: {}'.format(count, _id))
if args.count is not None and count+1 == args.count:
print('Stopping after {} frames.'.format(count))
break
| true | true |
f7320a0309d437c0bb2ee152a0442a5a71c318f3 | 13,419 | py | Python | eppy/client.py | infonetworks-global/eppy | d16d796a532455f8aca21c09ff0d0aef3293d806 | [
"MIT"
] | null | null | null | eppy/client.py | infonetworks-global/eppy | d16d796a532455f8aca21c09ff0d0aef3293d806 | [
"MIT"
] | null | null | null | eppy/client.py | infonetworks-global/eppy | d16d796a532455f8aca21c09ff0d0aef3293d806 | [
"MIT"
] | null | null | null | """
Module that implements the EppClient class
"""
try:
# use gevent if available
import gevent.socket as socket
import gevent.ssl as ssl
except ImportError:
import socket
import ssl
import struct
from collections import deque
import logging
from six import PY2, PY3
from past.builtins import xrange # Python 2 backwards compatibility
from .exceptions import EppLoginError, EppConnectionError
from .doc import (EppResponse, EppHello, EppLoginCommand, EppLogoutCommand,
EppCreateCommand, EppUpdateCommand, EppRenewCommand,
EppTransferCommand, EppDeleteCommand)
from .utils import gen_trid
try:
from ssl import match_hostname, CertificateError
except ImportError:
from backports.ssl_match_hostname import match_hostname, CertificateError
class EppClient(object):
"""
EPP client class
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, host=None, port=700,
ssl_enable=True, ssl_keyfile=None, ssl_certfile=None, ssl_cacerts=None,
ssl_version=None, ssl_ciphers=None,
ssl_validate_hostname=True, socket_timeout=60, socket_connect_timeout=15,
ssl_validate_cert=True):
self.host = host
self.port = port
self.ssl_enable = ssl_enable
# PROTOCOL_SSLv23 gives the best proto version available (including TLSv1 and above)
# SSLv2 should be disabled by most OpenSSL build
self.ssl_version = ssl_version or ssl.PROTOCOL_SSLv23
# `ssl_ciphers`, if given, should be a string
# (https://www.openssl.org/docs/apps/ciphers.html)
# if not given, use the default in Python version (`ssl._DEFAULT_CIPHERS`)
self.ssl_ciphers = ssl_ciphers
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
self.cacerts = ssl_cacerts
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout
self.validate_hostname = ssl_validate_hostname
self.log = logging.getLogger(__name__)
self.sock = None
self.greeting = None
if ssl_validate_cert:
self.cert_required = ssl.CERT_REQUIRED
else:
self.cert_required = ssl.CERT_NONE
def connect(self, host=None, port=None, address_family=None):
"""
Method that initiates a connection to an EPP host
"""
host = host or self.host
self.sock = socket.socket(address_family or socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.socket_connect_timeout) # connect timeout
self.sock.connect((host, port or self.port))
local_sock_addr = self.sock.getsockname()
local_addr, local_port = local_sock_addr[:2]
self.log.debug('connected local=%s:%s remote=%s:%s',
local_addr, local_port, self.sock.getpeername()[0], port)
if self.ssl_enable:
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
ssl_version=self.ssl_version,
ciphers=self.ssl_ciphers,
server_side=False,
cert_reqs=self.cert_required,
ca_certs=self.cacerts)
self.log.debug('%s negotiated with local=%s:%s remote=%s:%s', self.sock.version(),
local_addr, local_port, self.sock.getpeername()[0], port)
if self.validate_hostname:
try:
match_hostname(self.sock.getpeercert(), host)
except CertificateError as exp:
self.log.exception("SSL hostname mismatch")
raise EppConnectionError(str(exp))
self.greeting = EppResponse.from_xml(self.read().decode('utf-8'))
self.sock.settimeout(self.socket_timeout) # regular timeout
def remote_info(self):
"""
Method that returns the remote peer name
"""
return '{}:{}'.format(*self.sock.getpeername())
def hello(self, log_send_recv=False):
"""
Method to send EppHello()
"""
return self.send(EppHello(), log_send_recv=log_send_recv)
# pylint: disable=c0103
def login(self, clID, pw, newPW=None, raise_on_fail=True,
obj_uris=None, extra_obj_uris=None, extra_ext_uris=None, clTRID=None):
if not self.sock:
self.connect(self.host, self.port)
cmd = EppLoginCommand(
obj_uris=obj_uris,
extra_obj_uris=extra_obj_uris,
extra_ext_uris=extra_ext_uris)
cmd.clID = clID
cmd.pw = pw
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
if newPW:
cmd.newPW = newPW
r = self.send(cmd)
if not r.success and raise_on_fail:
raise EppLoginError(r)
return r
def logout(self, clTRID=None):
cmd = EppLogoutCommand()
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
return self.send(cmd)
# pylint: enable=c0103
def read(self):
recvmeth = self.sock.read if self.ssl_enable else self.sock.recv
siz = b''
while len(siz) < 4:
siz += recvmeth(4 - len(siz))
if not siz:
# empty string after read means EOF
self.close()
raise IOError("No size header read")
size_remaining = siz = struct.unpack(">I", siz)[0] - 4
data = b''
while size_remaining:
buf = recvmeth(size_remaining)
if not buf:
self.close()
raise IOError(
"Short / no data read (expected %d bytes, got %d)" %
(siz, len(data)))
size_remaining -= len(buf)
data += buf
return data
#self.log.debug("read total %d bytes:\n%s\n" % (siz+4, data))
def write(self, data):
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
if PY3:
datad = str.encode(data) if type(data) is str else data
writemeth(siz + datad)
else:
writemeth(siz + data)
def write_many(self, docs):
"""
For testing only.
Writes multiple documents at once
"""
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
buf = []
for doc in docs:
buf.append(struct.pack(">I", 4 + len(doc)))
buf.append(doc)
writemeth(b''.join(buf))
def send(self, doc, log_send_recv=True, extra_nsmap=None, strip_hints=True):
self._gen_cltrid(doc)
buf = doc.to_xml(force_prefix=True)
if log_send_recv:
self.log.debug("SEND %s: %s", self.remote_info(), buf.decode('utf-8'))
self.write(buf)
r_buf = self.read().decode('utf-8')
if log_send_recv:
self.log.debug("RECV %s: %s", self.remote_info(), r_buf)
resp = EppResponse.from_xml(r_buf, extra_nsmap=extra_nsmap)
if strip_hints:
self.strip_hints(resp)
doc.normalize_response(resp)
return resp
@staticmethod
def strip_hints(data):
"""
Remove various cruft from the given EppDoc
(useful for responses where we don't care about _order etc.)
"""
stack = deque([data])
while len(stack):
current = stack.pop()
for key in list(current.keys()):
if key in ('@xsi:schemaLocation', '_order'):
del current[key]
else:
val = current[key]
if isinstance(val, dict):
# visit later
stack.append(val)
elif isinstance(val, list):
# visit each dict in the list
for elem in val:
if isinstance(elem, dict):
stack.append(elem)
return data
def batchsend(self, docs, readresponse=True, failfast=True, pipeline=False):
""" Send multiple documents. If ``pipeline`` is True, it will
send it in a single ``write`` call (which may have the effect
of having more than one doc packed into a single TCP packet
if they fits) """
sent = 0
recved = 0
ndocs = len(docs)
try:
if pipeline:
self.write_many(docs)
sent = ndocs
else:
for doc in docs:
self.write(str(doc))
sent += 1
# pylint: disable=w0702
except:
self.log.error(
"Failed to send all commands (sent %d/%d)", sent, ndocs)
if failfast:
raise
if not readresponse:
return sent
try:
out = []
for _ in xrange(sent):
r_buf = self.read()
out.append(EppResponse.from_xml(r_buf))
recved += 1
# pylint: disable=w0702
except Exception as exp:
self.log.error(
"Failed to receive all responses (recv'ed %d/%d)", recved, sent)
# pad the rest with None
for _ in xrange(sent - len(out)):
out.append(None)
# pylint: enable=w0702
return out
def write_split(self, data):
"""
For testing only.
Writes the size header and first 4 bytes of the payload in one call,
then the rest of the payload in another call.
"""
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz + data[:4])
writemeth(data[4:])
def write_splitsize(self, data):
"""
For testing only.
Writes 2 bytes of the header, then another two bytes,
then the payload in another call.
"""
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data)
def write_splitall(self, data):
"""
For testing only.
Writes 2 bytes of the header, then another two bytes,
then 4 bytes of the payload, then the rest of the payload.
"""
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data[:4])
writemeth(data[4:])
def close(self):
self.sock.close()
self.sock = None
@staticmethod
def _gen_cltrid(doc):
if isinstance(doc, (EppLoginCommand, EppCreateCommand, EppUpdateCommand,
EppDeleteCommand, EppTransferCommand, EppRenewCommand)):
cmd_node = doc['epp']['command']
if not cmd_node.get('clTRID'):
cmd_node['clTRID'] = gen_trid()
def _get_ssl_protocol_version(self):
"""
This is a hack to get the negotiated protocol version of an SSL connection.
WARNING: Do not use this on anything other than Python 2.7
WARNING: Do not use on non-CPython.
WARNING: only use it for debugging.
WARNING: this will probably crash because we may be loading the wrong version of libssl
From https://github.com/python-git/python/blob/master/Modules/_ssl.c
the PySSLObject struct looks like this:
typedef struct {
PyObject_HEAD
PySocketSockObject *Socket; /* Socket on which we're layered */
SSL_CTX* ctx;
SSL* ssl;
X509* peer_cert;
char server[X509_NAME_MAXLEN];
char issuer[X509_NAME_MAXLEN];
} PySSLObject;
and this is stored as self.sock._sslobj so we pry open the mem location
and call OpenSSL's SSL_get_version C API
This technique is inspired by http://pyevolve.sourceforge.net/wordpress/?p=2171
"""
assert self.ssl_enable, "don't use it on non-SSL sockets"
assert self.sock._sslobj, "don't use it on non-SSL sockets"
import ctypes
import ctypes.util
size_pyobject_head = ctypes.sizeof(
ctypes.c_long) + ctypes.sizeof(ctypes.c_voidp)
# skip PySocketSockObject* and SSL_CTX*
real_ssl_offset = size_pyobject_head + ctypes.sizeof(ctypes.c_voidp) * 2
ssl_p = ctypes.c_voidp.from_address(id(self.sock._sslobj) + real_ssl_offset)
# libssl = ctypes.cdll.LoadLibrary('/usr/local/opt/openssl/lib/libssl.1.0.0.dylib')
libssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl'))
if not libssl:
return None
libssl.SSL_get_version.restype = ctypes.c_char_p
libssl.SSL_get_version.argtypes = [ctypes.c_void_p]
ver = libssl.SSL_get_version(ssl_p)
return ver
| 36.865385 | 95 | 0.577539 |
try:
import gevent.socket as socket
import gevent.ssl as ssl
except ImportError:
import socket
import ssl
import struct
from collections import deque
import logging
from six import PY2, PY3
from past.builtins import xrange
from .exceptions import EppLoginError, EppConnectionError
from .doc import (EppResponse, EppHello, EppLoginCommand, EppLogoutCommand,
EppCreateCommand, EppUpdateCommand, EppRenewCommand,
EppTransferCommand, EppDeleteCommand)
from .utils import gen_trid
try:
from ssl import match_hostname, CertificateError
except ImportError:
from backports.ssl_match_hostname import match_hostname, CertificateError
class EppClient(object):
def __init__(self, host=None, port=700,
ssl_enable=True, ssl_keyfile=None, ssl_certfile=None, ssl_cacerts=None,
ssl_version=None, ssl_ciphers=None,
ssl_validate_hostname=True, socket_timeout=60, socket_connect_timeout=15,
ssl_validate_cert=True):
self.host = host
self.port = port
self.ssl_enable = ssl_enable
self.ssl_version = ssl_version or ssl.PROTOCOL_SSLv23
self.ssl_ciphers = ssl_ciphers
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
self.cacerts = ssl_cacerts
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout
self.validate_hostname = ssl_validate_hostname
self.log = logging.getLogger(__name__)
self.sock = None
self.greeting = None
if ssl_validate_cert:
self.cert_required = ssl.CERT_REQUIRED
else:
self.cert_required = ssl.CERT_NONE
def connect(self, host=None, port=None, address_family=None):
host = host or self.host
self.sock = socket.socket(address_family or socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.socket_connect_timeout)
self.sock.connect((host, port or self.port))
local_sock_addr = self.sock.getsockname()
local_addr, local_port = local_sock_addr[:2]
self.log.debug('connected local=%s:%s remote=%s:%s',
local_addr, local_port, self.sock.getpeername()[0], port)
if self.ssl_enable:
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
ssl_version=self.ssl_version,
ciphers=self.ssl_ciphers,
server_side=False,
cert_reqs=self.cert_required,
ca_certs=self.cacerts)
self.log.debug('%s negotiated with local=%s:%s remote=%s:%s', self.sock.version(),
local_addr, local_port, self.sock.getpeername()[0], port)
if self.validate_hostname:
try:
match_hostname(self.sock.getpeercert(), host)
except CertificateError as exp:
self.log.exception("SSL hostname mismatch")
raise EppConnectionError(str(exp))
self.greeting = EppResponse.from_xml(self.read().decode('utf-8'))
self.sock.settimeout(self.socket_timeout)
def remote_info(self):
return '{}:{}'.format(*self.sock.getpeername())
def hello(self, log_send_recv=False):
return self.send(EppHello(), log_send_recv=log_send_recv)
def login(self, clID, pw, newPW=None, raise_on_fail=True,
obj_uris=None, extra_obj_uris=None, extra_ext_uris=None, clTRID=None):
if not self.sock:
self.connect(self.host, self.port)
cmd = EppLoginCommand(
obj_uris=obj_uris,
extra_obj_uris=extra_obj_uris,
extra_ext_uris=extra_ext_uris)
cmd.clID = clID
cmd.pw = pw
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
if newPW:
cmd.newPW = newPW
r = self.send(cmd)
if not r.success and raise_on_fail:
raise EppLoginError(r)
return r
def logout(self, clTRID=None):
cmd = EppLogoutCommand()
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
return self.send(cmd)
def read(self):
recvmeth = self.sock.read if self.ssl_enable else self.sock.recv
siz = b''
while len(siz) < 4:
siz += recvmeth(4 - len(siz))
if not siz:
self.close()
raise IOError("No size header read")
size_remaining = siz = struct.unpack(">I", siz)[0] - 4
data = b''
while size_remaining:
buf = recvmeth(size_remaining)
if not buf:
self.close()
raise IOError(
"Short / no data read (expected %d bytes, got %d)" %
(siz, len(data)))
size_remaining -= len(buf)
data += buf
return data
def write(self, data):
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
if PY3:
datad = str.encode(data) if type(data) is str else data
writemeth(siz + datad)
else:
writemeth(siz + data)
def write_many(self, docs):
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
buf = []
for doc in docs:
buf.append(struct.pack(">I", 4 + len(doc)))
buf.append(doc)
writemeth(b''.join(buf))
def send(self, doc, log_send_recv=True, extra_nsmap=None, strip_hints=True):
self._gen_cltrid(doc)
buf = doc.to_xml(force_prefix=True)
if log_send_recv:
self.log.debug("SEND %s: %s", self.remote_info(), buf.decode('utf-8'))
self.write(buf)
r_buf = self.read().decode('utf-8')
if log_send_recv:
self.log.debug("RECV %s: %s", self.remote_info(), r_buf)
resp = EppResponse.from_xml(r_buf, extra_nsmap=extra_nsmap)
if strip_hints:
self.strip_hints(resp)
doc.normalize_response(resp)
return resp
@staticmethod
def strip_hints(data):
stack = deque([data])
while len(stack):
current = stack.pop()
for key in list(current.keys()):
if key in ('@xsi:schemaLocation', '_order'):
del current[key]
else:
val = current[key]
if isinstance(val, dict):
stack.append(val)
elif isinstance(val, list):
for elem in val:
if isinstance(elem, dict):
stack.append(elem)
return data
def batchsend(self, docs, readresponse=True, failfast=True, pipeline=False):
sent = 0
recved = 0
ndocs = len(docs)
try:
if pipeline:
self.write_many(docs)
sent = ndocs
else:
for doc in docs:
self.write(str(doc))
sent += 1
except:
self.log.error(
"Failed to send all commands (sent %d/%d)", sent, ndocs)
if failfast:
raise
if not readresponse:
return sent
try:
out = []
for _ in xrange(sent):
r_buf = self.read()
out.append(EppResponse.from_xml(r_buf))
recved += 1
except Exception as exp:
self.log.error(
"Failed to receive all responses (recv'ed %d/%d)", recved, sent)
# pad the rest with None
for _ in xrange(sent - len(out)):
out.append(None)
# pylint: enable=w0702
return out
def write_split(self, data):
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz + data[:4])
writemeth(data[4:])
def write_splitsize(self, data):
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data)
def write_splitall(self, data):
writemeth = self.sock.sendall if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data[:4])
writemeth(data[4:])
def close(self):
self.sock.close()
self.sock = None
@staticmethod
def _gen_cltrid(doc):
if isinstance(doc, (EppLoginCommand, EppCreateCommand, EppUpdateCommand,
EppDeleteCommand, EppTransferCommand, EppRenewCommand)):
cmd_node = doc['epp']['command']
if not cmd_node.get('clTRID'):
cmd_node['clTRID'] = gen_trid()
def _get_ssl_protocol_version(self):
assert self.ssl_enable, "don't use it on non-SSL sockets"
assert self.sock._sslobj, "don't use it on non-SSL sockets"
import ctypes
import ctypes.util
size_pyobject_head = ctypes.sizeof(
ctypes.c_long) + ctypes.sizeof(ctypes.c_voidp)
# skip PySocketSockObject* and SSL_CTX*
real_ssl_offset = size_pyobject_head + ctypes.sizeof(ctypes.c_voidp) * 2
ssl_p = ctypes.c_voidp.from_address(id(self.sock._sslobj) + real_ssl_offset)
# libssl = ctypes.cdll.LoadLibrary('/usr/local/opt/openssl/lib/libssl.1.0.0.dylib')
libssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl'))
if not libssl:
return None
libssl.SSL_get_version.restype = ctypes.c_char_p
libssl.SSL_get_version.argtypes = [ctypes.c_void_p]
ver = libssl.SSL_get_version(ssl_p)
return ver
| true | true |
f7320a3608ba6d9708669c62e3c8029585363f25 | 604 | py | Python | Twitter Streaming/spark_app.py | simranjeet97/PySpark_Practice | 7dfb77a5c3e1b632007a32b47ff921972e9ecf87 | [
"Apache-2.0"
] | null | null | null | Twitter Streaming/spark_app.py | simranjeet97/PySpark_Practice | 7dfb77a5c3e1b632007a32b47ff921972e9ecf87 | [
"Apache-2.0"
] | null | null | null | Twitter Streaming/spark_app.py | simranjeet97/PySpark_Practice | 7dfb77a5c3e1b632007a32b47ff921972e9ecf87 | [
"Apache-2.0"
] | null | null | null | import findspark
findspark.init()
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
sc = SparkContext(appName="tweetStream")
# Create a local StreamingContext with batch interval of 1 second
ssc = StreamingContext(sc, 1)
# Create a DStream that conencts to hostname:port
lines = ssc.socketTextStream("127.0.0.1", 9009)
# Split Tweets
words = lines.flatMap(lambda s: s.lower().split("__end"))
# Print the first ten elements of each DStream RDD to the console
print(type(words))
words.saveAsTextFiles("data", ".txt")
# Wait for termination
ssc.awaitTermination()
| 27.454545 | 65 | 0.764901 | import findspark
findspark.init()
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
sc = SparkContext(appName="tweetStream")
ssc = StreamingContext(sc, 1)
lines = ssc.socketTextStream("127.0.0.1", 9009)
words = lines.flatMap(lambda s: s.lower().split("__end"))
print(type(words))
words.saveAsTextFiles("data", ".txt")
ssc.awaitTermination()
| true | true |
f7320b011accc73b2cbaaaa931708ad6886faa27 | 263,215 | py | Python | ptracer/ptrace/_gen_defs_linux_64.py | fakeNetflix/pinterest-repo-ptracer | 9f9d11403ec50f5e26ed2e8c5633fbb54813415a | [
"Apache-2.0"
] | 147 | 2017-10-24T19:48:49.000Z | 2022-02-12T21:02:07.000Z | ptracer/ptrace/_gen_defs_linux_64.py | sthagen/ptracer | b5019bc977c7c16b0b2713242d017b0ae72b4948 | [
"Apache-2.0"
] | 4 | 2021-02-03T14:23:58.000Z | 2022-02-24T18:19:20.000Z | ptracer/ptrace/_gen_defs_linux_64.py | sthagen/ptracer | b5019bc977c7c16b0b2713242d017b0ae72b4948 | [
"Apache-2.0"
] | 20 | 2017-10-24T19:48:35.000Z | 2022-03-17T01:04:24.000Z | # Automatically generated from system headers.
# DO NOT EDIT.
import ctypes
from .syscalldef import CType, SysCallSig, SysCallParamSig
PTRACE_TRACEME = 0
PTRACE_PEEKTEXT = 1
PTRACE_PEEKDATA = 2
PTRACE_PEEKUSER = 3
PTRACE_POKETEXT = 4
PTRACE_POKEDATA = 5
PTRACE_POKEUSER = 6
PTRACE_CONT = 7
PTRACE_KILL = 8
PTRACE_SINGLESTEP = 9
PTRACE_GETREGS = 12
PTRACE_SETREGS = 13
PTRACE_GETFPREGS = 14
PTRACE_SETFPREGS = 15
PTRACE_ATTACH = 16
PTRACE_DETACH = 17
PTRACE_GETFPXREGS = 18
PTRACE_SETFPXREGS = 19
PTRACE_SYSCALL = 24
PTRACE_SETOPTIONS = 0x4200
PTRACE_GETEVENTMSG = 0x4201
PTRACE_GETSIGINFO = 0x4202
PTRACE_SETSIGINFO = 0x4203
PTRACE_GETREGSET = 0x4204
PTRACE_SETREGSET = 0x4205
PTRACE_SEIZE = 0x4206
PTRACE_INTERRUPT = 0x4207
PTRACE_LISTEN = 0x4208
PTRACE_PEEKSIGINFO = 0x4209
PTRACE_GETSIGMASK = 0x420a
PTRACE_SETSIGMASK = 0x420b
PTRACE_SECCOMP_GET_FILTER = 0x420c
PTRACE_SEIZE_DEVEL = 0x80000000
PTRACE_O_TRACESYSGOOD = 0x00000001
PTRACE_O_TRACEFORK = 0x00000002
PTRACE_O_TRACEVFORK = 0x00000004
PTRACE_O_TRACECLONE = 0x00000008
PTRACE_O_TRACEEXEC = 0x00000010
PTRACE_O_TRACEVFORKDONE = 0x00000020
PTRACE_O_TRACEEXIT = 0x00000040
PTRACE_O_TRACESECCOMP = 0x00000080
PTRACE_O_EXITKILL = 0x00100000
PTRACE_O_SUSPEND_SECCOMP = 0x00200000
PTRACE_O_MASK = 0x003000ff
PTRACE_EVENT_FORK = 1
PTRACE_EVENT_VFORK = 2
PTRACE_EVENT_CLONE = 3
PTRACE_EVENT_EXEC = 4
PTRACE_EVENT_VFORK_DONE = 5
PTRACE_EVENT_EXIT = 6
PTRACE_EVENT_SECCOMP = 7
PTRACE_PEEKSIGINFO_SHARED = 1 << 0
class __ptrace_peeksiginfo_args(ctypes.Structure):
_fields_ = (
('off', ctypes.c_ulong),
('flags', ctypes.c_uint),
('nr', ctypes.c_int),
)
class user_fpregs_struct(ctypes.Structure):
_fields_ = (
('cwd', ctypes.c_ushort),
('swd', ctypes.c_ushort),
('ftw', ctypes.c_ushort),
('fop', ctypes.c_ushort),
('rip', ctypes.c_ulonglong),
('rdp', ctypes.c_ulonglong),
('mxcsr', ctypes.c_uint),
('mxcr_mask', ctypes.c_uint),
('st_space', ctypes.c_uint * 32),
('xmm_space', ctypes.c_uint * 64),
('padding', ctypes.c_uint * 24),
)
class user_regs_struct(ctypes.Structure):
_fields_ = (
('r15', ctypes.c_ulonglong),
('r14', ctypes.c_ulonglong),
('r13', ctypes.c_ulonglong),
('r12', ctypes.c_ulonglong),
('rbp', ctypes.c_ulonglong),
('rbx', ctypes.c_ulonglong),
('r11', ctypes.c_ulonglong),
('r10', ctypes.c_ulonglong),
('r9', ctypes.c_ulonglong),
('r8', ctypes.c_ulonglong),
('rax', ctypes.c_ulonglong),
('rcx', ctypes.c_ulonglong),
('rdx', ctypes.c_ulonglong),
('rsi', ctypes.c_ulonglong),
('rdi', ctypes.c_ulonglong),
('orig_rax', ctypes.c_ulonglong),
('rip', ctypes.c_ulonglong),
('cs', ctypes.c_ulonglong),
('eflags', ctypes.c_ulonglong),
('rsp', ctypes.c_ulonglong),
('ss', ctypes.c_ulonglong),
('fs_base', ctypes.c_ulonglong),
('gs_base', ctypes.c_ulonglong),
('ds', ctypes.c_ulonglong),
('es', ctypes.c_ulonglong),
('fs', ctypes.c_ulonglong),
('gs', ctypes.c_ulonglong),
)
class _anon_2(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
)
class _anon_3(ctypes.Structure):
_fields_ = (
('si_tid', ctypes.c_int),
('si_overrun', ctypes.c_int),
('si_sigval', ctypes.c_void_p),
)
class _anon_4(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
('si_sigval', ctypes.c_void_p),
)
class _anon_5(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
('si_status', ctypes.c_int),
('si_utime', ctypes.c_long),
('si_stime', ctypes.c_long),
)
class _anon_7(ctypes.Structure):
_fields_ = (
('_lower', ctypes.c_void_p),
('_upper', ctypes.c_void_p),
)
class _anon_6(ctypes.Structure):
_fields_ = (
('si_addr', ctypes.c_void_p),
('si_addr_lsb', ctypes.c_short),
('si_addr_bnd', _anon_7),
)
class _anon_8(ctypes.Structure):
_fields_ = (
('si_band', ctypes.c_long),
('si_fd', ctypes.c_int),
)
class _anon_9(ctypes.Structure):
_fields_ = (
('_call_addr', ctypes.c_void_p),
('_syscall', ctypes.c_int),
('_arch', ctypes.c_uint),
)
class _anon_1(ctypes.Union):
_fields_ = (
('_pad', ctypes.c_int * 28),
('_kill', _anon_2),
('_timer', _anon_3),
('_rt', _anon_4),
('_sigchld', _anon_5),
('_sigfault', _anon_6),
('_sigpoll', _anon_8),
('_sigsys', _anon_9),
)
class siginfo_t(ctypes.Structure):
    """ctypes mirror of the kernel's ``siginfo_t`` (64-bit layout).

    Three preamble ints followed by the per-signal payload union; ctypes
    inserts the natural padding before the 8-byte-aligned union. Field order
    is ABI-significant and must not be changed.
    """
    _fields_ = (
        ('si_signo', ctypes.c_int),  # signal number
        ('si_errno', ctypes.c_int),  # errno value, if any
        ('si_code', ctypes.c_int),   # signal origin code (SI_*/CLD_*/...)
        ('_sifields', _anon_1),      # payload union; valid member depends on si_signo/si_code
    )
SYSCALLS = {
'time': SysCallSig(
'time',
params=[
SysCallParamSig(
'tloc',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stime': SysCallSig(
'stime',
params=[
SysCallParamSig(
'tptr',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettimeofday': SysCallSig(
'gettimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'settimeofday': SysCallSig(
'settimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'adjtimex': SysCallSig(
'adjtimex',
params=[
SysCallParamSig(
'txc_p',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'times': SysCallSig(
'times',
params=[
SysCallParamSig(
'tbuf',
CType(
['struct', 'tms', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettid': SysCallSig(
'gettid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nanosleep': SysCallSig(
'nanosleep',
params=[
SysCallParamSig(
'rqtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'alarm': SysCallSig(
'alarm',
params=[
SysCallParamSig(
'seconds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpid': SysCallSig(
'getpid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getppid': SysCallSig(
'getppid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getuid': SysCallSig(
'getuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'geteuid': SysCallSig(
'geteuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgid': SysCallSig(
'getgid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getegid': SysCallSig(
'getegid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresuid': SysCallSig(
'getresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'euid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'suid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresgid': SysCallSig(
'getresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'egid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgid': SysCallSig(
'getpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgrp': SysCallSig(
'getpgrp',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsid': SysCallSig(
'getsid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgroups': SysCallSig(
'getgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setregid': SysCallSig(
'setregid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgid': SysCallSig(
'setgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setreuid': SysCallSig(
'setreuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setuid': SysCallSig(
'setuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresuid': SysCallSig(
'setresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'suid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresgid': SysCallSig(
'setresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsuid': SysCallSig(
'setfsuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsgid': SysCallSig(
'setfsgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpgid': SysCallSig(
'setpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setsid': SysCallSig(
'setsid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgroups': SysCallSig(
'setgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'acct': SysCallSig(
'acct',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capget': SysCallSig(
'capget',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'dataptr',
CType(
['cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capset': SysCallSig(
'capset',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'data',
CType(
['const', 'cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'personality': SysCallSig(
'personality',
params=[
SysCallParamSig(
'personality',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigpending': SysCallSig(
'sigpending',
params=[
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigprocmask': SysCallSig(
'sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'oset',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigaltstack': SysCallSig(
'sigaltstack',
params=[
SysCallParamSig(
'uss',
CType(
['const', 'struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uoss',
CType(
['struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getitimer': SysCallSig(
'getitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setitimer': SysCallSig(
'setitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'ovalue',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_create': SysCallSig(
'timer_create',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timer_event_spec',
CType(
['struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'created_timer_id',
CType(
['timer_t', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_gettime': SysCallSig(
'timer_gettime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_getoverrun': SysCallSig(
'timer_getoverrun',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_settime': SysCallSig(
'timer_settime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'new_setting',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_delete': SysCallSig(
'timer_delete',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_settime': SysCallSig(
'clock_settime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_gettime': SysCallSig(
'clock_gettime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_adjtime': SysCallSig(
'clock_adjtime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tx',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_getres': SysCallSig(
'clock_getres',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_nanosleep': SysCallSig(
'clock_nanosleep',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'rqtp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nice': SysCallSig(
'nice',
params=[
SysCallParamSig(
'increment',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setscheduler': SysCallSig(
'sched_setscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setparam': SysCallSig(
'sched_setparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setattr': SysCallSig(
'sched_setattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getscheduler': SysCallSig(
'sched_getscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getparam': SysCallSig(
'sched_getparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getattr': SysCallSig(
'sched_getattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setaffinity': SysCallSig(
'sched_setaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getaffinity': SysCallSig(
'sched_getaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_yield': SysCallSig(
'sched_yield',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_max': SysCallSig(
'sched_get_priority_max',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_min': SysCallSig(
'sched_get_priority_min',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_rr_get_interval': SysCallSig(
'sched_rr_get_interval',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'interval',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpriority': SysCallSig(
'setpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'niceval',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpriority': SysCallSig(
'getpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shutdown': SysCallSig(
'shutdown',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'reboot': SysCallSig(
'reboot',
params=[
SysCallParamSig(
'magic1',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'magic2',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'restart_syscall': SysCallSig(
'restart_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_load': SysCallSig(
'kexec_load',
params=[
SysCallParamSig(
'entry',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nr_segments',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'segments',
CType(
['struct', 'kexec_segment', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_file_load': SysCallSig(
'kexec_file_load',
params=[
SysCallParamSig(
'kernel_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'initrd_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmdline_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'cmdline_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit': SysCallSig(
'exit',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit_group': SysCallSig(
'exit_group',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'wait4': SysCallSig(
'wait4',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitid': SysCallSig(
'waitid',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'infop',
CType(
['struct', 'siginfo', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitpid': SysCallSig(
'waitpid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_tid_address': SysCallSig(
'set_tid_address',
params=[
SysCallParamSig(
'tidptr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futex': SysCallSig(
'futex',
params=[
SysCallParamSig(
'uaddr',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'val',
CType(
['u32'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'utime',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uaddr2',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'val3',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'init_module': SysCallSig(
'init_module',
params=[
SysCallParamSig(
'umod',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'delete_module': SysCallSig(
'delete_module',
params=[
SysCallParamSig(
'name_user',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigsuspend': SysCallSig(
'rt_sigsuspend',
params=[
SysCallParamSig(
'unewset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigaction': SysCallSig(
'rt_sigaction',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigprocmask': SysCallSig(
'rt_sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'oset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigpending': SysCallSig(
'rt_sigpending',
params=[
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigtimedwait': SysCallSig(
'rt_sigtimedwait',
params=[
SysCallParamSig(
'uthese',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uts',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_tgsigqueueinfo': SysCallSig(
'rt_tgsigqueueinfo',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kill': SysCallSig(
'kill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tgkill': SysCallSig(
'tgkill',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tkill': SysCallSig(
'tkill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigqueueinfo': SysCallSig(
'rt_sigqueueinfo',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sgetmask': SysCallSig(
'sgetmask',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ssetmask': SysCallSig(
'ssetmask',
params=[
SysCallParamSig(
'newmask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signal': SysCallSig(
'signal',
params=[
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handler',
CType(
['__sighandler_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pause': SysCallSig(
'pause',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync': SysCallSig(
'sync',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsync': SysCallSig(
'fsync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fdatasync': SysCallSig(
'fdatasync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bdflush': SysCallSig(
'bdflush',
params=[
SysCallParamSig(
'func',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'data',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mount': SysCallSig(
'mount',
params=[
SysCallParamSig(
'dev_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'dir_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'type',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umount': SysCallSig(
'umount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'oldumount': SysCallSig(
'oldumount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'truncate': SysCallSig(
'truncate',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'length',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ftruncate': SysCallSig(
'ftruncate',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'length',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stat': SysCallSig(
'stat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs': SysCallSig(
'statfs',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs64': SysCallSig(
'statfs64',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs': SysCallSig(
'fstatfs',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs64': SysCallSig(
'fstatfs64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lstat': SysCallSig(
'lstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstat': SysCallSig(
'fstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newstat': SysCallSig(
'newstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newlstat': SysCallSig(
'newlstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstat': SysCallSig(
'newfstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ustat': SysCallSig(
'ustat',
params=[
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ubuf',
CType(
['struct', 'ustat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setxattr': SysCallSig(
'setxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lsetxattr': SysCallSig(
'lsetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsetxattr': SysCallSig(
'fsetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getxattr': SysCallSig(
'getxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lgetxattr': SysCallSig(
'lgetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fgetxattr': SysCallSig(
'fgetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listxattr': SysCallSig(
'listxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llistxattr': SysCallSig(
'llistxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flistxattr': SysCallSig(
'flistxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'removexattr': SysCallSig(
'removexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lremovexattr': SysCallSig(
'lremovexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fremovexattr': SysCallSig(
'fremovexattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'brk': SysCallSig(
'brk',
params=[
SysCallParamSig(
'brk',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mprotect': SysCallSig(
'mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mremap': SysCallSig(
'mremap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'old_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'remap_file_pages': SysCallSig(
'remap_file_pages',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msync': SysCallSig(
'msync',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64': SysCallSig(
'fadvise64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64_64': SysCallSig(
'fadvise64_64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munmap': SysCallSig(
'munmap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock': SysCallSig(
'mlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlock': SysCallSig(
'munlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlockall': SysCallSig(
'mlockall',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlockall': SysCallSig(
'munlockall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'madvise': SysCallSig(
'madvise',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'behavior',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mincore': SysCallSig(
'mincore',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'vec',
CType(
['unsigned', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pivot_root': SysCallSig(
'pivot_root',
params=[
SysCallParamSig(
'new_root',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'put_old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chroot': SysCallSig(
'chroot',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mknod': SysCallSig(
'mknod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'link': SysCallSig(
'link',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlink': SysCallSig(
'symlink',
params=[
SysCallParamSig(
'old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'new',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlink': SysCallSig(
'unlink',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rename': SysCallSig(
'rename',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chmod': SysCallSig(
'chmod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmod': SysCallSig(
'fchmod',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fcntl': SysCallSig(
'fcntl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe': SysCallSig(
'pipe',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe2': SysCallSig(
'pipe2',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup': SysCallSig(
'dup',
params=[
SysCallParamSig(
'fildes',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup2': SysCallSig(
'dup2',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup3': SysCallSig(
'dup3',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
'from',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'num',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'on',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioctl': SysCallSig(
'ioctl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flock': SysCallSig(
'flock',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_setup': SysCallSig(
'io_setup',
params=[
SysCallParamSig(
'nr_reqs',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ctx',
CType(
['aio_context_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_destroy': SysCallSig(
'io_destroy',
params=[
SysCallParamSig(
'ctx',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_getevents': SysCallSig(
'io_getevents',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'min_nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_submit': SysCallSig(
'io_submit',
params=[
SysCallParamSig(
None,
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'__foo',
CType(
['struct', 'iocb', '*', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_cancel': SysCallSig(
'io_cancel',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'iocb',
CType(
['struct', 'iocb', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'result',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendfile': SysCallSig(
'sendfile',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendfile64': SysCallSig(
'sendfile64',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlink': SysCallSig(
'readlink',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'creat': SysCallSig(
'creat',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open': SysCallSig(
'open',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'close': SysCallSig(
'close',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'access': SysCallSig(
'access',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vhangup': SysCallSig(
'vhangup',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chown': SysCallSig(
'chown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lchown': SysCallSig(
'lchown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchown': SysCallSig(
'fchown',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utime': SysCallSig(
'utime',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'times',
CType(
['struct', 'utimbuf', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimes': SysCallSig(
'utimes',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lseek': SysCallSig(
'lseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llseek': SysCallSig(
'llseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset_high',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'offset_low',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'result',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'read': SysCallSig(
'read',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readahead': SysCallSig(
'readahead',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readv': SysCallSig(
'readv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'write': SysCallSig(
'write',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'writev': SysCallSig(
'writev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pread64': SysCallSig(
'pread64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwrite64': SysCallSig(
'pwrite64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv': SysCallSig(
'preadv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv2': SysCallSig(
'preadv2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev': SysCallSig(
'pwritev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev2': SysCallSig(
'pwritev2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getcwd': SysCallSig(
'getcwd',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdir': SysCallSig(
'mkdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chdir': SysCallSig(
'chdir',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchdir': SysCallSig(
'fchdir',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rmdir': SysCallSig(
'rmdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lookup_dcookie': SysCallSig(
'lookup_dcookie',
params=[
SysCallParamSig(
'cookie64',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'quotactl': SysCallSig(
'quotactl',
params=[
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'special',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'id',
CType(
['qid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'addr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents': SysCallSig(
'getdents',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents64': SysCallSig(
'getdents64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setsockopt': SysCallSig(
'setsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockopt': SysCallSig(
'getsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bind': SysCallSig(
'bind',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'connect': SysCallSig(
'connect',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept': SysCallSig(
'accept',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept4': SysCallSig(
'accept4',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockname': SysCallSig(
'getsockname',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpeername': SysCallSig(
'getpeername',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'send': SysCallSig(
'send',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendto': SysCallSig(
'sendto',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmsg': SysCallSig(
'sendmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmmsg': SysCallSig(
'sendmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recv': SysCallSig(
'recv',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvfrom': SysCallSig(
'recvfrom',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmsg': SysCallSig(
'recvmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmmsg': SysCallSig(
'recvmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socket': SysCallSig(
'socket',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketpair': SysCallSig(
'socketpair',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketcall': SysCallSig(
'socketcall',
params=[
SysCallParamSig(
'call',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'args',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listen': SysCallSig(
'listen',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'poll': SysCallSig(
'poll',
params=[
SysCallParamSig(
'ufds',
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nfds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'select': SysCallSig(
'select',
params=[
SysCallParamSig(
'n',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'inp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'outp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'exp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tvp',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_select': SysCallSig(
'old_select',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'sel_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create': SysCallSig(
'epoll_create',
params=[
SysCallParamSig(
'size',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create1': SysCallSig(
'epoll_create1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_ctl': SysCallSig(
'epoll_ctl',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'event',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_wait': SysCallSig(
'epoll_wait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_pwait': SysCallSig(
'epoll_pwait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sigmask',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gethostname': SysCallSig(
'gethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sethostname': SysCallSig(
'sethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setdomainname': SysCallSig(
'setdomainname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newuname': SysCallSig(
'newuname',
params=[
SysCallParamSig(
'name',
CType(
['struct', 'new_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uname': SysCallSig(
'uname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'old_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'olduname': SysCallSig(
'olduname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'oldold_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrlimit': SysCallSig(
'getrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setrlimit': SysCallSig(
'setrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'prlimit64': SysCallSig(
'prlimit64',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'new_rlim',
CType(
['const', 'struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_rlim',
CType(
['struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrusage': SysCallSig(
'getrusage',
params=[
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umask': SysCallSig(
'umask',
params=[
SysCallParamSig(
'mask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgget': SysCallSig(
'msgget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgsnd': SysCallSig(
'msgsnd',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgrcv': SysCallSig(
'msgrcv',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgtyp',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgctl': SysCallSig(
'msgctl',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'msqid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semget': SysCallSig(
'semget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nsems',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semop': SysCallSig(
'semop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semctl': SysCallSig(
'semctl',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semnum',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semtimedop': SysCallSig(
'semtimedop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmat': SysCallSig(
'shmat',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'shmflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmget': SysCallSig(
'shmget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmdt': SysCallSig(
'shmdt',
params=[
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmctl': SysCallSig(
'shmctl',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'shmid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ipc': SysCallSig(
'ipc',
params=[
SysCallParamSig(
'call',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'first',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'second',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'third',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'ptr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'fifth',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_open': SysCallSig(
'mq_open',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'oflag',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_unlink': SysCallSig(
'mq_unlink',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedsend': SysCallSig(
'mq_timedsend',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedreceive': SysCallSig(
'mq_timedreceive',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_notify': SysCallSig(
'mq_notify',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'notification',
CType(
['const', 'struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_getsetattr': SysCallSig(
'mq_getsetattr',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mqstat',
CType(
['const', 'struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'omqstat',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_iobase': SysCallSig(
'pciconfig_iobase',
params=[
SysCallParamSig(
'which',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'devfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_read': SysCallSig(
'pciconfig_read',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_write': SysCallSig(
'pciconfig_write',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'prctl': SysCallSig(
'prctl',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapon': SysCallSig(
'swapon',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'swap_flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapoff': SysCallSig(
'swapoff',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysctl': SysCallSig(
'sysctl',
params=[
SysCallParamSig(
'args',
CType(
['struct', '__sysctl_args', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysinfo': SysCallSig(
'sysinfo',
params=[
SysCallParamSig(
'info',
CType(
['struct', 'sysinfo', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysfs': SysCallSig(
'sysfs',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syslog': SysCallSig(
'syslog',
params=[
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uselib': SysCallSig(
'uselib',
params=[
SysCallParamSig(
'library',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ni_syscall': SysCallSig(
'ni_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ptrace': SysCallSig(
'ptrace',
params=[
SysCallParamSig(
'request',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'pid',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'add_key': SysCallSig(
'add_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_payload',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'plen',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'request_key': SysCallSig(
'request_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_callout_info',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'keyctl': SysCallSig(
'keyctl',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioprio_set': SysCallSig(
'ioprio_set',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ioprio',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioprio_get': SysCallSig(
'ioprio_get',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_mempolicy': SysCallSig(
'set_mempolicy',
params=[
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'migrate_pages': SysCallSig(
'migrate_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'from',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'to',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'move_pages': SysCallSig(
'move_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nr_pages',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pages',
CType(
['const', 'void', '*', '*'],
ctypes.c_long,
2
)
),
SysCallParamSig(
'nodes',
CType(
['const', 'int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'status',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mbind': SysCallSig(
'mbind',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'mode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_mempolicy': SysCallSig(
'get_mempolicy',
params=[
SysCallParamSig(
'policy',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'nmask',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_init': SysCallSig(
'inotify_init',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_init1': SysCallSig(
'inotify_init1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_add_watch': SysCallSig(
'inotify_add_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mask',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_rm_watch': SysCallSig(
'inotify_rm_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'wd',
CType(
['__s32'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'spu_run': SysCallSig(
'spu_run',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'unpc',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'ustatus',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'spu_create': SysCallSig(
'spu_create',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mknodat': SysCallSig(
'mknodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdirat': SysCallSig(
'mkdirat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlinkat': SysCallSig(
'unlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlinkat': SysCallSig(
'symlinkat',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'linkat': SysCallSig(
'linkat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat': SysCallSig(
'renameat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat2': SysCallSig(
'renameat2',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futimesat': SysCallSig(
'futimesat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'faccessat': SysCallSig(
'faccessat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmodat': SysCallSig(
'fchmodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchownat': SysCallSig(
'fchownat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'openat': SysCallSig(
'openat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstatat': SysCallSig(
'newfstatat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlinkat': SysCallSig(
'readlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimensat': SysCallSig(
'utimensat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unshare': SysCallSig(
'unshare',
params=[
SysCallParamSig(
'unshare_flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'splice': SysCallSig(
'splice',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vmsplice': SysCallSig(
'vmsplice',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'iov',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nr_segs',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tee': SysCallSig(
'tee',
params=[
SysCallParamSig(
'fdin',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fdout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range': SysCallSig(
'sync_file_range',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range2': SysCallSig(
'sync_file_range2',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_robust_list': SysCallSig(
'get_robust_list',
params=[
SysCallParamSig(
'pid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'head_ptr',
CType(
['struct', 'robust_list_head', '*', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len_ptr',
CType(
['size_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_robust_list': SysCallSig(
'set_robust_list',
params=[
SysCallParamSig(
'head',
CType(
['struct', 'robust_list_head', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getcpu': SysCallSig(
'getcpu',
params=[
SysCallParamSig(
'cpu',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'node',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'cache',
CType(
['struct', 'getcpu_cache', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signalfd': SysCallSig(
'signalfd',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signalfd4': SysCallSig(
'signalfd4',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_create': SysCallSig(
'timerfd_create',
params=[
SysCallParamSig(
'clockid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_settime': SysCallSig(
'timerfd_settime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'utmr',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_gettime': SysCallSig(
'timerfd_gettime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd': SysCallSig(
'eventfd',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd2': SysCallSig(
'eventfd2',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'memfd_create': SysCallSig(
'memfd_create',
params=[
SysCallParamSig(
'uname_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'userfaultfd': SysCallSig(
'userfaultfd',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fallocate': SysCallSig(
'fallocate',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_readdir': SysCallSig(
'old_readdir',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'old_linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pselect6': SysCallSig(
'pselect6',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ppoll': SysCallSig(
'ppoll',
params=[
SysCallParamSig(
None,
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_init': SysCallSig(
'fanotify_init',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'event_f_flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_mark': SysCallSig(
'fanotify_mark',
params=[
SysCallParamSig(
'fanotify_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syncfs': SysCallSig(
'syncfs',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fork': SysCallSig(
'fork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vfork': SysCallSig(
'vfork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clone': SysCallSig(
'clone',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execve': SysCallSig(
'execve',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'perf_event_open': SysCallSig(
'perf_event_open',
params=[
SysCallParamSig(
'attr_uptr',
CType(
['struct', 'perf_event_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cpu',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'group_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap_pgoff': SysCallSig(
'mmap_pgoff',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_mmap': SysCallSig(
'old_mmap',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'mmap_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'name_to_handle_at': SysCallSig(
'name_to_handle_at',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'mnt_id',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open_by_handle_at': SysCallSig(
'open_by_handle_at',
params=[
SysCallParamSig(
'mountdirfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setns': SysCallSig(
'setns',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nstype',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_readv': SysCallSig(
'process_vm_readv',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_writev': SysCallSig(
'process_vm_writev',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kcmp': SysCallSig(
'kcmp',
params=[
SysCallParamSig(
'pid1',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid2',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'idx1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'idx2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'finit_module': SysCallSig(
'finit_module',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'seccomp': SysCallSig(
'seccomp',
params=[
SysCallParamSig(
'op',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrandom': SysCallSig(
'getrandom',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bpf': SysCallSig(
'bpf',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['union', 'bpf_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execveat': SysCallSig(
'execveat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'membarrier': SysCallSig(
'membarrier',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'copy_file_range': SysCallSig(
'copy_file_range',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock2': SysCallSig(
'mlock2',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_mprotect': SysCallSig(
'pkey_mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_alloc': SysCallSig(
'pkey_alloc',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'init_val',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_free': SysCallSig(
'pkey_free',
params=[
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statx': SysCallSig(
'statx',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buffer',
CType(
['struct', 'statx', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'iopl': SysCallSig(
'iopl',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'modify_ldt': SysCallSig(
'modify_ldt',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['int'], ctypes.c_int, 0)
),
'rt_sigreturn': SysCallSig(
'rt_sigreturn',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_thread_area': SysCallSig(
'set_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_thread_area': SysCallSig(
'get_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'arch_prctl': SysCallSig(
'arch_prctl',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap': SysCallSig(
'mmap',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
}
# x86-64 Linux syscall-number -> syscall-name table.  The numbers are
# contiguous (0..332) and follow the x86-64 syscall table up to 'statx'.
# NOTE(review): presumably used to translate the syscall number a stopped
# tracee reports (orig_rax) into a key of the SYSCALLS signature table —
# confirm against the tracer code that consumes this module.
SYSCALL_NUMBERS = {
    0: 'read',
    1: 'write',
    2: 'open',
    3: 'close',
    4: 'stat',
    5: 'fstat',
    6: 'lstat',
    7: 'poll',
    8: 'lseek',
    9: 'mmap',
    10: 'mprotect',
    11: 'munmap',
    12: 'brk',
    13: 'rt_sigaction',
    14: 'rt_sigprocmask',
    15: 'rt_sigreturn',
    16: 'ioctl',
    17: 'pread64',
    18: 'pwrite64',
    19: 'readv',
    20: 'writev',
    21: 'access',
    22: 'pipe',
    23: 'select',
    24: 'sched_yield',
    25: 'mremap',
    26: 'msync',
    27: 'mincore',
    28: 'madvise',
    29: 'shmget',
    30: 'shmat',
    31: 'shmctl',
    32: 'dup',
    33: 'dup2',
    34: 'pause',
    35: 'nanosleep',
    36: 'getitimer',
    37: 'alarm',
    38: 'setitimer',
    39: 'getpid',
    40: 'sendfile',
    41: 'socket',
    42: 'connect',
    43: 'accept',
    44: 'sendto',
    45: 'recvfrom',
    46: 'sendmsg',
    47: 'recvmsg',
    48: 'shutdown',
    49: 'bind',
    50: 'listen',
    51: 'getsockname',
    52: 'getpeername',
    53: 'socketpair',
    54: 'setsockopt',
    55: 'getsockopt',
    56: 'clone',
    57: 'fork',
    58: 'vfork',
    59: 'execve',
    60: 'exit',
    61: 'wait4',
    62: 'kill',
    63: 'uname',
    64: 'semget',
    65: 'semop',
    66: 'semctl',
    67: 'shmdt',
    68: 'msgget',
    69: 'msgsnd',
    70: 'msgrcv',
    71: 'msgctl',
    72: 'fcntl',
    73: 'flock',
    74: 'fsync',
    75: 'fdatasync',
    76: 'truncate',
    77: 'ftruncate',
    78: 'getdents',
    79: 'getcwd',
    80: 'chdir',
    81: 'fchdir',
    82: 'rename',
    83: 'mkdir',
    84: 'rmdir',
    85: 'creat',
    86: 'link',
    87: 'unlink',
    88: 'symlink',
    89: 'readlink',
    90: 'chmod',
    91: 'fchmod',
    92: 'chown',
    93: 'fchown',
    94: 'lchown',
    95: 'umask',
    96: 'gettimeofday',
    97: 'getrlimit',
    98: 'getrusage',
    99: 'sysinfo',
    100: 'times',
    101: 'ptrace',
    102: 'getuid',
    103: 'syslog',
    104: 'getgid',
    105: 'setuid',
    106: 'setgid',
    107: 'geteuid',
    108: 'getegid',
    109: 'setpgid',
    110: 'getppid',
    111: 'getpgrp',
    112: 'setsid',
    113: 'setreuid',
    114: 'setregid',
    115: 'getgroups',
    116: 'setgroups',
    117: 'setresuid',
    118: 'getresuid',
    119: 'setresgid',
    120: 'getresgid',
    121: 'getpgid',
    122: 'setfsuid',
    123: 'setfsgid',
    124: 'getsid',
    125: 'capget',
    126: 'capset',
    127: 'rt_sigpending',
    128: 'rt_sigtimedwait',
    129: 'rt_sigqueueinfo',
    130: 'rt_sigsuspend',
    131: 'sigaltstack',
    132: 'utime',
    133: 'mknod',
    134: 'uselib',
    135: 'personality',
    136: 'ustat',
    137: 'statfs',
    138: 'fstatfs',
    139: 'sysfs',
    140: 'getpriority',
    141: 'setpriority',
    142: 'sched_setparam',
    143: 'sched_getparam',
    144: 'sched_setscheduler',
    145: 'sched_getscheduler',
    146: 'sched_get_priority_max',
    147: 'sched_get_priority_min',
    148: 'sched_rr_get_interval',
    149: 'mlock',
    150: 'munlock',
    151: 'mlockall',
    152: 'munlockall',
    153: 'vhangup',
    154: 'modify_ldt',
    155: 'pivot_root',
    156: '_sysctl',
    157: 'prctl',
    158: 'arch_prctl',
    159: 'adjtimex',
    160: 'setrlimit',
    161: 'chroot',
    162: 'sync',
    163: 'acct',
    164: 'settimeofday',
    165: 'mount',
    166: 'umount2',
    167: 'swapon',
    168: 'swapoff',
    169: 'reboot',
    170: 'sethostname',
    171: 'setdomainname',
    172: 'iopl',
    173: 'ioperm',
    174: 'create_module',
    175: 'init_module',
    176: 'delete_module',
    177: 'get_kernel_syms',
    178: 'query_module',
    179: 'quotactl',
    180: 'nfsservctl',
    181: 'getpmsg',
    182: 'putpmsg',
    183: 'afs_syscall',
    184: 'tuxcall',
    185: 'security',
    186: 'gettid',
    187: 'readahead',
    188: 'setxattr',
    189: 'lsetxattr',
    190: 'fsetxattr',
    191: 'getxattr',
    192: 'lgetxattr',
    193: 'fgetxattr',
    194: 'listxattr',
    195: 'llistxattr',
    196: 'flistxattr',
    197: 'removexattr',
    198: 'lremovexattr',
    199: 'fremovexattr',
    200: 'tkill',
    201: 'time',
    202: 'futex',
    203: 'sched_setaffinity',
    204: 'sched_getaffinity',
    205: 'set_thread_area',
    206: 'io_setup',
    207: 'io_destroy',
    208: 'io_getevents',
    209: 'io_submit',
    210: 'io_cancel',
    211: 'get_thread_area',
    212: 'lookup_dcookie',
    213: 'epoll_create',
    214: 'epoll_ctl_old',
    215: 'epoll_wait_old',
    216: 'remap_file_pages',
    217: 'getdents64',
    218: 'set_tid_address',
    219: 'restart_syscall',
    220: 'semtimedop',
    221: 'fadvise64',
    222: 'timer_create',
    223: 'timer_settime',
    224: 'timer_gettime',
    225: 'timer_getoverrun',
    226: 'timer_delete',
    227: 'clock_settime',
    228: 'clock_gettime',
    229: 'clock_getres',
    230: 'clock_nanosleep',
    231: 'exit_group',
    232: 'epoll_wait',
    233: 'epoll_ctl',
    234: 'tgkill',
    235: 'utimes',
    236: 'vserver',
    237: 'mbind',
    238: 'set_mempolicy',
    239: 'get_mempolicy',
    240: 'mq_open',
    241: 'mq_unlink',
    242: 'mq_timedsend',
    243: 'mq_timedreceive',
    244: 'mq_notify',
    245: 'mq_getsetattr',
    246: 'kexec_load',
    247: 'waitid',
    248: 'add_key',
    249: 'request_key',
    250: 'keyctl',
    251: 'ioprio_set',
    252: 'ioprio_get',
    253: 'inotify_init',
    254: 'inotify_add_watch',
    255: 'inotify_rm_watch',
    256: 'migrate_pages',
    257: 'openat',
    258: 'mkdirat',
    259: 'mknodat',
    260: 'fchownat',
    261: 'futimesat',
    262: 'newfstatat',
    263: 'unlinkat',
    264: 'renameat',
    265: 'linkat',
    266: 'symlinkat',
    267: 'readlinkat',
    268: 'fchmodat',
    269: 'faccessat',
    270: 'pselect6',
    271: 'ppoll',
    272: 'unshare',
    273: 'set_robust_list',
    274: 'get_robust_list',
    275: 'splice',
    276: 'tee',
    277: 'sync_file_range',
    278: 'vmsplice',
    279: 'move_pages',
    280: 'utimensat',
    281: 'epoll_pwait',
    282: 'signalfd',
    283: 'timerfd_create',
    284: 'eventfd',
    285: 'fallocate',
    286: 'timerfd_settime',
    287: 'timerfd_gettime',
    288: 'accept4',
    289: 'signalfd4',
    290: 'eventfd2',
    291: 'epoll_create1',
    292: 'dup3',
    293: 'pipe2',
    294: 'inotify_init1',
    295: 'preadv',
    296: 'pwritev',
    297: 'rt_tgsigqueueinfo',
    298: 'perf_event_open',
    299: 'recvmmsg',
    300: 'fanotify_init',
    301: 'fanotify_mark',
    302: 'prlimit64',
    303: 'name_to_handle_at',
    304: 'open_by_handle_at',
    305: 'clock_adjtime',
    306: 'syncfs',
    307: 'sendmmsg',
    308: 'setns',
    309: 'getcpu',
    310: 'process_vm_readv',
    311: 'process_vm_writev',
    312: 'kcmp',
    313: 'finit_module',
    314: 'sched_setattr',
    315: 'sched_getattr',
    316: 'renameat2',
    317: 'seccomp',
    318: 'getrandom',
    319: 'memfd_create',
    320: 'kexec_file_load',
    321: 'bpf',
    322: 'execveat',
    323: 'userfaultfd',
    324: 'membarrier',
    325: 'mlock2',
    326: 'copy_file_range',
    327: 'preadv2',
    328: 'pwritev2',
    329: 'pkey_mprotect',
    330: 'pkey_alloc',
    331: 'pkey_free',
    332: 'statx',
}
| 24.733603 | 61 | 0.283909 |
import ctypes
from .syscalldef import CType, SysCallSig, SysCallParamSig
# ptrace(2) request codes.  Values mirror the Linux <sys/ptrace.h>
# definitions; requests in the 0x42xx range are the Linux-specific
# extended requests.
PTRACE_TRACEME = 0
PTRACE_PEEKTEXT = 1
PTRACE_PEEKDATA = 2
PTRACE_PEEKUSER = 3
PTRACE_POKETEXT = 4
PTRACE_POKEDATA = 5
PTRACE_POKEUSER = 6
PTRACE_CONT = 7
PTRACE_KILL = 8
PTRACE_SINGLESTEP = 9
# Register access (x86 numbering — 10/11 are unused on this arch).
PTRACE_GETREGS = 12
PTRACE_SETREGS = 13
PTRACE_GETFPREGS = 14
PTRACE_SETFPREGS = 15
PTRACE_ATTACH = 16
PTRACE_DETACH = 17
PTRACE_GETFPXREGS = 18
PTRACE_SETFPXREGS = 19
# Stop the tracee at the next syscall entry/exit.
PTRACE_SYSCALL = 24
# Linux extended requests.
PTRACE_SETOPTIONS = 0x4200
PTRACE_GETEVENTMSG = 0x4201
PTRACE_GETSIGINFO = 0x4202
PTRACE_SETSIGINFO = 0x4203
PTRACE_GETREGSET = 0x4204
PTRACE_SETREGSET = 0x4205
PTRACE_SEIZE = 0x4206
PTRACE_INTERRUPT = 0x4207
PTRACE_LISTEN = 0x4208
PTRACE_PEEKSIGINFO = 0x4209
PTRACE_GETSIGMASK = 0x420a
PTRACE_SETSIGMASK = 0x420b
PTRACE_SECCOMP_GET_FILTER = 0x420c
PTRACE_SEIZE_DEVEL = 0x80000000
# Option bit flags for PTRACE_SETOPTIONS.
PTRACE_O_TRACESYSGOOD = 0x00000001
PTRACE_O_TRACEFORK = 0x00000002
PTRACE_O_TRACEVFORK = 0x00000004
PTRACE_O_TRACECLONE = 0x00000008
PTRACE_O_TRACEEXEC = 0x00000010
PTRACE_O_TRACEVFORKDONE = 0x00000020
PTRACE_O_TRACEEXIT = 0x00000040
PTRACE_O_TRACESECCOMP = 0x00000080
PTRACE_O_EXITKILL = 0x00100000
PTRACE_O_SUSPEND_SECCOMP = 0x00200000
PTRACE_O_MASK = 0x003000ff
# Extended stop-event codes (delivered in bits 16+ of the wait status
# when the matching PTRACE_O_* option is enabled).
PTRACE_EVENT_FORK = 1
PTRACE_EVENT_VFORK = 2
PTRACE_EVENT_CLONE = 3
PTRACE_EVENT_EXEC = 4
PTRACE_EVENT_VFORK_DONE = 5
PTRACE_EVENT_EXIT = 6
PTRACE_EVENT_SECCOMP = 7
# Flag for __ptrace_peeksiginfo_args.flags.
PTRACE_PEEKSIGINFO_SHARED = 1 << 0
class __ptrace_peeksiginfo_args(ctypes.Structure):
    """Argument block passed to the PTRACE_PEEKSIGINFO request.

    Field order and types are ABI-critical: they must match the kernel's
    ``struct ptrace_peeksiginfo_args``.
    """
    _fields_ = [
        ('off', ctypes.c_ulong),   # queue position to start copying from
        ('flags', ctypes.c_uint),  # e.g. PTRACE_PEEKSIGINFO_SHARED
        ('nr', ctypes.c_int),      # number of siginfo entries to copy
    ]
class user_fpregs_struct(ctypes.Structure):
    """x86-64 FPU/SSE register dump as used by PTRACE_GETFPREGS.

    Mirrors ``struct user_fpregs_struct`` from <sys/user.h>: the 512-byte
    FXSAVE layout.  Field order must not change.
    """
    _fields_ = [
        ('cwd', ctypes.c_ushort),           # FPU control word
        ('swd', ctypes.c_ushort),           # FPU status word
        ('ftw', ctypes.c_ushort),           # tag word
        ('fop', ctypes.c_ushort),           # last FP opcode
        ('rip', ctypes.c_ulonglong),        # last FP instruction pointer
        ('rdp', ctypes.c_ulonglong),        # last FP data pointer
        ('mxcsr', ctypes.c_uint),           # SSE control/status
        ('mxcr_mask', ctypes.c_uint),
        ('st_space', ctypes.c_uint * 32),   # 8 ST/MMX registers, 16 bytes each
        ('xmm_space', ctypes.c_uint * 64),  # 16 XMM registers, 16 bytes each
        ('padding', ctypes.c_uint * 24),
    ]
class user_regs_struct(ctypes.Structure):
    """x86-64 general-purpose register dump (PTRACE_GETREGS/SETREGS).

    Mirrors ``struct user_regs_struct`` from <sys/user.h>.  Every slot is
    a 64-bit value, so the field list is generated from the register names
    in kernel ABI order — do not reorder.
    """
    _fields_ = [(_reg, ctypes.c_ulonglong) for _reg in (
        'r15', 'r14', 'r13', 'r12', 'rbp', 'rbx', 'r11', 'r10',
        'r9', 'r8', 'rax', 'rcx', 'rdx', 'rsi', 'rdi',
        'orig_rax',  # rax as it was at syscall entry
        'rip', 'cs', 'eflags', 'rsp', 'ss',
        'fs_base', 'gs_base', 'ds', 'es', 'fs', 'gs',
    )]
class _anon_2(ctypes.Structure):
    """The ``_kill`` arm of siginfo_t's payload union (kill/tkill senders)."""
    _fields_ = [
        ('si_pid', ctypes.c_int),   # sending process id
        ('si_uid', ctypes.c_uint),  # real uid of the sender
    ]
class _anon_3(ctypes.Structure):
    """The ``_timer`` arm of siginfo_t's payload union (POSIX timer expiry)."""
    _fields_ = [
        ('si_tid', ctypes.c_int),        # kernel timer id
        ('si_overrun', ctypes.c_int),    # timer overrun count
        ('si_sigval', ctypes.c_void_p),  # sigevent value payload
    ]
class _anon_4(ctypes.Structure):
    """The ``_rt`` arm of siginfo_t's payload union (queued realtime signals)."""
    _fields_ = [
        ('si_pid', ctypes.c_int),        # sender pid
        ('si_uid', ctypes.c_uint),       # sender real uid
        ('si_sigval', ctypes.c_void_p),  # value queued with the signal
    ]
class _anon_5(ctypes.Structure):
    """The ``_sigchld`` arm of siginfo_t's payload union (child state change)."""
    _fields_ = [
        ('si_pid', ctypes.c_int),     # child process id
        ('si_uid', ctypes.c_uint),    # child's real uid
        ('si_status', ctypes.c_int),  # exit status or terminating signal
        ('si_utime', ctypes.c_long),  # user CPU time consumed by the child
        ('si_stime', ctypes.c_long),  # system CPU time consumed by the child
    ]
class _anon_7(ctypes.Structure):
    """Address bound pair reported with bound-violation faults (SEGV_BNDERR)."""
    _fields_ = [
        ('_lower', ctypes.c_void_p),  # lower bound of the violated range
        ('_upper', ctypes.c_void_p),  # upper bound of the violated range
    ]
class _anon_6(ctypes.Structure):
    """The ``_sigfault`` arm of siginfo_t's payload union (fault-address info)."""
    _fields_ = [
        ('si_addr', ctypes.c_void_p),     # address that caused the fault
        ('si_addr_lsb', ctypes.c_short),  # LSB of the reported address
        ('si_addr_bnd', _anon_7),         # violated bounds (SEGV_BNDERR)
    ]
class _anon_8(ctypes.Structure):
    """The ``_sigpoll`` arm of siginfo_t's payload union (SIGPOLL/SIGIO)."""
    _fields_ = [
        ('si_band', ctypes.c_long),  # band event (POLL_IN/POLL_OUT/...)
        ('si_fd', ctypes.c_int),     # file descriptor the event refers to
    ]
class _anon_9(ctypes.Structure):
    """The ``_sigsys`` arm of siginfo_t's payload union (SIGSYS, e.g. seccomp)."""
    _fields_ = [
        ('_call_addr', ctypes.c_void_p),  # address of the syscall instruction
        ('_syscall', ctypes.c_int),       # attempted syscall number
        ('_arch', ctypes.c_uint),         # AUDIT_ARCH_* of the attempted call
    ]
class _anon_1(ctypes.Union):
    """Payload union of siginfo_t — exactly one arm is meaningful per signal.

    Which arm applies depends on si_signo/si_code; ``_pad`` pins the union
    to the kernel's fixed size (28 ints).
    """
    _fields_ = [
        ('_pad', ctypes.c_int * 28),
        ('_kill', _anon_2),      # kill()/tkill() senders
        ('_timer', _anon_3),     # POSIX timer expiry
        ('_rt', _anon_4),        # queued realtime signals
        ('_sigchld', _anon_5),   # child state change
        ('_sigfault', _anon_6),  # fault address (SIGSEGV/SIGBUS/...)
        ('_sigpoll', _anon_8),   # SIGPOLL/SIGIO
        ('_sigsys', _anon_9),    # SIGSYS / seccomp
    ]
class siginfo_t(ctypes.Structure):
    """Mirror of the kernel ``siginfo_t``, exchanged via
    PTRACE_GETSIGINFO / PTRACE_SETSIGINFO.
    """
    _fields_ = [
        ('si_signo', ctypes.c_int),  # signal number
        ('si_errno', ctypes.c_int),  # associated errno value (0 if unused)
        ('si_code', ctypes.c_int),   # signal origin code (SI_USER, ...)
        ('_sifields', _anon_1),      # signal-specific payload union
    ]
SYSCALLS = {
'time': SysCallSig(
'time',
params=[
SysCallParamSig(
'tloc',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stime': SysCallSig(
'stime',
params=[
SysCallParamSig(
'tptr',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettimeofday': SysCallSig(
'gettimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'settimeofday': SysCallSig(
'settimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'adjtimex': SysCallSig(
'adjtimex',
params=[
SysCallParamSig(
'txc_p',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'times': SysCallSig(
'times',
params=[
SysCallParamSig(
'tbuf',
CType(
['struct', 'tms', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettid': SysCallSig(
'gettid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nanosleep': SysCallSig(
'nanosleep',
params=[
SysCallParamSig(
'rqtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'alarm': SysCallSig(
'alarm',
params=[
SysCallParamSig(
'seconds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpid': SysCallSig(
'getpid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getppid': SysCallSig(
'getppid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getuid': SysCallSig(
'getuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'geteuid': SysCallSig(
'geteuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgid': SysCallSig(
'getgid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getegid': SysCallSig(
'getegid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresuid': SysCallSig(
'getresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'euid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'suid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresgid': SysCallSig(
'getresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'egid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgid': SysCallSig(
'getpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgrp': SysCallSig(
'getpgrp',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsid': SysCallSig(
'getsid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgroups': SysCallSig(
'getgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setregid': SysCallSig(
'setregid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgid': SysCallSig(
'setgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setreuid': SysCallSig(
'setreuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setuid': SysCallSig(
'setuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresuid': SysCallSig(
'setresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'suid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresgid': SysCallSig(
'setresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsuid': SysCallSig(
'setfsuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsgid': SysCallSig(
'setfsgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpgid': SysCallSig(
'setpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setsid': SysCallSig(
'setsid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgroups': SysCallSig(
'setgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'acct': SysCallSig(
'acct',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capget': SysCallSig(
'capget',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'dataptr',
CType(
['cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capset': SysCallSig(
'capset',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'data',
CType(
['const', 'cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'personality': SysCallSig(
'personality',
params=[
SysCallParamSig(
'personality',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigpending': SysCallSig(
'sigpending',
params=[
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigprocmask': SysCallSig(
'sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'oset',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigaltstack': SysCallSig(
'sigaltstack',
params=[
SysCallParamSig(
'uss',
CType(
['const', 'struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uoss',
CType(
['struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getitimer': SysCallSig(
'getitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setitimer': SysCallSig(
'setitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'ovalue',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_create': SysCallSig(
'timer_create',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timer_event_spec',
CType(
['struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'created_timer_id',
CType(
['timer_t', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_gettime': SysCallSig(
'timer_gettime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_getoverrun': SysCallSig(
'timer_getoverrun',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_settime': SysCallSig(
'timer_settime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'new_setting',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_delete': SysCallSig(
'timer_delete',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_settime': SysCallSig(
'clock_settime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_gettime': SysCallSig(
'clock_gettime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_adjtime': SysCallSig(
'clock_adjtime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tx',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_getres': SysCallSig(
'clock_getres',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_nanosleep': SysCallSig(
'clock_nanosleep',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'rqtp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nice': SysCallSig(
'nice',
params=[
SysCallParamSig(
'increment',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setscheduler': SysCallSig(
'sched_setscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setparam': SysCallSig(
'sched_setparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setattr': SysCallSig(
'sched_setattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getscheduler': SysCallSig(
'sched_getscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getparam': SysCallSig(
'sched_getparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getattr': SysCallSig(
'sched_getattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setaffinity': SysCallSig(
'sched_setaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getaffinity': SysCallSig(
'sched_getaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_yield': SysCallSig(
'sched_yield',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_max': SysCallSig(
'sched_get_priority_max',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_min': SysCallSig(
'sched_get_priority_min',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_rr_get_interval': SysCallSig(
'sched_rr_get_interval',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'interval',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpriority': SysCallSig(
'setpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'niceval',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpriority': SysCallSig(
'getpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shutdown': SysCallSig(
'shutdown',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'reboot': SysCallSig(
'reboot',
params=[
SysCallParamSig(
'magic1',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'magic2',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'restart_syscall': SysCallSig(
'restart_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_load': SysCallSig(
'kexec_load',
params=[
SysCallParamSig(
'entry',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nr_segments',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'segments',
CType(
['struct', 'kexec_segment', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_file_load': SysCallSig(
'kexec_file_load',
params=[
SysCallParamSig(
'kernel_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'initrd_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmdline_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'cmdline_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit': SysCallSig(
'exit',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit_group': SysCallSig(
'exit_group',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'wait4': SysCallSig(
'wait4',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitid': SysCallSig(
'waitid',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'infop',
CType(
['struct', 'siginfo', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitpid': SysCallSig(
'waitpid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_tid_address': SysCallSig(
'set_tid_address',
params=[
SysCallParamSig(
'tidptr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futex': SysCallSig(
'futex',
params=[
SysCallParamSig(
'uaddr',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'val',
CType(
['u32'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'utime',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uaddr2',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'val3',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'init_module': SysCallSig(
'init_module',
params=[
SysCallParamSig(
'umod',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'delete_module': SysCallSig(
'delete_module',
params=[
SysCallParamSig(
'name_user',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigsuspend': SysCallSig(
'rt_sigsuspend',
params=[
SysCallParamSig(
'unewset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigaction': SysCallSig(
'rt_sigaction',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigprocmask': SysCallSig(
'rt_sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'oset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigpending': SysCallSig(
'rt_sigpending',
params=[
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigtimedwait': SysCallSig(
'rt_sigtimedwait',
params=[
SysCallParamSig(
'uthese',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uts',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_tgsigqueueinfo': SysCallSig(
'rt_tgsigqueueinfo',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kill': SysCallSig(
'kill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tgkill': SysCallSig(
'tgkill',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tkill': SysCallSig(
'tkill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigqueueinfo': SysCallSig(
'rt_sigqueueinfo',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sgetmask': SysCallSig(
'sgetmask',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ssetmask': SysCallSig(
'ssetmask',
params=[
SysCallParamSig(
'newmask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signal': SysCallSig(
'signal',
params=[
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handler',
CType(
['__sighandler_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pause': SysCallSig(
'pause',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync': SysCallSig(
'sync',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsync': SysCallSig(
'fsync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fdatasync': SysCallSig(
'fdatasync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bdflush': SysCallSig(
'bdflush',
params=[
SysCallParamSig(
'func',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'data',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mount': SysCallSig(
'mount',
params=[
SysCallParamSig(
'dev_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'dir_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'type',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umount': SysCallSig(
'umount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'oldumount': SysCallSig(
'oldumount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'truncate': SysCallSig(
'truncate',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'length',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ftruncate': SysCallSig(
'ftruncate',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'length',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stat': SysCallSig(
'stat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs': SysCallSig(
'statfs',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs64': SysCallSig(
'statfs64',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs': SysCallSig(
'fstatfs',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs64': SysCallSig(
'fstatfs64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lstat': SysCallSig(
'lstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstat': SysCallSig(
'fstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newstat': SysCallSig(
'newstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newlstat': SysCallSig(
'newlstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstat': SysCallSig(
'newfstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ustat': SysCallSig(
'ustat',
params=[
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ubuf',
CType(
['struct', 'ustat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setxattr': SysCallSig(
'setxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lsetxattr': SysCallSig(
'lsetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsetxattr': SysCallSig(
'fsetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getxattr': SysCallSig(
'getxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lgetxattr': SysCallSig(
'lgetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fgetxattr': SysCallSig(
'fgetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listxattr': SysCallSig(
'listxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llistxattr': SysCallSig(
'llistxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flistxattr': SysCallSig(
'flistxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'removexattr': SysCallSig(
'removexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lremovexattr': SysCallSig(
'lremovexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fremovexattr': SysCallSig(
'fremovexattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'brk': SysCallSig(
'brk',
params=[
SysCallParamSig(
'brk',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mprotect': SysCallSig(
'mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mremap': SysCallSig(
'mremap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'old_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'remap_file_pages': SysCallSig(
'remap_file_pages',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msync': SysCallSig(
'msync',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64': SysCallSig(
'fadvise64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64_64': SysCallSig(
'fadvise64_64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munmap': SysCallSig(
'munmap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock': SysCallSig(
'mlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlock': SysCallSig(
'munlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlockall': SysCallSig(
'mlockall',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlockall': SysCallSig(
'munlockall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'madvise': SysCallSig(
'madvise',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'behavior',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mincore': SysCallSig(
'mincore',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'vec',
CType(
['unsigned', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pivot_root': SysCallSig(
'pivot_root',
params=[
SysCallParamSig(
'new_root',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'put_old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chroot': SysCallSig(
'chroot',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mknod': SysCallSig(
'mknod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'link': SysCallSig(
'link',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlink': SysCallSig(
'symlink',
params=[
SysCallParamSig(
'old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'new',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlink': SysCallSig(
'unlink',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rename': SysCallSig(
'rename',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chmod': SysCallSig(
'chmod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmod': SysCallSig(
'fchmod',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fcntl': SysCallSig(
'fcntl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe': SysCallSig(
'pipe',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe2': SysCallSig(
'pipe2',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup': SysCallSig(
'dup',
params=[
SysCallParamSig(
'fildes',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup2': SysCallSig(
'dup2',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup3': SysCallSig(
'dup3',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
'from',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'num',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'on',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioctl': SysCallSig(
'ioctl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flock': SysCallSig(
'flock',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_setup': SysCallSig(
'io_setup',
params=[
SysCallParamSig(
'nr_reqs',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ctx',
CType(
['aio_context_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_destroy': SysCallSig(
'io_destroy',
params=[
SysCallParamSig(
'ctx',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_getevents': SysCallSig(
'io_getevents',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'min_nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_submit': SysCallSig(
'io_submit',
params=[
SysCallParamSig(
None,
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'__foo',
CType(
['struct', 'iocb', '*', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_cancel': SysCallSig(
'io_cancel',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'iocb',
CType(
['struct', 'iocb', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'result',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendfile': SysCallSig(
'sendfile',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendfile64': SysCallSig(
'sendfile64',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlink': SysCallSig(
'readlink',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'creat': SysCallSig(
'creat',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open': SysCallSig(
'open',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'close': SysCallSig(
'close',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'access': SysCallSig(
'access',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vhangup': SysCallSig(
'vhangup',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chown': SysCallSig(
'chown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lchown': SysCallSig(
'lchown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchown': SysCallSig(
'fchown',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utime': SysCallSig(
'utime',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'times',
CType(
['struct', 'utimbuf', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimes': SysCallSig(
'utimes',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lseek': SysCallSig(
'lseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llseek': SysCallSig(
'llseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset_high',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'offset_low',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'result',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'read': SysCallSig(
'read',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readahead': SysCallSig(
'readahead',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readv': SysCallSig(
'readv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'write': SysCallSig(
'write',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'writev': SysCallSig(
'writev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pread64': SysCallSig(
'pread64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwrite64': SysCallSig(
'pwrite64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv': SysCallSig(
'preadv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv2': SysCallSig(
'preadv2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev': SysCallSig(
'pwritev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev2': SysCallSig(
'pwritev2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getcwd': SysCallSig(
'getcwd',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdir': SysCallSig(
'mkdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chdir': SysCallSig(
'chdir',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchdir': SysCallSig(
'fchdir',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rmdir': SysCallSig(
'rmdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lookup_dcookie': SysCallSig(
'lookup_dcookie',
params=[
SysCallParamSig(
'cookie64',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'quotactl': SysCallSig(
'quotactl',
params=[
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'special',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'id',
CType(
['qid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'addr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents': SysCallSig(
'getdents',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents64': SysCallSig(
'getdents64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setsockopt': SysCallSig(
'setsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockopt': SysCallSig(
'getsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bind': SysCallSig(
'bind',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'connect': SysCallSig(
'connect',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept': SysCallSig(
'accept',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept4': SysCallSig(
'accept4',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockname': SysCallSig(
'getsockname',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpeername': SysCallSig(
'getpeername',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'send': SysCallSig(
'send',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendto': SysCallSig(
'sendto',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmsg': SysCallSig(
'sendmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmmsg': SysCallSig(
'sendmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recv': SysCallSig(
'recv',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvfrom': SysCallSig(
'recvfrom',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmsg': SysCallSig(
'recvmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmmsg': SysCallSig(
'recvmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socket': SysCallSig(
'socket',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketpair': SysCallSig(
'socketpair',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketcall': SysCallSig(
'socketcall',
params=[
SysCallParamSig(
'call',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'args',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listen': SysCallSig(
'listen',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'poll': SysCallSig(
'poll',
params=[
SysCallParamSig(
'ufds',
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nfds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'select': SysCallSig(
'select',
params=[
SysCallParamSig(
'n',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'inp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'outp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'exp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tvp',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_select': SysCallSig(
'old_select',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'sel_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create': SysCallSig(
'epoll_create',
params=[
SysCallParamSig(
'size',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create1': SysCallSig(
'epoll_create1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_ctl': SysCallSig(
'epoll_ctl',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'event',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_wait': SysCallSig(
'epoll_wait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_pwait': SysCallSig(
'epoll_pwait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sigmask',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gethostname': SysCallSig(
'gethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sethostname': SysCallSig(
'sethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setdomainname': SysCallSig(
'setdomainname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newuname': SysCallSig(
'newuname',
params=[
SysCallParamSig(
'name',
CType(
['struct', 'new_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uname': SysCallSig(
'uname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'old_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'olduname': SysCallSig(
'olduname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'oldold_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrlimit': SysCallSig(
'getrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setrlimit': SysCallSig(
'setrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'prlimit64': SysCallSig(
'prlimit64',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'new_rlim',
CType(
['const', 'struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_rlim',
CType(
['struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrusage': SysCallSig(
'getrusage',
params=[
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umask': SysCallSig(
'umask',
params=[
SysCallParamSig(
'mask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgget': SysCallSig(
'msgget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgsnd': SysCallSig(
'msgsnd',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgrcv': SysCallSig(
'msgrcv',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgtyp',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgctl': SysCallSig(
'msgctl',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'msqid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semget': SysCallSig(
'semget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nsems',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semop': SysCallSig(
'semop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semctl': SysCallSig(
'semctl',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semnum',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semtimedop': SysCallSig(
'semtimedop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmat': SysCallSig(
'shmat',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'shmflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmget': SysCallSig(
'shmget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmdt': SysCallSig(
'shmdt',
params=[
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmctl': SysCallSig(
'shmctl',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'shmid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ipc': SysCallSig(
'ipc',
params=[
SysCallParamSig(
'call',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'first',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'second',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'third',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'ptr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'fifth',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_open': SysCallSig(
'mq_open',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'oflag',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_unlink': SysCallSig(
'mq_unlink',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedsend': SysCallSig(
'mq_timedsend',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedreceive': SysCallSig(
'mq_timedreceive',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_notify': SysCallSig(
'mq_notify',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'notification',
CType(
['const', 'struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_getsetattr': SysCallSig(
'mq_getsetattr',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mqstat',
CType(
['const', 'struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'omqstat',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_iobase': SysCallSig(
'pciconfig_iobase',
params=[
SysCallParamSig(
'which',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'devfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_read': SysCallSig(
'pciconfig_read',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_write': SysCallSig(
'pciconfig_write',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'prctl': SysCallSig(
'prctl',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapon': SysCallSig(
'swapon',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'swap_flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapoff': SysCallSig(
'swapoff',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysctl': SysCallSig(
'sysctl',
params=[
SysCallParamSig(
'args',
CType(
['struct', '__sysctl_args', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysinfo': SysCallSig(
'sysinfo',
params=[
SysCallParamSig(
'info',
CType(
['struct', 'sysinfo', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysfs': SysCallSig(
'sysfs',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syslog': SysCallSig(
'syslog',
params=[
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uselib': SysCallSig(
'uselib',
params=[
SysCallParamSig(
'library',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ni_syscall': SysCallSig(
'ni_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ptrace': SysCallSig(
'ptrace',
params=[
SysCallParamSig(
'request',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'pid',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'add_key': SysCallSig(
'add_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_payload',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'plen',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'request_key': SysCallSig(
'request_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_callout_info',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'keyctl': SysCallSig(
'keyctl',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioprio_set': SysCallSig(
'ioprio_set',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ioprio',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioprio_get': SysCallSig(
'ioprio_get',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_mempolicy': SysCallSig(
'set_mempolicy',
params=[
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'migrate_pages': SysCallSig(
'migrate_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'from',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'to',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'move_pages': SysCallSig(
'move_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nr_pages',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pages',
CType(
['const', 'void', '*', '*'],
ctypes.c_long,
2
)
),
SysCallParamSig(
'nodes',
CType(
['const', 'int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'status',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mbind': SysCallSig(
'mbind',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'mode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_mempolicy': SysCallSig(
'get_mempolicy',
params=[
SysCallParamSig(
'policy',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'nmask',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_init': SysCallSig(
'inotify_init',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_init1': SysCallSig(
'inotify_init1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_add_watch': SysCallSig(
'inotify_add_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mask',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_rm_watch': SysCallSig(
'inotify_rm_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'wd',
CType(
['__s32'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'spu_run': SysCallSig(
'spu_run',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'unpc',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'ustatus',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'spu_create': SysCallSig(
'spu_create',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mknodat': SysCallSig(
'mknodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdirat': SysCallSig(
'mkdirat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlinkat': SysCallSig(
'unlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlinkat': SysCallSig(
'symlinkat',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'linkat': SysCallSig(
'linkat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat': SysCallSig(
'renameat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat2': SysCallSig(
'renameat2',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futimesat': SysCallSig(
'futimesat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'faccessat': SysCallSig(
'faccessat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmodat': SysCallSig(
'fchmodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchownat': SysCallSig(
'fchownat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'openat': SysCallSig(
'openat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstatat': SysCallSig(
'newfstatat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlinkat': SysCallSig(
'readlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimensat': SysCallSig(
'utimensat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unshare': SysCallSig(
'unshare',
params=[
SysCallParamSig(
'unshare_flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'splice': SysCallSig(
'splice',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vmsplice': SysCallSig(
'vmsplice',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'iov',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nr_segs',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tee': SysCallSig(
'tee',
params=[
SysCallParamSig(
'fdin',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fdout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range': SysCallSig(
'sync_file_range',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range2': SysCallSig(
'sync_file_range2',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_robust_list': SysCallSig(
'get_robust_list',
params=[
SysCallParamSig(
'pid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'head_ptr',
CType(
['struct', 'robust_list_head', '*', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len_ptr',
CType(
['size_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_robust_list': SysCallSig(
'set_robust_list',
params=[
SysCallParamSig(
'head',
CType(
['struct', 'robust_list_head', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getcpu': SysCallSig(
'getcpu',
params=[
SysCallParamSig(
'cpu',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'node',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'cache',
CType(
['struct', 'getcpu_cache', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signalfd': SysCallSig(
'signalfd',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signalfd4': SysCallSig(
'signalfd4',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_create': SysCallSig(
'timerfd_create',
params=[
SysCallParamSig(
'clockid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_settime': SysCallSig(
'timerfd_settime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'utmr',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_gettime': SysCallSig(
'timerfd_gettime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd': SysCallSig(
'eventfd',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd2': SysCallSig(
'eventfd2',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'memfd_create': SysCallSig(
'memfd_create',
params=[
SysCallParamSig(
'uname_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'userfaultfd': SysCallSig(
'userfaultfd',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fallocate': SysCallSig(
'fallocate',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_readdir': SysCallSig(
'old_readdir',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'old_linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pselect6': SysCallSig(
'pselect6',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ppoll': SysCallSig(
'ppoll',
params=[
SysCallParamSig(
None,
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_init': SysCallSig(
'fanotify_init',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'event_f_flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_mark': SysCallSig(
'fanotify_mark',
params=[
SysCallParamSig(
'fanotify_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syncfs': SysCallSig(
'syncfs',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fork': SysCallSig(
'fork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vfork': SysCallSig(
'vfork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clone': SysCallSig(
'clone',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execve': SysCallSig(
'execve',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'perf_event_open': SysCallSig(
'perf_event_open',
params=[
SysCallParamSig(
'attr_uptr',
CType(
['struct', 'perf_event_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cpu',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'group_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap_pgoff': SysCallSig(
'mmap_pgoff',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_mmap': SysCallSig(
'old_mmap',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'mmap_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'name_to_handle_at': SysCallSig(
'name_to_handle_at',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'mnt_id',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open_by_handle_at': SysCallSig(
'open_by_handle_at',
params=[
SysCallParamSig(
'mountdirfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setns': SysCallSig(
'setns',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nstype',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_readv': SysCallSig(
'process_vm_readv',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_writev': SysCallSig(
'process_vm_writev',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kcmp': SysCallSig(
'kcmp',
params=[
SysCallParamSig(
'pid1',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid2',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'idx1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'idx2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'finit_module': SysCallSig(
'finit_module',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'seccomp': SysCallSig(
'seccomp',
params=[
SysCallParamSig(
'op',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrandom': SysCallSig(
'getrandom',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bpf': SysCallSig(
'bpf',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['union', 'bpf_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execveat': SysCallSig(
'execveat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'membarrier': SysCallSig(
'membarrier',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'copy_file_range': SysCallSig(
'copy_file_range',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock2': SysCallSig(
'mlock2',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_mprotect': SysCallSig(
'pkey_mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_alloc': SysCallSig(
'pkey_alloc',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'init_val',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_free': SysCallSig(
'pkey_free',
params=[
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statx': SysCallSig(
'statx',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buffer',
CType(
['struct', 'statx', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'iopl': SysCallSig(
'iopl',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'modify_ldt': SysCallSig(
'modify_ldt',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['int'], ctypes.c_int, 0)
),
'rt_sigreturn': SysCallSig(
'rt_sigreturn',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_thread_area': SysCallSig(
'set_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_thread_area': SysCallSig(
'get_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'arch_prctl': SysCallSig(
'arch_prctl',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap': SysCallSig(
'mmap',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
}
SYSCALL_NUMBERS = {
0: 'read',
1: 'write',
2: 'open',
3: 'close',
4: 'stat',
5: 'fstat',
6: 'lstat',
7: 'poll',
8: 'lseek',
9: 'mmap',
10: 'mprotect',
11: 'munmap',
12: 'brk',
13: 'rt_sigaction',
14: 'rt_sigprocmask',
15: 'rt_sigreturn',
16: 'ioctl',
17: 'pread64',
18: 'pwrite64',
19: 'readv',
20: 'writev',
21: 'access',
22: 'pipe',
23: 'select',
24: 'sched_yield',
25: 'mremap',
26: 'msync',
27: 'mincore',
28: 'madvise',
29: 'shmget',
30: 'shmat',
31: 'shmctl',
32: 'dup',
33: 'dup2',
34: 'pause',
35: 'nanosleep',
36: 'getitimer',
37: 'alarm',
38: 'setitimer',
39: 'getpid',
40: 'sendfile',
41: 'socket',
42: 'connect',
43: 'accept',
44: 'sendto',
45: 'recvfrom',
46: 'sendmsg',
47: 'recvmsg',
48: 'shutdown',
49: 'bind',
50: 'listen',
51: 'getsockname',
52: 'getpeername',
53: 'socketpair',
54: 'setsockopt',
55: 'getsockopt',
56: 'clone',
57: 'fork',
58: 'vfork',
59: 'execve',
60: 'exit',
61: 'wait4',
62: 'kill',
63: 'uname',
64: 'semget',
65: 'semop',
66: 'semctl',
67: 'shmdt',
68: 'msgget',
69: 'msgsnd',
70: 'msgrcv',
71: 'msgctl',
72: 'fcntl',
73: 'flock',
74: 'fsync',
75: 'fdatasync',
76: 'truncate',
77: 'ftruncate',
78: 'getdents',
79: 'getcwd',
80: 'chdir',
81: 'fchdir',
82: 'rename',
83: 'mkdir',
84: 'rmdir',
85: 'creat',
86: 'link',
87: 'unlink',
88: 'symlink',
89: 'readlink',
90: 'chmod',
91: 'fchmod',
92: 'chown',
93: 'fchown',
94: 'lchown',
95: 'umask',
96: 'gettimeofday',
97: 'getrlimit',
98: 'getrusage',
99: 'sysinfo',
100: 'times',
101: 'ptrace',
102: 'getuid',
103: 'syslog',
104: 'getgid',
105: 'setuid',
106: 'setgid',
107: 'geteuid',
108: 'getegid',
109: 'setpgid',
110: 'getppid',
111: 'getpgrp',
112: 'setsid',
113: 'setreuid',
114: 'setregid',
115: 'getgroups',
116: 'setgroups',
117: 'setresuid',
118: 'getresuid',
119: 'setresgid',
120: 'getresgid',
121: 'getpgid',
122: 'setfsuid',
123: 'setfsgid',
124: 'getsid',
125: 'capget',
126: 'capset',
127: 'rt_sigpending',
128: 'rt_sigtimedwait',
129: 'rt_sigqueueinfo',
130: 'rt_sigsuspend',
131: 'sigaltstack',
132: 'utime',
133: 'mknod',
134: 'uselib',
135: 'personality',
136: 'ustat',
137: 'statfs',
138: 'fstatfs',
139: 'sysfs',
140: 'getpriority',
141: 'setpriority',
142: 'sched_setparam',
143: 'sched_getparam',
144: 'sched_setscheduler',
145: 'sched_getscheduler',
146: 'sched_get_priority_max',
147: 'sched_get_priority_min',
148: 'sched_rr_get_interval',
149: 'mlock',
150: 'munlock',
151: 'mlockall',
152: 'munlockall',
153: 'vhangup',
154: 'modify_ldt',
155: 'pivot_root',
156: '_sysctl',
157: 'prctl',
158: 'arch_prctl',
159: 'adjtimex',
160: 'setrlimit',
161: 'chroot',
162: 'sync',
163: 'acct',
164: 'settimeofday',
165: 'mount',
166: 'umount2',
167: 'swapon',
168: 'swapoff',
169: 'reboot',
170: 'sethostname',
171: 'setdomainname',
172: 'iopl',
173: 'ioperm',
174: 'create_module',
175: 'init_module',
176: 'delete_module',
177: 'get_kernel_syms',
178: 'query_module',
179: 'quotactl',
180: 'nfsservctl',
181: 'getpmsg',
182: 'putpmsg',
183: 'afs_syscall',
184: 'tuxcall',
185: 'security',
186: 'gettid',
187: 'readahead',
188: 'setxattr',
189: 'lsetxattr',
190: 'fsetxattr',
191: 'getxattr',
192: 'lgetxattr',
193: 'fgetxattr',
194: 'listxattr',
195: 'llistxattr',
196: 'flistxattr',
197: 'removexattr',
198: 'lremovexattr',
199: 'fremovexattr',
200: 'tkill',
201: 'time',
202: 'futex',
203: 'sched_setaffinity',
204: 'sched_getaffinity',
205: 'set_thread_area',
206: 'io_setup',
207: 'io_destroy',
208: 'io_getevents',
209: 'io_submit',
210: 'io_cancel',
211: 'get_thread_area',
212: 'lookup_dcookie',
213: 'epoll_create',
214: 'epoll_ctl_old',
215: 'epoll_wait_old',
216: 'remap_file_pages',
217: 'getdents64',
218: 'set_tid_address',
219: 'restart_syscall',
220: 'semtimedop',
221: 'fadvise64',
222: 'timer_create',
223: 'timer_settime',
224: 'timer_gettime',
225: 'timer_getoverrun',
226: 'timer_delete',
227: 'clock_settime',
228: 'clock_gettime',
229: 'clock_getres',
230: 'clock_nanosleep',
231: 'exit_group',
232: 'epoll_wait',
233: 'epoll_ctl',
234: 'tgkill',
235: 'utimes',
236: 'vserver',
237: 'mbind',
238: 'set_mempolicy',
239: 'get_mempolicy',
240: 'mq_open',
241: 'mq_unlink',
242: 'mq_timedsend',
243: 'mq_timedreceive',
244: 'mq_notify',
245: 'mq_getsetattr',
246: 'kexec_load',
247: 'waitid',
248: 'add_key',
249: 'request_key',
250: 'keyctl',
251: 'ioprio_set',
252: 'ioprio_get',
253: 'inotify_init',
254: 'inotify_add_watch',
255: 'inotify_rm_watch',
256: 'migrate_pages',
257: 'openat',
258: 'mkdirat',
259: 'mknodat',
260: 'fchownat',
261: 'futimesat',
262: 'newfstatat',
263: 'unlinkat',
264: 'renameat',
265: 'linkat',
266: 'symlinkat',
267: 'readlinkat',
268: 'fchmodat',
269: 'faccessat',
270: 'pselect6',
271: 'ppoll',
272: 'unshare',
273: 'set_robust_list',
274: 'get_robust_list',
275: 'splice',
276: 'tee',
277: 'sync_file_range',
278: 'vmsplice',
279: 'move_pages',
280: 'utimensat',
281: 'epoll_pwait',
282: 'signalfd',
283: 'timerfd_create',
284: 'eventfd',
285: 'fallocate',
286: 'timerfd_settime',
287: 'timerfd_gettime',
288: 'accept4',
289: 'signalfd4',
290: 'eventfd2',
291: 'epoll_create1',
292: 'dup3',
293: 'pipe2',
294: 'inotify_init1',
295: 'preadv',
296: 'pwritev',
297: 'rt_tgsigqueueinfo',
298: 'perf_event_open',
299: 'recvmmsg',
300: 'fanotify_init',
301: 'fanotify_mark',
302: 'prlimit64',
303: 'name_to_handle_at',
304: 'open_by_handle_at',
305: 'clock_adjtime',
306: 'syncfs',
307: 'sendmmsg',
308: 'setns',
309: 'getcpu',
310: 'process_vm_readv',
311: 'process_vm_writev',
312: 'kcmp',
313: 'finit_module',
314: 'sched_setattr',
315: 'sched_getattr',
316: 'renameat2',
317: 'seccomp',
318: 'getrandom',
319: 'memfd_create',
320: 'kexec_file_load',
321: 'bpf',
322: 'execveat',
323: 'userfaultfd',
324: 'membarrier',
325: 'mlock2',
326: 'copy_file_range',
327: 'preadv2',
328: 'pwritev2',
329: 'pkey_mprotect',
330: 'pkey_alloc',
331: 'pkey_free',
332: 'statx',
}
| true | true |
f7320d9878bc5a4f10abe988b97f5db2d9d49e64 | 6,985 | py | Python | test/drivers/gaussiand/test_driver_gaussian_log.py | renier/qiskit-nature | a06f378a219d650d96e16db96d763ea4aec9cfc2 | [
"Apache-2.0"
] | null | null | null | test/drivers/gaussiand/test_driver_gaussian_log.py | renier/qiskit-nature | a06f378a219d650d96e16db96d763ea4aec9cfc2 | [
"Apache-2.0"
] | null | null | null | test/drivers/gaussiand/test_driver_gaussian_log.py | renier/qiskit-nature | a06f378a219d650d96e16db96d763ea4aec9cfc2 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian Log Driver """
import unittest
from test import QiskitNatureTestCase
from qiskit_nature.drivers import GaussianLogDriver, GaussianLogResult
from qiskit_nature import QiskitNatureError
class TestDriverGaussianLog(QiskitNatureTestCase):
    """Gaussian Log Driver tests."""
    def setUp(self):
        super().setUp()
        # Partial Gaussian 16 log shipped with the tests; used by every
        # parsing test below so they run without Gaussian installed.
        self.logfile = self.get_resource_path(
            "test_driver_gaussian_log.txt", "drivers/second_quantization/gaussiand"
        )
    def test_log_driver(self):
        """Test the driver itself creates log and we can get a result"""
        # This test needs a working Gaussian 16 installation; when the
        # driver raises QiskitNatureError the test is skipped instead.
        try:
            driver = GaussianLogDriver(
                [
                    "#p B3LYP/6-31g Freq=(Anharm) Int=Ultrafine SCF=VeryTight",
                    "",
                    "CO2 geometry optimization B3LYP/cc-pVTZ",
                    "",
                    "0 1",
                    "C -0.848629 2.067624 0.160992",
                    "O 0.098816 2.655801 -0.159738",
                    "O -1.796073 1.479446 0.481721",
                    "",
                    "",
                ]
            )
            result = driver.run()
            qfc = result.quadratic_force_constants
            # Each entry is (mode label, mode label, then three numeric
            # constants) as parsed from the generated log.
            expected = [
                ("1", "1", 1409.20235, 1.17003, 0.07515),
                ("2", "2", 2526.46159, 3.76076, 0.24156),
                ("3a", "3a", 462.61566, 0.12609, 0.0081),
                ("3b", "3b", 462.61566, 0.12609, 0.0081),
            ]
            self.assertListEqual(qfc, expected)
        except QiskitNatureError:
            self.skipTest("GAUSSIAN driver does not appear to be installed")
    # These tests check the gaussian log result and the parsing from a partial log file that is
    # located with the tests so that this aspect of the code can be tested independent of
    # Gaussian 16 being installed.
    def test_gaussian_log_result_file(self):
        """Test result from file"""
        result = GaussianLogResult(self.logfile)
        with open(self.logfile, "r", encoding="utf8") as file:
            lines = file.read().split("\n")
        with self.subTest("Check list of lines"):
            self.assertListEqual(result.log, lines)
        with self.subTest("Check as string"):
            line = "\n".join(lines)
            self.assertEqual(str(result), line)
    def test_gaussian_log_result_list(self):
        """Test result from list of strings"""
        with open(self.logfile, "r", encoding="utf8") as file:
            lines = file.read().split("\n")
        result = GaussianLogResult(lines)
        self.assertListEqual(result.log, lines)
    def test_gaussian_log_result_string(self):
        """Test result from string"""
        with open(self.logfile, "r", encoding="utf8") as file:
            line = file.read()
        result = GaussianLogResult(line)
        self.assertListEqual(result.log, line.split("\n"))
    def test_quadratic_force_constants(self):
        """Test quadratic force constants"""
        result = GaussianLogResult(self.logfile)
        qfc = result.quadratic_force_constants
        expected = [
            ("1", "1", 1409.20235, 1.17003, 0.07515),
            ("2", "2", 2526.46159, 3.76076, 0.24156),
            ("3a", "3a", 462.61566, 0.12609, 0.0081),
            ("3b", "3b", 462.61566, 0.12609, 0.0081),
        ]
        self.assertListEqual(qfc, expected)
    def test_cubic_force_constants(self):
        """Test cubic force constants"""
        result = GaussianLogResult(self.logfile)
        cfc = result.cubic_force_constants
        expected = [
            ("1", "1", "1", -260.36071, -1.39757, -0.0475),
            ("2", "2", "1", -498.9444, -4.80163, -0.1632),
            ("3a", "3a", "1", 239.87769, 0.4227, 0.01437),
            ("3a", "3b", "1", 74.25095, 0.13084, 0.00445),
            ("3b", "3b", "1", 12.93985, 0.0228, 0.00078),
        ]
        self.assertListEqual(cfc, expected)
    def test_quartic_force_constants(self):
        """Test quartic force constants"""
        result = GaussianLogResult(self.logfile)
        qfc = result.quartic_force_constants
        expected = [
            ("1", "1", "1", "1", 40.39063, 1.40169, 0.02521),
            ("2", "2", "1", "1", 79.08068, 4.92017, 0.0885),
            ("2", "2", "2", "2", 154.78015, 17.26491, 0.31053),
            ("3a", "3a", "1", "1", -67.10879, -0.76453, -0.01375),
            ("3b", "3b", "1", "1", -67.10879, -0.76453, -0.01375),
            ("3a", "3a", "2", "2", -163.29426, -3.33524, -0.05999),
            ("3b", "3b", "2", "2", -163.29426, -3.33524, -0.05999),
            ("3a", "3a", "3a", "3a", 220.54851, 0.82484, 0.01484),
            ("3a", "3a", "3a", "3b", 66.77089, 0.24972, 0.00449),
            ("3a", "3a", "3b", "3b", 117.26759, 0.43857, 0.00789),
            ("3a", "3b", "3b", "3b", -66.77088, -0.24972, -0.00449),
            ("3b", "3b", "3b", "3b", 220.54851, 0.82484, 0.01484),
        ]
        self.assertListEqual(qfc, expected)
    def test_watson_hamiltonian(self):
        """Test the watson hamiltonian"""
        result = GaussianLogResult(self.logfile)
        watson = result.get_watson_hamiltonian()
        # Each entry: a coefficient followed by mode indices (negative
        # indices mirror the positive ones in the parsed data).
        expected = [
            [352.3005875, 2, 2],
            [-352.3005875, -2, -2],
            [631.6153975, 1, 1],
            [-631.6153975, -1, -1],
            [115.653915, 4, 4],
            [-115.653915, -4, -4],
            [115.653915, 3, 3],
            [-115.653915, -3, -3],
            [-15.341901966295344, 2, 2, 2],
            [-88.2017421687633, 1, 1, 2],
            [42.40478531359112, 4, 4, 2],
            [26.25167512727164, 4, 3, 2],
            [2.2874639206341865, 3, 3, 2],
            [0.4207357291666667, 2, 2, 2, 2],
            [4.9425425, 1, 1, 2, 2],
            [1.6122932291666665, 1, 1, 1, 1],
            [-4.194299375, 4, 4, 2, 2],
            [-4.194299375, 3, 3, 2, 2],
            [-10.20589125, 4, 4, 1, 1],
            [-10.20589125, 3, 3, 1, 1],
            [2.2973803125, 4, 4, 4, 4],
            [2.7821204166666664, 4, 4, 4, 3],
            [7.329224375, 4, 4, 3, 3],
            [-2.7821200000000004, 4, 3, 3, 3],
            [2.2973803125, 3, 3, 3, 3],
        ]
        # Compare the coefficient approximately (float) and the mode
        # indices exactly.
        for i, entry in enumerate(watson.data):
            msg = "mode[{}]={} does not match expected {}".format(i, entry, expected[i])
            self.assertAlmostEqual(entry[0], expected[i][0], msg=msg)
            self.assertListEqual(entry[1:], expected[i][1:], msg=msg)
if __name__ == "__main__":
    unittest.main()
| 39.6875 | 95 | 0.534001 |
import unittest
from test import QiskitNatureTestCase
from qiskit_nature.drivers import GaussianLogDriver, GaussianLogResult
from qiskit_nature import QiskitNatureError
class TestDriverGaussianLog(QiskitNatureTestCase):
    """Gaussian Log Driver tests."""
    def setUp(self):
        """Locate the partial Gaussian 16 log bundled with the tests."""
        super().setUp()
        self.logfile = self.get_resource_path(
            "test_driver_gaussian_log.txt", "drivers/second_quantization/gaussiand"
        )
    def test_log_driver(self):
        """Test the driver itself creates log and we can get a result.

        Skipped (via QiskitNatureError) when Gaussian 16 is not installed.
        """
        try:
            driver = GaussianLogDriver(
                [
                    "#p B3LYP/6-31g Freq=(Anharm) Int=Ultrafine SCF=VeryTight",
                    "",
                    "CO2 geometry optimization B3LYP/cc-pVTZ",
                    "",
                    "0 1",
                    "C -0.848629 2.067624 0.160992",
                    "O 0.098816 2.655801 -0.159738",
                    "O -1.796073 1.479446 0.481721",
                    "",
                    "",
                ]
            )
            result = driver.run()
            qfc = result.quadratic_force_constants
            expected = [
                ("1", "1", 1409.20235, 1.17003, 0.07515),
                ("2", "2", 2526.46159, 3.76076, 0.24156),
                ("3a", "3a", 462.61566, 0.12609, 0.0081),
                ("3b", "3b", 462.61566, 0.12609, 0.0081),
            ]
            self.assertListEqual(qfc, expected)
        except QiskitNatureError:
            self.skipTest("GAUSSIAN driver does not appear to be installed")
    # The remaining tests parse the bundled log file, so they run without
    # Gaussian 16 being installed.
    def test_gaussian_log_result_file(self):
        """Test result from file"""
        result = GaussianLogResult(self.logfile)
        with open(self.logfile, "r", encoding="utf8") as file:
            lines = file.read().split("\n")
        with self.subTest("Check list of lines"):
            self.assertListEqual(result.log, lines)
        with self.subTest("Check as string"):
            line = "\n".join(lines)
            self.assertEqual(str(result), line)
    def test_gaussian_log_result_list(self):
        """Test result from list of strings"""
        with open(self.logfile, "r", encoding="utf8") as file:
            lines = file.read().split("\n")
        result = GaussianLogResult(lines)
        self.assertListEqual(result.log, lines)
    def test_gaussian_log_result_string(self):
        """Test result from string"""
        with open(self.logfile, "r", encoding="utf8") as file:
            line = file.read()
        result = GaussianLogResult(line)
        self.assertListEqual(result.log, line.split("\n"))
    def test_quadratic_force_constants(self):
        """Test quadratic force constants"""
        result = GaussianLogResult(self.logfile)
        qfc = result.quadratic_force_constants
        expected = [
            ("1", "1", 1409.20235, 1.17003, 0.07515),
            ("2", "2", 2526.46159, 3.76076, 0.24156),
            ("3a", "3a", 462.61566, 0.12609, 0.0081),
            ("3b", "3b", 462.61566, 0.12609, 0.0081),
        ]
        self.assertListEqual(qfc, expected)
    def test_cubic_force_constants(self):
        """Test cubic force constants"""
        result = GaussianLogResult(self.logfile)
        cfc = result.cubic_force_constants
        expected = [
            ("1", "1", "1", -260.36071, -1.39757, -0.0475),
            ("2", "2", "1", -498.9444, -4.80163, -0.1632),
            ("3a", "3a", "1", 239.87769, 0.4227, 0.01437),
            ("3a", "3b", "1", 74.25095, 0.13084, 0.00445),
            ("3b", "3b", "1", 12.93985, 0.0228, 0.00078),
        ]
        self.assertListEqual(cfc, expected)
    def test_quartic_force_constants(self):
        """Test quartic force constants"""
        result = GaussianLogResult(self.logfile)
        qfc = result.quartic_force_constants
        expected = [
            ("1", "1", "1", "1", 40.39063, 1.40169, 0.02521),
            ("2", "2", "1", "1", 79.08068, 4.92017, 0.0885),
            ("2", "2", "2", "2", 154.78015, 17.26491, 0.31053),
            ("3a", "3a", "1", "1", -67.10879, -0.76453, -0.01375),
            ("3b", "3b", "1", "1", -67.10879, -0.76453, -0.01375),
            ("3a", "3a", "2", "2", -163.29426, -3.33524, -0.05999),
            ("3b", "3b", "2", "2", -163.29426, -3.33524, -0.05999),
            ("3a", "3a", "3a", "3a", 220.54851, 0.82484, 0.01484),
            ("3a", "3a", "3a", "3b", 66.77089, 0.24972, 0.00449),
            ("3a", "3a", "3b", "3b", 117.26759, 0.43857, 0.00789),
            ("3a", "3b", "3b", "3b", -66.77088, -0.24972, -0.00449),
            ("3b", "3b", "3b", "3b", 220.54851, 0.82484, 0.01484),
        ]
        self.assertListEqual(qfc, expected)
    def test_watson_hamiltonian(self):
        """Test the watson hamiltonian"""
        result = GaussianLogResult(self.logfile)
        watson = result.get_watson_hamiltonian()
        expected = [
            [352.3005875, 2, 2],
            [-352.3005875, -2, -2],
            [631.6153975, 1, 1],
            [-631.6153975, -1, -1],
            [115.653915, 4, 4],
            [-115.653915, -4, -4],
            [115.653915, 3, 3],
            [-115.653915, -3, -3],
            [-15.341901966295344, 2, 2, 2],
            [-88.2017421687633, 1, 1, 2],
            [42.40478531359112, 4, 4, 2],
            [26.25167512727164, 4, 3, 2],
            [2.2874639206341865, 3, 3, 2],
            [0.4207357291666667, 2, 2, 2, 2],
            [4.9425425, 1, 1, 2, 2],
            [1.6122932291666665, 1, 1, 1, 1],
            [-4.194299375, 4, 4, 2, 2],
            [-4.194299375, 3, 3, 2, 2],
            [-10.20589125, 4, 4, 1, 1],
            [-10.20589125, 3, 3, 1, 1],
            [2.2973803125, 4, 4, 4, 4],
            [2.7821204166666664, 4, 4, 4, 3],
            [7.329224375, 4, 4, 3, 3],
            [-2.7821200000000004, 4, 3, 3, 3],
            [2.2973803125, 3, 3, 3, 3],
        ]
        # Coefficient compared approximately (float), indices exactly.
        for i, entry in enumerate(watson.data):
            msg = "mode[{}]={} does not match expected {}".format(i, entry, expected[i])
            self.assertAlmostEqual(entry[0], expected[i][0], msg=msg)
            self.assertListEqual(entry[1:], expected[i][1:], msg=msg)
if __name__ == "__main__":
    unittest.main()
| true | true |
f7320db42cd644873d257e35043851fb92abef28 | 62,440 | py | Python | lib/wallet.py | Skirmant/electrum-trump | 9b0d4e6dd3317900f3d81dec8924f07ffa60e8b5 | [
"MIT"
] | null | null | null | lib/wallet.py | Skirmant/electrum-trump | 9b0d4e6dd3317900f3d81dec8924f07ffa60e8b5 | [
"MIT"
] | null | null | null | lib/wallet.py | Skirmant/electrum-trump | 9b0d4e6dd3317900f3d81dec8924f07ffa60e8b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Wallet classes:
- Imported_Wallet: imported address, no keystore
- Standard_Wallet: one keystore, P2PKH
- Multisig_Wallet: several keystores, P2SH
"""
import os
import hashlib
import ast
import threading
import random
import time
import json
import copy
import re
import stat
from functools import partial
from collections import namedtuple, defaultdict
from i18n import _
from util import NotEnoughFunds, PrintError, UserCancelled, profiler
from bitcoin import *
from version import *
from keystore import load_keystore, Hardware_KeyStore
from storage import multisig_type
from transaction import Transaction
from plugins import run_hook
import bitcoin
import coinchooser
from synchronizer import Synchronizer
from verifier import SPV
from mnemonic import Mnemonic
import paymentrequest
from storage import WalletStorage
# Translated status labels for transactions; presumably indexed by the
# code produced in get_tx_status() -- TODO confirm against callers.
TX_STATUS = [
    _('Replaceable'),
    _('Unconfirmed parent'),
    _('Low fee'),
    _('Unconfirmed'),
    _('Not Verified'),
]
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
    def __init__(self, storage):
        """Load all persisted wallet state from `storage`.

        The initialization order below matters: the keystore and address
        lists are loaded before transactions, and check_history() (which
        consults is_mine()) runs last -- do not reorder casually.
        """
        self.electrum_version = ELECTRUM_VERSION
        self.storage = storage
        self.network = None
        # verifier (SPV) and synchronizer are started in start_threads
        self.synchronizer = None
        self.verifier = None
        self.gap_limit_for_change = 6 # constant
        # saved fields
        self.use_change = storage.get('use_change', True)
        self.multiple_change = storage.get('multiple_change', False)
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses',[]))
        self.stored_height = storage.get('stored_height', 0) # last known height (for offline mode)
        self.history = storage.get('addr_history',{}) # address -> list(txid, height)
        self.load_keystore()
        self.load_addresses()
        self.load_transactions()
        self.build_reverse_history()
        # load requests
        self.receive_requests = self.storage.get('payment_requests', {})
        # Transactions pending verification. A map from tx hash to transaction
        # height. Access is not contended so no lock is needed.
        self.unverified_tx = defaultdict(int)
        # Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
        self.verified_tx = storage.get('verified_tx3', {})
        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False
        self.lock = threading.Lock()
        self.transaction_lock = threading.Lock()
        self.check_history()
        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type)
    def diagnostic_name(self):
        # log prefix (presumably used by the PrintError base class) --
        # identifies this wallet by its file name
        return self.basename()
    def __str__(self):
        return self.basename()
    def get_master_public_key(self):
        # base implementation: no master public key; subclasses with a
        # deterministic keystore are expected to override this
        return None
    @profiler
    def load_transactions(self):
        """Load txi/txo/fee/pruned maps and raw transactions from storage.

        Transactions that are referenced neither by txi, txo nor
        pruned_txo are dropped as unreferenced.
        """
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.tx_fees = self.storage.get('tx_fees', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
self.storage.put('txi', self.txi)
self.storage.put('txo', self.txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
self.storage.put('addr_history', self.history)
if write:
self.storage.write()
    def clear_history(self):
        """Wipe all transaction bookkeeping and address history.

        Note the lock ordering: the tx maps are cleared under
        transaction_lock, persisted, and only then is the address history
        cleared under self.lock.
        """
        with self.transaction_lock:
            self.txi = {}
            self.txo = {}
            self.tx_fees = {}
            self.pruned_txo = {}
        self.save_transactions()
        with self.lock:
            self.history = {}
            self.tx_addr_hist = {}
@profiler
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
    @profiler
    def check_history(self):
        """Sanity-check the loaded history against the tx maps.

        Drops history entries for addresses that are no longer ours and
        (re)indexes any transaction present in history but missing from
        txi/txo/pruned_txo.  Persists only if something changed.
        """
        save = False
        # NOTE(review): popping from self.history while iterating .items()
        # is safe on Python 2 (items() returns a list); confirm before
        # porting to Python 3.
        for addr, hist in self.history.items():
            if not self.is_mine(addr):
                self.history.pop(addr)
                save = True
                continue
            for tx_hash, tx_height in hist:
                if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    self.add_transaction(tx_hash, tx)
                    save = True
        if save:
            self.save_transactions()
    def basename(self):
        # file name of the wallet, without its directory
        return os.path.basename(self.storage.path)
    def save_pubkeys(self):
        # persist both pubkey chains under a single storage key
        self.storage.put('pubkeys', {'receiving':self.receiving_pubkeys, 'change':self.change_pubkeys})
    def load_addresses(self):
        # rebuild the address lists from the stored pubkey chains
        d = self.storage.get('pubkeys', {})
        self.receiving_pubkeys = d.get('receiving', [])
        self.change_pubkeys = d.get('change', [])
        self.receiving_addresses = map(self.pubkeys_to_address, self.receiving_pubkeys)
        self.change_addresses = map(self.pubkeys_to_address, self.change_pubkeys)
    def synchronize(self):
        # no-op here; deterministic wallet subclasses generate addresses
        pass
    def set_up_to_date(self, up_to_date):
        with self.lock:
            self.up_to_date = up_to_date
        # flush to disk once synchronization completes
        if up_to_date:
            self.save_transactions(write=True)
    def is_up_to_date(self):
        with self.lock: return self.up_to_date
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def is_mine(self, address):
return address in self.get_addresses()
def is_change(self, address):
if not self.is_mine(address):
return False
return address in self.change_addresses
    def get_address_index(self, address):
        # For importable keystores the "index" is the pubkey itself;
        # otherwise it is an (is_change, position) pair locating the
        # address in its derivation chain.  Raises if the address is not
        # ours.
        if self.keystore.can_import():
            i = self.receiving_addresses.index(address)
            return self.receiving_pubkeys[i]
        elif address in self.receiving_addresses:
            return False, self.receiving_addresses.index(address)
        if address in self.change_addresses:
            return True, self.change_addresses.index(address)
        raise Exception("Address not found", address)
    def get_pubkey_index(self, pubkey):
        # Same index convention as get_address_index, keyed by pubkey.
        if self.keystore.can_import():
            assert pubkey in self.receiving_pubkeys
            return pubkey
        elif pubkey in self.receiving_pubkeys:
            return False, self.receiving_pubkeys.index(pubkey)
        if pubkey in self.change_pubkeys:
            return True, self.change_pubkeys.index(pubkey)
        raise Exception("Pubkey not found", pubkey)
    def get_private_key(self, address, password):
        # Watching-only wallets hold no private keys: return an empty list.
        if self.is_watching_only():
            return []
        index = self.get_address_index(address)
        pk = self.keystore.get_private_key(index, password)
        return [pk]
    def get_public_key(self, address):
        if self.keystore.can_import():
            i = self.receiving_addresses.index(address)
            pubkey = self.receiving_pubkeys[i]
        else:
            sequence = self.get_address_index(address)
            pubkey = self.get_pubkey(*sequence)
        return pubkey
    def get_public_keys(self, address):
        # all pubkeys for the address (several for multisig wallets)
        sequence = self.get_address_index(address)
        return self.get_pubkeys(*sequence)
    def add_unverified_tx(self, tx_hash, tx_height):
        # tx will be verified only if height > 0
        if tx_hash not in self.verified_tx:
            self.unverified_tx[tx_hash] = tx_height
    def add_verified_tx(self, tx_hash, info):
        # Remove from the unverified map and add to the verified map and
        # notify listeners through the network callback.
        self.unverified_tx.pop(tx_hash, None)
        with self.lock:
            self.verified_tx[tx_hash] = info  # (tx_height, timestamp, pos)
        height, conf, timestamp = self.get_tx_height(tx_hash)
        self.network.trigger_callback('verified', tx_hash, height, conf, timestamp)
    def get_unverified_txs(self):
        '''Returns a map from tx hash to transaction height'''
        return self.unverified_tx
def undo_verifications(self, height):
'''Used by the verifier when a reorg has happened'''
txs = []
with self.lock:
for tx_hash, item in self.verified_tx:
tx_height, timestamp, pos = item
if tx_height >= height:
self.verified_tx.pop(tx_hash, None)
txs.append(tx_hash)
return txs
    def get_local_height(self):
        """ return last known height if we are offline """
        # live network height when connected, otherwise the height that
        # was persisted in storage
        return self.network.get_local_height() if self.network else self.stored_height
    def get_tx_height(self, tx_hash):
        """ return the height and timestamp of a verified transaction. """
        # Returns (height, confirmations, timestamp); for unverified
        # transactions confirmations is 0 and timestamp is False.
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                conf = max(self.get_local_height() - height + 1, 0)
                return height, conf, timestamp
            else:
                # unverified_tx is a defaultdict(int): unknown hashes
                # therefore yield height 0 here
                height = self.unverified_tx[tx_hash]
                return height, 0, False
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
x = self.verified_tx.get(tx_hash)
y = self.unverified_tx.get(tx_hash)
if x:
height, timestamp, pos = x
return height, pos
elif y > 0:
return y, 0
else:
return 1e12, 0
def is_found(self):
return self.history.values() != [[]] * len(self.history)
    def get_num_tx(self, address):
        """ return number of transactions where address is involved """
        return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
# pruned
if tx_hash in self.pruned_txo.values():
return None
delta = 0
# substract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx):
        """ effect of tx on wallet """
        # Returns (is_relevant, is_mine, value, fee):
        #   is_relevant - tx touches at least one wallet address
        #   is_mine     - at least one input is ours
        #   value       - net change to the wallet balance
        #   fee         - tx fee, or None when it cannot be computed
        #                 (pruned or partial inputs, or no input is ours)
        addresses = self.get_addresses()
        is_relevant = False
        is_mine = False
        is_pruned = False
        is_partial = False
        v_in = v_out = v_out_mine = 0
        for item in tx.inputs():
            addr = item.get('address')
            if addr in addresses:
                is_mine = True
                is_relevant = True
                # look up the value of the spent output in our txo map
                d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == item['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    # funding output unknown to us: the input value (and
                    # therefore the fee) cannot be determined
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for addr, value in tx.get_outputs():
            v_out += value
            if addr in addresses:
                v_out_mine += value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
    def get_tx_info(self, tx):
        # Summarize a transaction for display: returns (tx_hash, status,
        # label, can_broadcast, can_bump, amount, fee, height, conf,
        # timestamp, exp_n).
        is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
        exp_n = None
        can_broadcast = False
        can_bump = False
        label = ''
        height = conf = timestamp = None
        if tx.is_complete():
            tx_hash = tx.hash()
            if tx_hash in self.transactions.keys():
                label = self.get_label(tx_hash)
                height, conf, timestamp = self.get_tx_height(tx_hash)
                if height > 0:
                    if conf:
                        status = _("%d confirmations") % conf
                    else:
                        status = _('Not verified')
                else:
                    status = _('Unconfirmed')
                    if fee is None:
                        fee = self.tx_fees.get(tx_hash)
                    if fee:
                        # estimate how many blocks until confirmation from
                        # the fee rate
                        size = tx.estimated_size()
                        fee_per_kb = fee * 1000 / size
                        exp_n = self.network.config.reverse_dynfee(fee_per_kb)
                    can_bump = is_mine and not tx.is_final()
            else:
                status = _("Signed")
                can_broadcast = self.network is not None
        else:
            s, r = tx.signature_count()
            status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
            tx_hash = None
        if is_relevant:
            if is_mine:
                if fee is not None:
                    amount = v + fee
                else:
                    amount = v
            else:
                amount = v
        else:
            amount = None
        return tx_hash, status, label, can_broadcast, can_bump, amount, fee, height, conf, timestamp, exp_n
def get_addr_io(self, address):
h = self.history.get(address, [])
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = []
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out.append(x)
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    # return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
    def get_addr_balance(self, address):
        """Return (confirmed, unconfirmed, unmatured) for `address`.

        Coinbase outputs count as unmatured until COINBASE_MATURITY blocks
        have passed; note this check runs before the height check, so an
        immature coinbase never counts as confirmed.
        """
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        for txo, (tx_height, v, is_cb) in received.items():
            if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
                x += v
            elif tx_height > 0:
                c += v
            else:
                u += v
            # a spent output is subtracted from the bucket matching the
            # confirmation state of the spending tx
            if txo in sent:
                if sent[txo] > 0:
                    c -= v
                else:
                    u -= v
        return c, u, x
def get_spendable_coins(self, domain = None):
return self.get_utxos(domain, exclude_frozen=True, mature=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False):
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos:
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
out = []
out += self.get_receiving_addresses()
out += self.get_change_addresses()
return out
def get_frozen_balance(self):
return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
    def get_address_history(self, address):
        # the stored list of (txid, height) pairs for this address
        with self.lock:
            return self.history.get(address, [])
    def find_pay_to_pubkey_address(self, prevout_hash, prevout_n):
        # Resolve a "(pubkey)" input to a concrete address by scanning our
        # known outputs for the matching outpoint.  Returns None
        # (implicitly) when the outpoint does not pay one of our addresses.
        dd = self.txo.get(prevout_hash, {})
        for addr, l in dd.items():
            for n, v, is_cb in l:
                if n == prevout_n:
                    self.print_error("found pay-to-pubkey address:", addr)
                    return addr
    def add_transaction(self, tx_hash, tx):
        """Index a transaction into the wallet's txi/txo maps.

        txi[tx_hash][addr] collects (outpoint, value) pairs for our spent
        coins; txo[tx_hash][addr] collects (n, value, is_coinbase) for our
        received coins.  Inputs whose funding output we have not seen yet
        are parked in pruned_txo and resolved when the funding tx arrives.
        """
        is_coinbase = tx.inputs()[0].get('is_coinbase') == True
        with self.transaction_lock:
            # add inputs
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                addr = txi.get('address')
                if not txi.get('is_coinbase'):
                    prevout_hash = txi['prevout_hash']
                    prevout_n = txi['prevout_n']
                    ser = prevout_hash + ':%d'%prevout_n
                if addr == "(pubkey)":
                    addr = self.find_pay_to_pubkey_address(prevout_hash, prevout_n)
                # find value from prev output
                if addr and self.is_mine(addr):
                    dd = self.txo.get(prevout_hash, {})
                    for n, v, is_cb in dd.get(addr, []):
                        if n == prevout_n:
                            if d.get(addr) is None:
                                d[addr] = []
                            d[addr].append((ser, v))
                            break
                    else:
                        # funding output unknown yet: remember which tx
                        # spends this outpoint so it can be linked later
                        self.pruned_txo[ser] = tx_hash
            # add outputs
            self.txo[tx_hash] = d = {}
            for n, txo in enumerate(tx.outputs()):
                ser = tx_hash + ':%d'%n
                _type, x, v = txo
                if _type == TYPE_ADDRESS:
                    addr = x
                elif _type == TYPE_PUBKEY:
                    addr = public_key_to_bc_address(x.decode('hex'))
                else:
                    addr = None
                if addr and self.is_mine(addr):
                    if d.get(addr) is None:
                        d[addr] = []
                    d[addr].append((n, v, is_coinbase))
                # give v to txi that spends me
                next_tx = self.pruned_txo.get(ser)
                if next_tx is not None:
                    self.pruned_txo.pop(ser)
                    dd = self.txi.get(next_tx, {})
                    if dd.get(addr) is None:
                        dd[addr] = []
                    dd[addr].append((ser, v))
            # save
            self.transactions[tx_hash] = tx
    def remove_transaction(self, tx_hash):
        """Undo add_transaction: drop the tx from txi/txo and re-park any
        inputs that spent its outputs back into pruned_txo."""
        with self.transaction_lock:
            self.print_error("removing tx from history", tx_hash)
            #tx = self.transactions.pop(tx_hash)
            for ser, hh in self.pruned_txo.items():
                if hh == tx_hash:
                    self.pruned_txo.pop(ser)
            # add tx to pruned_txo, and undo the txi addition
            for next_tx, dd in self.txi.items():
                for addr, l in dd.items():
                    ll = l[:]
                    for item in ll:
                        ser, v = item
                        prev_hash, prev_n = ser.split(':')
                        if prev_hash == tx_hash:
                            l.remove(item)
                            self.pruned_txo[ser] = next_tx
                    if l == []:
                        dd.pop(addr)
                    else:
                        dd[addr] = l
            try:
                self.txi.pop(tx_hash)
                self.txo.pop(tx_hash)
            except KeyError:
                self.print_error("tx was not in history", tx_hash)
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        # called by the synchronizer when a new transaction arrives
        self.add_transaction(tx_hash, tx)
        self.add_unverified_tx(tx_hash, tx_height)
    def receive_history_callback(self, addr, hist, tx_fees):
        """Replace the stored history of `addr` with `hist`, updating the
        reverse index and removing transactions no history references."""
        with self.lock:
            old_hist = self.history.get(addr, [])
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # remove tx if it's not referenced in histories
                    self.tx_addr_hist[tx_hash].remove(addr)
                    if not self.tx_addr_hist[tx_hash]:
                        self.remove_transaction(tx_hash)
            self.history[addr] = hist
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
            # add reference in tx_addr_hist
            s = self.tx_addr_hist.get(tx_hash, set())
            s.add(addr)
            self.tx_addr_hist[tx_hash] = s
            # if addr is new, we have to recompute txi and txo
            tx = self.transactions.get(tx_hash)
            if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
                self.add_transaction(tx_hash, tx)
        # Store fees
        self.tx_fees.update(tx_fees)
    def get_history(self, domain=None):
        """Return wallet history over *domain* addresses.

        Result is a list of (tx_hash, height, conf, timestamp, delta,
        balance) tuples ordered oldest-first after the final reverse,
        where *delta* is the net effect of the tx on the domain and
        *balance* the running balance after it.  Returns [] when the
        running balance does not reconcile (incomplete history).
        """
        # get domain
        if domain is None:
            domain = self.get_addresses()
        # 1. Get the history of each address in the domain, maintain the
        # delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                # None delta poisons the sum: once unknown, stays unknown
                if delta is None or tx_deltas[tx_hash] is None:
                    tx_deltas[tx_hash] = None
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            height, conf, timestamp = self.get_tx_height(tx_hash)
            history.append((tx_hash, height, conf, timestamp, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]))
        history.reverse()
        # 3. add balance
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        # walk newest-first, subtracting each delta from the balance
        for tx_hash, height, conf, timestamp, delta in history:
            h2.append((tx_hash, height, conf, timestamp, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        h2.reverse()
        # fixme: this may happen if history is incomplete
        if balance not in [None, 0]:
            self.print_error("Error: history not synchronized")
            return []
        return h2
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if label is '':
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
    def get_tx_status(self, tx_hash, height, conf, timestamp):
        """Map a tx to (status_code, status_string) for display.

        Codes 0-4 index TX_STATUS; codes >= 5 mean confirmed and the
        string is the formatted timestamp instead.
        """
        from util import format_time
        if conf == 0:
            tx = self.transactions.get(tx_hash)
            if not tx:
                return 3, 'unknown'
            is_final = tx and tx.is_final()
            fee = self.tx_fees.get(tx_hash)
            if fee and self.network and self.network.config.has_fee_estimates():
                # tx.raw is hex, so size in bytes is half its length
                size = len(tx.raw)/2
                low_fee = int(self.network.config.dynfee(0)*size/1000)
                is_lowfee = fee < low_fee * 0.5
            else:
                is_lowfee = False
            # height < 0: unconfirmed parent; height == 0: in mempool
            if height==0 and not is_final:
                status = 0
            elif height < 0:
                status = 1
            elif height == 0 and is_lowfee:
                status = 2
            elif height == 0:
                status = 3
            else:
                status = 4
        else:
            # confirmed: 5..10 depending on depth (capped at 6 conf)
            status = 4 + min(conf, 6)
        time_str = format_time(timestamp) if timestamp else _("unknown")
        status_str = TX_STATUS[status] if status < 5 else time_str
        return status, status_str
def relayfee(self):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = self.network.relay_fee if self.network and self.network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(self):
# Change <= dust threshold is added to the tx fee
return 182 * 3 * self.relayfee() / 1000
def get_tx_fee(self, tx):
# this method can be overloaded
return tx.get_fee()
    def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None):
        """Build an unsigned transaction paying *outputs* from *inputs*.

        An output amount of '!' means "spend max" (at most one allowed).
        Otherwise a coin chooser selects coins and change outputs.
        Raises NotEnoughFunds when *inputs* is empty and BaseException on
        invalid outputs.
        """
        # check outputs
        i_max = None
        for i, o in enumerate(outputs):
            _type, data, value = o
            if _type == TYPE_ADDRESS:
                if not is_address(data):
                    raise BaseException("Invalid bitcoin address:" + data)
            if value == '!':
                if i_max is not None:
                    raise BaseException("More than one output set to spend max")
                i_max = i
        # Avoid index-out-of-range with inputs[0] below
        if not inputs:
            raise NotEnoughFunds()
        for item in inputs:
            self.add_input_info(item)
        # change address
        if change_addr:
            change_addrs = [change_addr]
        else:
            addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
            if self.use_change and addrs:
                # New change addresses are created only after a few
                # confirmations. Select the unused addresses within the
                # gap limit; if none take one at random
                change_addrs = [addr for addr in addrs if
                                self.get_num_tx(addr) == 0]
                if not change_addrs:
                    change_addrs = [random.choice(addrs)]
            else:
                # no change chain in use: send change back to an input address
                change_addrs = [inputs[0]['address']]
        # Fee estimator
        if fixed_fee is None:
            fee_estimator = partial(self.estimate_fee, config)
        else:
            fee_estimator = lambda size: fixed_fee
        if i_max is None:
            # Let the coin chooser select the coins to spend
            max_change = self.max_change_outputs if self.multiple_change else 1
            coin_chooser = coinchooser.get_coin_chooser(config)
            tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
                                      fee_estimator, self.dust_threshold())
        else:
            # "spend max": size a draft tx with a zero amount, then set
            # the max output to whatever remains after the fee
            sendable = sum(map(lambda x:x['value'], inputs))
            _type, data, value = outputs[i_max]
            outputs[i_max] = (_type, data, 0)
            tx = Transaction.from_io(inputs, outputs[:])
            fee = fee_estimator(tx.estimated_size())
            amount = max(0, sendable - tx.output_value() - fee)
            outputs[i_max] = (_type, data, amount)
            tx = Transaction.from_io(inputs, outputs[:])
        # Sort the inputs and outputs deterministically
        tx.BIP_LI01_sort()
        tx.postime = int(time.time())
        run_hook('make_unsigned_transaction', self, tx)
        return tx
def estimate_fee(self, config, size):
fee = int(config.fee_per_kb() * size / 1000.)
return fee
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
coins = self.get_spendable_coins(domain)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
self.sign_transaction(tx, password)
return tx
    def sweep(self, privkeys, network, config, recipient, fee=None, imax=100):
        """Build a signed tx sending all UTXOs of *privkeys* to *recipient*.

        Unspent outputs are fetched from *network* (at most *imax*
        inputs).  Returns None when there is nothing to sweep.
        """
        inputs = []
        keypairs = {}
        for privkey in privkeys:
            pubkey = public_key_from_private_key(privkey)
            address = address_from_private_key(privkey)
            u = network.synchronous_get(('blockchain.address.listunspent', [address]))
            pay_script = Transaction.pay_script(TYPE_ADDRESS, address)
            for item in u:
                if len(inputs) >= imax:
                    break
                # annotate the server's utxo record with signing info
                item['scriptPubKey'] = pay_script
                item['redeemPubkey'] = pubkey
                item['address'] = address
                item['prevout_hash'] = item['tx_hash']
                item['prevout_n'] = item['tx_pos']
                item['pubkeys'] = [pubkey]
                item['x_pubkeys'] = [pubkey]
                item['signatures'] = [None]
                item['num_sig'] = 1
                inputs.append(item)
            keypairs[pubkey] = privkey
        if not inputs:
            return
        total = sum(i.get('value') for i in inputs)
        if fee is None:
            # estimate the fee from a draft tx paying the full total
            outputs = [(TYPE_ADDRESS, recipient, total)]
            tx = Transaction.from_io(inputs, outputs)
            fee = self.estimate_fee(config, tx.estimated_size())
        outputs = [(TYPE_ADDRESS, recipient, total - fee)]
        tx = Transaction.from_io(inputs, outputs)
        tx.sign(keypairs)
        return tx
def is_frozen(self, addr):
return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
'''Set frozen state of the addresses to FREEZE, True or False'''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses))
return True
return False
    def prepare_for_verifier(self):
        """Queue historical txs for SPV verification; drop unknown txs."""
        # review transactions that are in the history
        for addr, hist in self.history.items():
            for tx_hash, tx_height in hist:
                # add it in case it was previously unconfirmed
                self.add_unverified_tx(tx_hash, tx_height)
        # if we are on a pruning server, remove unverified transactions
        with self.lock:
            # Python 2: keys() returns concrete lists, so concatenation
            # here and popping while iterating below are safe
            vr = self.verified_tx.keys() + self.unverified_tx.keys()
        for tx_hash in self.transactions.keys():
            if tx_hash not in vr:
                self.print_error("removing transaction", tx_hash)
                self.transactions.pop(tx_hash)
    def start_threads(self, network):
        """Attach to *network* and start the SPV verifier and synchronizer."""
        self.network = network
        if self.network is not None:
            self.prepare_for_verifier()
            self.verifier = SPV(self.network, self)
            self.synchronizer = Synchronizer(self, network)
            network.add_jobs([self.verifier, self.synchronizer])
        else:
            self.verifier = None
            self.synchronizer = None

    def stop_threads(self):
        """Detach network jobs and flush all wallet state to storage."""
        if self.network:
            self.network.remove_jobs([self.synchronizer, self.verifier])
            self.synchronizer.release()
            self.synchronizer = None
            self.verifier = None
            # Now no references to the syncronizer or verifier
            # remain so they will be GC-ed
            self.storage.put('stored_height', self.get_local_height())
        self.save_transactions()
        self.storage.put('verified_tx3', self.verified_tx)
        self.storage.write()
    def wait_until_synchronized(self, callback=None):
        """Block until the wallet is synchronized, reporting progress
        through *callback* (a function taking a message string)."""
        def wait_for_wallet():
            self.set_up_to_date(False)
            while not self.is_up_to_date():
                if callback:
                    # NOTE(review): self.addresses(True) is not defined as a
                    # method in the visible code — confirm subclasses provide it
                    msg = "%s\n%s %d"%(
                        _("Please wait..."),
                        _("Addresses generated:"),
                        len(self.addresses(True)))
                    callback(msg)
                time.sleep(0.1)
        def wait_for_network():
            while not self.network.is_connected():
                if callback:
                    msg = "%s \n" % (_("Connecting..."))
                    callback(msg)
                time.sleep(0.1)
        # wait until we are connected, because the user
        # might have selected another server
        if self.network:
            wait_for_network()
            wait_for_wallet()
        else:
            self.synchronize()
def can_export(self):
return not self.is_watching_only()
def is_used(self, address):
h = self.history.get(address,[])
c, u, x = self.get_addr_balance(address)
return len(h) > 0 and c + u + x == 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def bump_fee(self, tx, delta):
if tx.is_final():
raise BaseException(_("Cannot bump fee: transaction is final"))
inputs = copy.deepcopy(tx.inputs())
outputs = copy.deepcopy(tx.outputs())
for txin in inputs:
txin['signatures'] = [None] * len(txin['signatures'])
self.add_input_info(txin)
# use own outputs
s = filter(lambda x: self.is_mine(x[1]), outputs)
# ... unless there is none
if not s:
s = outputs
# prioritize low value outputs, to get rid of dust
s = sorted(s, key=lambda x: x[2])
for o in s:
i = outputs.index(o)
otype, address, value = o
if value - delta >= self.dust_threshold():
outputs[i] = otype, address, value - delta
delta = 0
break
else:
del outputs[i]
delta -= value
if delta > 0:
continue
if delta > 0:
raise BaseException(_('Cannot bump fee: cound not find suitable outputs'))
return Transaction.from_io(inputs, outputs)
    def add_input_info(self, txin):
        """Fill in the address and signing metadata for *txin* when the
        spent output belongs to this wallet."""
        # Add address for utxo that are in wallet
        if txin.get('scriptSig') == '':
            coins = self.get_spendable_coins()
            for item in coins:
                if txin.get('prevout_hash') == item.get('prevout_hash') and txin.get('prevout_n') == item.get('prevout_n'):
                    txin['address'] = item.get('address')
        address = txin['address']
        if self.is_mine(address):
            self.add_input_sig_info(txin, address)

    def can_sign(self, tx):
        """True when any of our keystores can add a signature to *tx*."""
        if tx.is_complete():
            return False
        for k in self.get_keystores():
            if k.can_sign(tx):
                return True
        return False
    def get_input_tx(self, tx_hash):
        """Return the tx *tx_hash*, fetching it over the network if the
        wallet does not hold it locally."""
        # First look up an input transaction in the wallet where it
        # will likely be. If co-signing a transaction it may not have
        # all the input txs, in which case we ask the network.
        tx = self.transactions.get(tx_hash)
        if not tx and self.network:
            request = ('blockchain.transaction.get', [tx_hash])
            tx = Transaction(self.network.synchronous_get(request))
        return tx

    def add_hw_info(self, tx):
        """Attach previous txs and change-output metadata that hardware
        wallets need before signing *tx*."""
        # add previous tx for hw wallets
        for txin in tx.inputs():
            tx_hash = txin['prevout_hash']
            txin['prev_tx'] = self.get_input_tx(tx_hash)
        # add output info for hw wallets
        info = {}
        xpubs = self.get_master_public_keys()
        for txout in tx.outputs():
            _type, addr, amount = txout
            if self.is_change(addr):
                index = self.get_address_index(addr)
                pubkeys = self.get_public_keys(addr)
                # sort xpubs using the order of pubkeys
                sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
                # third element is m-of-n threshold for multisig, else None
                info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None
        tx.output_info = info
    def sign_transaction(self, tx, password):
        """Sign *tx* in place with every keystore able to do so;
        a user cancellation on one keystore skips to the next."""
        if self.is_watching_only():
            return
        # hardware wallets require extra info
        if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
            self.add_hw_info(tx)
        # sign
        for k in self.get_keystores():
            try:
                if k.can_sign(tx):
                    k.sign_transaction(tx, password)
            except UserCancelled:
                continue
def get_unused_addresses(self):
# fixme: use slots from expired requests
domain = self.get_receiving_addresses()
return [addr for addr in domain if not self.history.get(addr)
and addr not in self.receive_requests.keys()]
def get_unused_address(self):
addrs = self.get_unused_addresses()
if addrs:
return addrs[0]
def get_receiving_address(self):
# always return an address
domain = self.get_receiving_addresses()
choice = domain[0]
for addr in domain:
if not self.history.get(addr):
if addr not in self.receive_requests.keys():
return addr
else:
choice = addr
return choice
    def get_payment_status(self, address, amount):
        """Check whether *address* received at least *amount* satoshis.

        Returns (True, conf) where conf is the confirmation count of the
        least-confirmed output needed to cover the amount (outputs are
        counted most-confirmed first), else (False, None).
        """
        local_height = self.get_local_height()
        received, sent = self.get_addr_io(address)
        l = []
        for txo, x in received.items():
            h, v, is_cb = x
            txid, n = txo.split(':')
            info = self.verified_tx.get(txid)
            if info:
                tx_height, timestamp, pos = info
                conf = local_height - tx_height
            else:
                conf = 0
            l.append((conf, v))
        vsum = 0
        # accumulate from the most confirmed outputs down
        for conf, v in reversed(sorted(l)):
            vsum += v
            if vsum >= amount:
                return True, conf
        return False, None
    def get_payment_request(self, addr, config):
        """Return the stored payment request for *addr*, enriched with
        its current status, a payment URI and — when a requests
        directory is configured and the BIP70 file exists — request /
        index URLs and websocket endpoints.  Returns None when there is
        no request for *addr*."""
        import util
        r = self.receive_requests.get(addr)
        if not r:
            return
        out = copy.copy(r)
        out['URI'] = 'trumpcoin:' + addr + '?amount=' + util.format_satoshis(out.get('amount'))
        status, conf = self.get_request_status(addr)
        out['status'] = status
        if conf is not None:
            out['confirmations'] = conf
        # check if bip70 file exists
        rdir = config.get('requests_dir')
        if rdir:
            key = out.get('id', addr)
            # requests are sharded by the first two chars of their key
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if os.path.exists(path):
                baseurl = 'file://' + rdir
                rewrite = config.get('url_rewrite')
                if rewrite:
                    baseurl = baseurl.replace(*rewrite)
                out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
                out['URI'] += '&r=' + out['request_url']
                out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
                websocket_server_announce = config.get('websocket_server_announce')
                if websocket_server_announce:
                    out['websocket_server'] = websocket_server_announce
                else:
                    out['websocket_server'] = config.get('websocket_server', 'localhost')
                websocket_port_announce = config.get('websocket_port_announce')
                if websocket_port_announce:
                    out['websocket_port'] = websocket_port_announce
                else:
                    out['websocket_port'] = config.get('websocket_port', 9999)
        return out
def get_request_status(self, key):
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration):
timestamp = int(time.time())
_id = Hash(addr + "%d"%timestamp).encode('hex')[0:10]
r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
return r
    def sign_payment_request(self, key, alias, alias_addr, password):
        """Sign the stored request *key* with the key of *alias_addr*,
        storing the resulting pki data and signature back into it."""
        req = self.receive_requests.get(key)
        alias_privkey = self.get_private_key(alias_addr, password)[0]
        pr = paymentrequest.make_unsigned_request(req)
        paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
        req['name'] = pr.pki_data
        req['sig'] = pr.signature.encode('hex')
        self.receive_requests[key] = req
        self.storage.put('payment_requests', self.receive_requests)
    def add_payment_request(self, req, config):
        """Store *req*, label its address, and — when a requests dir is
        configured and an amount is set — write the BIP70 request file
        plus a JSON copy next to it.  Returns the (reloaded) request."""
        import os
        addr = req['address']
        amount = req.get('amount')
        message = req.get('memo')
        self.receive_requests[addr] = req
        self.storage.put('payment_requests', self.receive_requests)
        self.set_label(addr, message) # should be a default label
        rdir = config.get('requests_dir')
        if rdir and amount is not None:
            key = req.get('id', addr)
            pr = paymentrequest.make_request(config, req)
            # shard request files by the first two chars of the key
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except OSError as exc:
                    # tolerate a concurrent mkdir
                    if exc.errno != errno.EEXIST:
                        raise
            with open(os.path.join(path, key), 'w') as f:
                f.write(pr.SerializeToString())
            # reload
            req = self.get_payment_request(addr, config)
            with open(os.path.join(path, key + '.json'), 'w') as f:
                f.write(json.dumps(req))
        return req
    def remove_payment_request(self, addr, config):
        """Delete the request for *addr* and its on-disk files.
        Returns False when no such request exists."""
        if addr not in self.receive_requests:
            return False
        r = self.receive_requests.pop(addr)
        rdir = config.get('requests_dir')
        if rdir:
            key = r.get('id', addr)
            # remove both the BIP70 file and its .json sibling
            for s in ['.json', '']:
                n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
                if os.path.exists(n):
                    os.unlink(n)
        self.storage.put('payment_requests', self.receive_requests)
        return True
    def get_sorted_requests(self, config):
        """All payment requests, sorted by address derivation index
        (unknown addresses sort first)."""
        def f(x):
            try:
                addr = x.get('address')
                return self.get_address_index(addr)
            except:
                return -1, (0, 0)
        return sorted(map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys()), key=f)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def add_address(self, address):
if address not in self.history:
self.history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
def has_password(self):
return self.storage.get('use_encryption', False)
class Imported_Wallet(Abstract_Wallet):
    """Watching-only wallet made of individually imported addresses.

    There is no keystore and no key material: the wallet only tracks
    balances and history for a flat list of addresses.
    """
    # wallet made of imported addresses
    wallet_type = 'imported'

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)

    def load_keystore(self):
        # no keys to load for imported addresses
        pass

    def load_addresses(self):
        # note: self.addresses is a plain list attribute in this class
        self.addresses = self.storage.get('addresses', [])
        self.receiving_addresses = self.addresses
        self.change_addresses = []

    def get_keystores(self):
        return []

    def has_password(self):
        return False

    def can_change_password(self):
        return False

    def can_import_address(self):
        return True

    def is_watching_only(self):
        return True

    def has_seed(self):
        return False

    def is_deterministic(self):
        return False

    def is_used(self, address):
        return False

    def get_master_public_keys(self):
        return []

    def is_beyond_limit(self, address, is_change):
        # no gap limit: the address list is explicit
        return False

    def get_fingerprint(self):
        return ''

    def get_addresses(self, include_change=False):
        return self.addresses

    def import_address(self, address):
        """Add *address* to the wallet and persist; no-op when present."""
        if address in self.addresses:
            return
        self.addresses.append(address)
        self.storage.put('addresses', self.addresses)
        self.storage.write()
        self.add_address(address)
        return address

    def can_delete_address(self):
        return True

    def delete_address(self, address):
        """Remove *address* from the wallet and persist."""
        if address not in self.addresses:
            return
        self.addresses.remove(address)
        self.storage.put('addresses', self.addresses)
        self.storage.write()

    def get_receiving_addresses(self):
        return self.addresses[:]

    def get_change_addresses(self):
        return []

    def add_input_sig_info(self, txin, address):
        # encode the address itself as an 'fd'-prefixed x_pubkey so the
        # transaction can be serialized (no real pubkey is available)
        addrtype, hash160 = bc_address_to_hash_160(address)
        xpubkey = 'fd' + (chr(addrtype) + hash160).encode('hex')
        txin['x_pubkeys'] = [ xpubkey ]
        txin['pubkeys'] = [ xpubkey ]
        txin['signatures'] = [None]
class P2PKH_Wallet(Abstract_Wallet):
    """Mixin for wallets whose addresses are pay-to-pubkey-hash,
    backed by a single keystore."""

    def pubkeys_to_address(self, pubkey):
        # pubkey is a hex string
        return public_key_to_bc_address(pubkey.decode('hex'))

    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore')

    def get_pubkey(self, c, i):
        # c: change flag, i: index within the sequence
        pubkey_list = self.change_pubkeys if c else self.receiving_pubkeys
        return pubkey_list[i]

    def get_public_keys(self, address):
        return [self.get_public_key(address)]

    def add_input_sig_info(self, txin, address):
        """Populate *txin* with the single pubkey/x_pubkey it needs."""
        if not self.keystore.can_import():
            # deterministic: look up by derivation path
            txin['derivation'] = derivation = self.get_address_index(address)
            x_pubkey = self.keystore.get_xpubkey(*derivation)
            pubkey = self.get_pubkey(*derivation)
        else:
            # imported keys: the pubkey itself acts as the x_pubkey
            pubkey = self.get_public_key(address)
            assert pubkey is not None
            x_pubkey = pubkey
        txin['x_pubkeys'] = [x_pubkey]
        txin['pubkeys'] = [pubkey]
        txin['signatures'] = [None]
        txin['redeemPubkey'] = pubkey
        txin['num_sig'] = 1

    def sign_message(self, address, message, password):
        index = self.get_address_index(address)
        return self.keystore.sign_message(index, message, password)

    def decrypt_message(self, pubkey, message, password):
        index = self.get_pubkey_index(pubkey)
        return self.keystore.decrypt_message(index, message, password)
class Deterministic_Wallet(Abstract_Wallet):
    """Wallet whose addresses derive from a seed / master key.

    Maintains separate receiving and change sequences and keeps a gap
    of unused trailing addresses generated ahead of use.

    Bug fix: ``min_acceptable_gap`` referenced ``self.account``, an
    attribute that does not exist anywhere in this file; it now uses
    ``self.get_receiving_addresses()``, matching its only caller
    ``change_gap_limit``.
    """

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        self.gap_limit = storage.get('gap_limit', 20)

    def has_seed(self):
        return self.keystore.has_seed()

    def is_deterministic(self):
        return self.keystore.is_deterministic()

    def get_receiving_addresses(self):
        return self.receiving_addresses

    def get_change_addresses(self):
        return self.change_addresses

    def get_seed(self, password):
        return self.keystore.get_seed(password)

    def add_seed(self, seed, pw):
        self.keystore.add_seed(seed, pw)

    def change_gap_limit(self, value):
        '''This method is not called in the code, it is kept for console use'''
        if value >= self.gap_limit:
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            return True
        elif value >= self.min_acceptable_gap():
            # shrink: truncate the receiving sequence down to the new gap
            addresses = self.get_receiving_addresses()
            k = self.num_unused_trailing_addresses(addresses)
            n = len(addresses) - k + value
            self.receiving_pubkeys = self.receiving_pubkeys[0:n]
            self.receiving_addresses = self.receiving_addresses[0:n]
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            self.save_pubkeys()
            return True
        else:
            return False

    def num_unused_trailing_addresses(self, addresses):
        # count addresses at the end of the list that have no history
        k = 0
        for a in addresses[::-1]:
            if self.history.get(a):break
            k = k + 1
        return k

    def min_acceptable_gap(self):
        # fixme: this assumes wallet is synchronized
        n = 0
        nmax = 0
        # fixed: was self.account.get_receiving_addresses() — no such attribute
        addresses = self.get_receiving_addresses()
        k = self.num_unused_trailing_addresses(addresses)
        for a in addresses[0:-k]:
            if self.history.get(a):
                n = 0
            else:
                n += 1
                if n > nmax: nmax = n
        return nmax + 1

    def create_new_address(self, for_change):
        """Derive, persist and register the next address of a sequence."""
        pubkey_list = self.change_pubkeys if for_change else self.receiving_pubkeys
        n = len(pubkey_list)
        x = self.new_pubkeys(for_change, n)
        pubkey_list.append(x)
        self.save_pubkeys()
        address = self.pubkeys_to_address(x)
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        addr_list.append(address)
        self.add_address(address)
        return address

    def synchronize_sequence(self, for_change):
        """Generate addresses until the trailing gap_limit are all fresh."""
        limit = self.gap_limit_for_change if for_change else self.gap_limit
        while True:
            addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
            if len(addresses) < limit:
                self.create_new_address(for_change)
                continue
            if map(lambda a: self.address_is_old(a), addresses[-limit:] ) == limit*[False]:
                break
            else:
                self.create_new_address(for_change)

    def synchronize(self):
        with self.lock:
            if self.is_deterministic():
                self.synchronize_sequence(False)
                self.synchronize_sequence(True)
            else:
                # imported-keys keystore: mirror its keypairs
                if len(self.receiving_pubkeys) != len(self.keystore.keypairs):
                    self.receiving_pubkeys = self.keystore.keypairs.keys()
                    self.save_pubkeys()
                    self.receiving_addresses = map(self.pubkeys_to_address, self.receiving_pubkeys)
                    for addr in self.receiving_addresses:
                        self.add_address(addr)

    def is_beyond_limit(self, address, is_change):
        """True when *address* sits past gap_limit unused predecessors."""
        addr_list = self.get_change_addresses() if is_change else self.get_receiving_addresses()
        i = addr_list.index(address)
        prev_addresses = addr_list[:max(0, i)]
        limit = self.gap_limit_for_change if is_change else self.gap_limit
        if len(prev_addresses) < limit:
            return False
        prev_addresses = prev_addresses[max(0, i - limit):]
        for addr in prev_addresses:
            if self.history.get(addr):
                return False
        return True

    def get_master_public_keys(self):
        return [self.get_master_public_key()]

    def get_fingerprint(self):
        return self.get_master_public_key()
class Standard_Wallet(Deterministic_Wallet, P2PKH_Wallet):
    """Default single-keystore wallet: deterministic P2PKH addresses,
    with optional private-key import when the keystore supports it."""
    wallet_type = 'standard'

    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def new_pubkeys(self, c, i):
        return self.keystore.derive_pubkey(c, i)

    def get_keystore(self):
        return self.keystore

    def get_keystores(self):
        return [self.keystore]

    def is_watching_only(self):
        return self.keystore.is_watching_only()

    def can_change_password(self):
        return self.keystore.can_change_password()

    def check_password(self, password):
        self.keystore.check_password(password)

    def update_password(self, old_pw, new_pw):
        self.keystore.update_password(old_pw, new_pw)
        self.save_keystore()
        self.storage.put('use_encryption', (new_pw is not None))
        self.storage.write()

    def save_keystore(self):
        self.storage.put('keystore', self.keystore.dump())

    def can_delete_address(self):
        # only wallets of imported keys may delete addresses
        return self.keystore.can_import()

    def delete_address(self, address):
        """Remove an imported key/address pair and persist."""
        pubkey = self.get_public_key(address)
        self.keystore.delete_imported_key(pubkey)
        self.save_keystore()
        self.receiving_pubkeys.remove(pubkey)
        self.receiving_addresses.remove(address)
        self.storage.write()

    def can_import_privkey(self):
        return self.keystore.can_import()

    def import_key(self, pk, pw):
        """Import private key *pk* (protected by *pw*); returns the new
        address."""
        pubkey = self.keystore.import_key(pk, pw)
        self.save_keystore()
        self.receiving_pubkeys.append(pubkey)
        self.save_pubkeys()
        addr = self.pubkeys_to_address(pubkey)
        self.receiving_addresses.append(addr)
        self.storage.write()
        self.add_address(addr)
        return addr
class Multisig_Wallet(Deterministic_Wallet):
    """Generic m-of-n multisig wallet (P2SH addresses).

    The m/n parameters are parsed from the stored wallet_type; one
    keystore per cosigner is kept under keys 'x1/' .. 'xn/'.
    """
    # generic m of n
    gap_limit = 20

    def __init__(self, storage):
        self.wallet_type = storage.get('wallet_type')
        self.m, self.n = multisig_type(self.wallet_type)
        Deterministic_Wallet.__init__(self, storage)

    def get_pubkeys(self, c, i):
        # one pubkey per cosigner for sequence (c, i)
        pubkey_list = self.change_pubkeys if c else self.receiving_pubkeys
        return pubkey_list[i]

    def redeem_script(self, c, i):
        pubkeys = self.get_pubkeys(c, i)
        return Transaction.multisig_script(sorted(pubkeys), self.m)

    def pubkeys_to_address(self, pubkeys):
        # P2SH address of the m-of-n redeem script
        redeem_script = Transaction.multisig_script(sorted(pubkeys), self.m)
        address = hash_160_to_bc_address(hash_160(redeem_script.decode('hex')), bitcoin.ADDRTYPE_P2SH)
        return address

    def new_pubkeys(self, c, i):
        return [k.derive_pubkey(c, i) for k in self.get_keystores()]

    def load_keystore(self):
        self.keystores = {}
        for i in range(self.n):
            name = 'x%d/'%(i+1)
            self.keystores[name] = load_keystore(self.storage, name)
        # x1/ is "our" keystore (seed, password checks)
        self.keystore = self.keystores['x1/']

    def save_keystore(self):
        for name, k in self.keystores.items():
            self.storage.put(name, k.dump())

    def get_keystore(self):
        return self.keystores.get('x1/')

    def get_keystores(self):
        return [self.keystores[i] for i in sorted(self.keystores.keys())]

    def update_password(self, old_pw, new_pw):
        for name, keystore in self.keystores.items():
            if keystore.can_change_password():
                keystore.update_password(old_pw, new_pw)
                self.storage.put(name, keystore.dump())
        self.storage.put('use_encryption', (new_pw is not None))

    def check_password(self, password):
        self.keystore.check_password(password)

    def has_seed(self):
        return self.keystore.has_seed()

    def can_change_password(self):
        return self.keystore.can_change_password()

    def is_watching_only(self):
        # watching-only iff every cosigner keystore is watching-only
        return not any([not k.is_watching_only() for k in self.get_keystores()])

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def get_master_public_keys(self):
        return [k.get_master_public_key() for k in self.get_keystores()]

    def get_fingerprint(self):
        return ''.join(sorted(self.get_master_public_keys()))

    def add_input_sig_info(self, txin, address):
        """Populate *txin* with all cosigner pubkeys and the redeem
        script for the spent address."""
        txin['derivation'] = derivation = self.get_address_index(address)
        pubkeys = self.get_pubkeys(*derivation)
        x_pubkeys = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
        # sort pubkeys and x_pubkeys, using the order of pubkeys
        pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
        txin['pubkeys'] = list(pubkeys)
        txin['x_pubkeys'] = list(x_pubkeys)
        txin['signatures'] = [None] * len(pubkeys)
        txin['redeemScript'] = self.redeem_script(*derivation)
        txin['num_sig'] = self.m
# Registry of known wallet types and their constructors; extended at
# runtime through the register_* helpers below.
wallet_types = ['standard', 'multisig', 'imported']

def register_wallet_type(category):
    wallet_types.append(category)

wallet_constructors = {
    'standard': Standard_Wallet,
    'old': Standard_Wallet,
    'xpub': Standard_Wallet,
    'imported': Imported_Wallet
}

def register_constructor(wallet_type, constructor):
    wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".

    This class is actually a factory that will return a wallet of the correct
    type when passed a WalletStorage instance."""

    def __new__(self, storage):
        wallet_type = storage.get('wallet_type')
        WalletClass = Wallet.wallet_class(wallet_type)
        wallet = WalletClass(storage)
        # Convert hardware wallets restored with older versions of
        # Electrum to BIP44 wallets. A hardware wallet does not have
        # a seed and plugins do not need to handle having one.
        rwc = getattr(wallet, 'restore_wallet_class', None)
        if rwc and storage.get('seed', ''):
            storage.print_error("converting wallet type to " + rwc.wallet_type)
            storage.put('wallet_type', rwc.wallet_type)
            wallet = rwc(storage)
        return wallet

    @staticmethod
    def wallet_class(wallet_type):
        """Resolve a wallet_type string to its class; 'm of n' strings
        map to Multisig_Wallet."""
        if multisig_type(wallet_type):
            return Multisig_Wallet
        if wallet_type in wallet_constructors:
            return wallet_constructors[wallet_type]
        raise RuntimeError("Unknown wallet type: " + wallet_type)
| 35.947035 | 126 | 0.575801 |
import os
import hashlib
import ast
import threading
import random
import time
import json
import copy
import re
import stat
from functools import partial
from collections import namedtuple, defaultdict
from i18n import _
from util import NotEnoughFunds, PrintError, UserCancelled, profiler
from bitcoin import *
from version import *
from keystore import load_keystore, Hardware_KeyStore
from storage import multisig_type
from transaction import Transaction
from plugins import run_hook
import bitcoin
import coinchooser
from synchronizer import Synchronizer
from verifier import SPV
from mnemonic import Mnemonic
import paymentrequest
from storage import WalletStorage
# Human-readable names for the unconfirmed status codes 0-4 computed by
# Abstract_Wallet.get_tx_status (codes >= 5 are rendered as a timestamp).
TX_STATUS = [
    _('Replaceable'),
    _('Unconfirmed parent'),
    _('Low fee'),
    _('Unconfirmed'),
    _('Not Verified'),
]
class Abstract_Wallet(PrintError):
max_change_outputs = 3
    def __init__(self, storage):
        """Load all wallet state (keys, addresses, transactions, history,
        payment requests) from *storage*."""
        self.electrum_version = ELECTRUM_VERSION
        self.storage = storage
        # network-related handles; set by start_threads()
        self.network = None
        self.synchronizer = None
        self.verifier = None
        self.gap_limit_for_change = 6 # fixed gap limit for change addresses
        self.use_change = storage.get('use_change', True)
        self.multiple_change = storage.get('multiple_change', False)
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses',[]))
        # height recorded at last shutdown (see stop_threads)
        self.stored_height = storage.get('stored_height', 0)
        self.history = storage.get('addr_history',{})        # address -> [(tx_hash, height), ...]
        self.load_keystore()
        self.load_addresses()
        self.load_transactions()
        self.build_reverse_history()
        self.receive_requests = self.storage.get('payment_requests', {})
        self.unverified_tx = defaultdict(int)
        self.verified_tx = storage.get('verified_tx3', {})
        self.up_to_date = False
        self.lock = threading.Lock()
        self.transaction_lock = threading.Lock()
        self.check_history()
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type)
    def diagnostic_name(self):
        # presumably used by PrintError to tag log output — confirm
        return self.basename()

    def __str__(self):
        return self.basename()

    def get_master_public_key(self):
        # base implementation: no master public key
        return None
    @profiler
    def load_transactions(self):
        """Load tx maps from storage, discarding transactions that
        nothing in txi/txo/pruned_txo references."""
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.tx_fees = self.storage.get('tx_fees', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
    @profiler
    def save_transactions(self, write=False):
        """Serialize tx state into storage; flush to disk when *write*."""
        with self.transaction_lock:
            tx = {}
            for k,v in self.transactions.items():
                tx[k] = str(v)
            self.storage.put('transactions', tx)
            self.storage.put('txi', self.txi)
            self.storage.put('txo', self.txo)
            self.storage.put('tx_fees', self.tx_fees)
            self.storage.put('pruned_txo', self.pruned_txo)
            self.storage.put('addr_history', self.history)
            if write:
                self.storage.write()
    def clear_history(self):
        """Wipe all derived tx/history state (keys and addresses stay)."""
        with self.transaction_lock:
            self.txi = {}
            self.txo = {}
            self.tx_fees = {}
            self.pruned_txo = {}
        # save_transactions takes transaction_lock itself, so call it
        # after releasing the lock
        self.save_transactions()
        with self.lock:
            self.history = {}
            self.tx_addr_hist = {}
@profiler
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
    @profiler
    def check_history(self):
        """Sanity-pass over stored history: drop addresses that are no
        longer ours and re-index transactions missing from txi/txo.

        NOTE: relies on Python 2 dict.items() returning a list, because
        self.history is mutated during the iteration.
        """
        save = False
        for addr, hist in self.history.items():
            if not self.is_mine(addr):
                self.history.pop(addr)
                save = True
                continue
            for tx_hash, tx_height in hist:
                # Already indexed (or known-pruned): nothing to do.
                if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    self.add_transaction(tx_hash, tx)
                    save = True
        if save:
            self.save_transactions()
    def basename(self):
        # Wallet file name without its directory path.
        return os.path.basename(self.storage.path)
    def save_pubkeys(self):
        self.storage.put('pubkeys', {'receiving':self.receiving_pubkeys, 'change':self.change_pubkeys})
    def load_addresses(self):
        # Derive address lists from the stored pubkey lists.
        # NOTE: relies on Python 2 map() returning a list (the address
        # lists are indexed and sliced elsewhere).
        d = self.storage.get('pubkeys', {})
        self.receiving_pubkeys = d.get('receiving', [])
        self.change_pubkeys = d.get('change', [])
        self.receiving_addresses = map(self.pubkeys_to_address, self.receiving_pubkeys)
        self.change_addresses = map(self.pubkeys_to_address, self.change_pubkeys)
    def synchronize(self):
        # Base class: nothing to generate; deterministic wallets override.
        pass
    def set_up_to_date(self, up_to_date):
        # Record sync state; persist transactions once a sync completes.
        with self.lock:
            self.up_to_date = up_to_date
        if up_to_date:
            self.save_transactions(write=True)
    def is_up_to_date(self):
        with self.lock: return self.up_to_date
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def is_mine(self, address):
return address in self.get_addresses()
def is_change(self, address):
if not self.is_mine(address):
return False
return address in self.change_addresses
    def get_address_index(self, address):
        """Locate `address`: for importing keystores return its pubkey,
        otherwise an (is_change, index) pair. Raises when not ours."""
        if self.keystore.can_import():
            i = self.receiving_addresses.index(address)
            return self.receiving_pubkeys[i]
        elif address in self.receiving_addresses:
            return False, self.receiving_addresses.index(address)
        if address in self.change_addresses:
            return True, self.change_addresses.index(address)
        raise Exception("Address not found", address)
    def get_pubkey_index(self, pubkey):
        """Same contract as get_address_index, keyed by pubkey."""
        if self.keystore.can_import():
            assert pubkey in self.receiving_pubkeys
            return pubkey
        elif pubkey in self.receiving_pubkeys:
            return False, self.receiving_pubkeys.index(pubkey)
        if pubkey in self.change_pubkeys:
            return True, self.change_pubkeys.index(pubkey)
        raise Exception("Pubkey not found", pubkey)
    def get_private_key(self, address, password):
        # Watch-only wallets hold no private keys.
        if self.is_watching_only():
            return []
        index = self.get_address_index(address)
        pk = self.keystore.get_private_key(index, password)
        return [pk]
    def get_public_key(self, address):
        if self.keystore.can_import():
            i = self.receiving_addresses.index(address)
            pubkey = self.receiving_pubkeys[i]
        else:
            sequence = self.get_address_index(address)
            pubkey = self.get_pubkey(*sequence)
        return pubkey
    def get_public_keys(self, address):
        # Multi-key variant; subclasses provide get_pubkeys.
        sequence = self.get_address_index(address)
        return self.get_pubkeys(*sequence)
    def add_unverified_tx(self, tx_hash, tx_height):
        # Remember a server-reported tx until the SPV verifier confirms it.
        if tx_hash not in self.verified_tx:
            self.unverified_tx[tx_hash] = tx_height
    def add_verified_tx(self, tx_hash, info):
        # info is the (height, timestamp, pos) triple from the verifier.
        self.unverified_tx.pop(tx_hash, None)
        with self.lock:
            self.verified_tx[tx_hash] = info
        height, conf, timestamp = self.get_tx_height(tx_hash)
        self.network.trigger_callback('verified', tx_hash, height, conf, timestamp)
    def get_unverified_txs(self):
        """Return the map of unverified tx hash -> reported height."""
        return self.unverified_tx
def undo_verifications(self, height):
txs = []
with self.lock:
for tx_hash, item in self.verified_tx:
tx_height, timestamp, pos = item
if tx_height >= height:
self.verified_tx.pop(tx_hash, None)
txs.append(tx_hash)
return txs
    def get_local_height(self):
        # Current chain tip from the network, or the stored height when offline.
        return self.network.get_local_height() if self.network else self.stored_height
    def get_tx_height(self, tx_hash):
        """Return (height, confirmations, timestamp) for a transaction.

        Unverified/unknown hashes yield (reported-or-0, 0, False); note
        that unverified_tx is a defaultdict(int), so an unknown hash is
        silently recorded with height 0.
        """
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                conf = max(self.get_local_height() - height + 1, 0)
                return height, conf, timestamp
            else:
                height = self.unverified_tx[tx_hash]
                return height, 0, False
    def get_txpos(self, tx_hash):
        """Sort key (height, pos) for a tx; unknown txs sort last (1e12)."""
        with self.lock:
            x = self.verified_tx.get(tx_hash)
            y = self.unverified_tx.get(tx_hash)
        if x:
            height, timestamp, pos = x
            return height, pos
        elif y > 0:
            # NOTE(review): y may be None here; Python 2 allows None > 0
            # (False). Under Python 3 this comparison would raise.
            return y, 0
        else:
            return 1e12, 0
    def is_found(self):
        # Any address with non-empty history?  (Python 2: values() is a list.)
        return self.history.values() != [[]] * len(self.history)
    def get_num_tx(self, address):
        """Number of transactions in which `address` is involved."""
        return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
if tx_hash in self.pruned_txo.values():
return None
delta = 0
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx):
        """Classify `tx` relative to this wallet.

        Returns (is_relevant, is_mine, v, fee):
          is_relevant -- tx touches one of our addresses
          is_mine     -- at least one input is ours
          v           -- net value change for the wallet
          fee         -- tx fee, or None when unknowable (pruned or
                         partial inputs, or no input of ours)
        """
        addresses = self.get_addresses()
        is_relevant = False
        is_mine = False
        is_pruned = False
        is_partial = False
        v_in = v_out = v_out_mine = 0
        for item in tx.inputs():
            addr = item.get('address')
            if addr in addresses:
                is_mine = True
                is_relevant = True
                # Look up the value of the spent output in our txo index.
                d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == item['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for addr, value in tx.get_outputs():
            v_out += value
            if addr in addresses:
                v_out_mine += value
                is_relevant = True
        if is_pruned:
            # Some input values are unknown, so the fee cannot be computed.
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # Some inputs are mine, but not all: fee unknown.
                fee = None
            else:
                # All inputs are mine.
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
    def get_tx_info(self, tx):
        """Assemble display info for `tx`.

        Returns (tx_hash, status, label, can_broadcast, can_bump, amount,
        fee, height, conf, timestamp, exp_n).
        """
        is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
        exp_n = None
        can_broadcast = False
        can_bump = False
        label = ''
        height = conf = timestamp = None
        if tx.is_complete():
            tx_hash = tx.hash()
            if tx_hash in self.transactions.keys():
                label = self.get_label(tx_hash)
                height, conf, timestamp = self.get_tx_height(tx_hash)
                if height > 0:
                    if conf:
                        status = _("%d confirmations") % conf
                    else:
                        status = _('Not verified')
                else:
                    status = _('Unconfirmed')
                    if fee is None:
                        fee = self.tx_fees.get(tx_hash)
                    if fee:
                        # Estimate how many blocks until confirmation.
                        size = tx.estimated_size()
                        fee_per_kb = fee * 1000 / size
                        exp_n = self.network.config.reverse_dynfee(fee_per_kb)
                    can_bump = is_mine and not tx.is_final()
            else:
                status = _("Signed")
                can_broadcast = self.network is not None
        else:
            s, r = tx.signature_count()
            status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
            tx_hash = None
        if is_relevant:
            if is_mine:
                if fee is not None:
                    amount = v + fee
                else:
                    amount = v
            else:
                amount = v
        else:
            amount = None
        return tx_hash, status, label, can_broadcast, can_bump, amount, fee, height, conf, timestamp, exp_n
def get_addr_io(self, address):
h = self.history.get(address, [])
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = []
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out.append(x)
return out
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    def get_addr_balance(self, address):
        """Return the balance of `address` as a triple:
        (confirmed-and-matured, unconfirmed, unmatured-coinbase).
        """
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        for txo, (tx_height, v, is_cb) in received.items():
            if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
                # Coinbase output still maturing.
                x += v
            elif tx_height > 0:
                c += v
            else:
                u += v
            # Spent outputs are subtracted from the bucket matching the
            # confirmation state of the *spending* tx.
            if txo in sent:
                if sent[txo] > 0:
                    c -= v
                else:
                    u -= v
        return c, u, x
    def get_spendable_coins(self, domain = None):
        # Spendable = not frozen and (for coinbase) mature.
        return self.get_utxos(domain, exclude_frozen=True, mature=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False):
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos:
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
    def dummy_address(self):
        # Any address of ours; used as a placeholder (e.g. fee estimation).
        return self.get_receiving_addresses()[0]
    def get_addresses(self):
        # Receiving addresses first, then change addresses.
        out = []
        out += self.get_receiving_addresses()
        out += self.get_change_addresses()
        return out
    def get_frozen_balance(self):
        return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
    def get_address_history(self, address):
        with self.lock:
            return self.history.get(address, [])
    def find_pay_to_pubkey_address(self, prevout_hash, prevout_n):
        """Resolve a pay-to-pubkey input to an address of ours, if the
        spent output is in our txo index.  Returns None when not found."""
        dd = self.txo.get(prevout_hash, {})
        for addr, l in dd.items():
            for n, v, is_cb in l:
                if n == prevout_n:
                    self.print_error("found pay-to-pubkey address:", addr)
                    return addr
    def add_transaction(self, tx_hash, tx):
        """Index `tx` into self.txi/self.txo for our addresses, resolving
        previously pruned spends, and store the transaction."""
        is_coinbase = tx.inputs()[0].get('is_coinbase') == True
        with self.transaction_lock:
            # Index inputs that spend our outputs.
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                addr = txi.get('address')
                if not txi.get('is_coinbase'):
                    prevout_hash = txi['prevout_hash']
                    prevout_n = txi['prevout_n']
                    ser = prevout_hash + ':%d'%prevout_n
                if addr == "(pubkey)":
                    addr = self.find_pay_to_pubkey_address(prevout_hash, prevout_n)
                if addr and self.is_mine(addr):
                    # Find the value of the spent output in our txo index.
                    dd = self.txo.get(prevout_hash, {})
                    for n, v, is_cb in dd.get(addr, []):
                        if n == prevout_n:
                            if d.get(addr) is None:
                                d[addr] = []
                            d[addr].append((ser, v))
                            break
                    else:
                        # Funding tx unknown (pruning server): remember the
                        # spend so it can be resolved later.
                        self.pruned_txo[ser] = tx_hash
            # Index outputs paying our addresses.
            self.txo[tx_hash] = d = {}
            for n, txo in enumerate(tx.outputs()):
                ser = tx_hash + ':%d'%n
                _type, x, v = txo
                if _type == TYPE_ADDRESS:
                    addr = x
                elif _type == TYPE_PUBKEY:
                    # NOTE: Python 2 — str.decode('hex').
                    addr = public_key_to_bc_address(x.decode('hex'))
                else:
                    addr = None
                if addr and self.is_mine(addr):
                    if d.get(addr) is None:
                        d[addr] = []
                    d[addr].append((n, v, is_coinbase))
                # If a later tx already spent this output while it was
                # pruned, give it the value now.
                next_tx = self.pruned_txo.get(ser)
                if next_tx is not None:
                    self.pruned_txo.pop(ser)
                    dd = self.txi.get(next_tx, {})
                    if dd.get(addr) is None:
                        dd[addr] = []
                    dd[addr].append((ser, v))
            self.transactions[tx_hash] = tx
    def remove_transaction(self, tx_hash):
        """Remove `tx_hash` from the txi/txo indexes; inputs that spent it
        become pruned entries again.

        NOTE: relies on Python 2 dict.items() returning a list, because
        self.pruned_txo is mutated during the iteration.
        """
        with self.transaction_lock:
            self.print_error("removing tx from history", tx_hash)
            for ser, hh in self.pruned_txo.items():
                if hh == tx_hash:
                    self.pruned_txo.pop(ser)
            # Undo txi entries that referenced this tx.
            for next_tx, dd in self.txi.items():
                for addr, l in dd.items():
                    ll = l[:]
                    for item in ll:
                        ser, v = item
                        prev_hash, prev_n = ser.split(':')
                        if prev_hash == tx_hash:
                            l.remove(item)
                            self.pruned_txo[ser] = next_tx
                    if l == []:
                        dd.pop(addr)
                    else:
                        dd[addr] = l
            try:
                self.txi.pop(tx_hash)
                self.txo.pop(tx_hash)
            except KeyError:
                self.print_error("tx was not in history", tx_hash)
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        # Called by the synchronizer when a new transaction arrives.
        self.add_transaction(tx_hash, tx)
        self.add_unverified_tx(tx_hash, tx_height)
    def receive_history_callback(self, addr, hist, tx_fees):
        # Called by the synchronizer with the new history of `addr`.
        with self.lock:
            old_hist = self.history.get(addr, [])
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # Entry disappeared: drop our reference, and the tx
                    # itself once no address refers to it any more.
                    self.tx_addr_hist[tx_hash].remove(addr)
                    if not self.tx_addr_hist[tx_hash]:
                        self.remove_transaction(tx_hash)
            self.history[addr] = hist
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
            # add reference in tx_addr_hist
            s = self.tx_addr_hist.get(tx_hash, set())
            s.add(addr)
            self.tx_addr_hist[tx_hash] = s
            # if addr is new, we have to recompute txi and txo
            tx = self.transactions.get(tx_hash)
            if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
                self.add_transaction(tx_hash, tx)
        # Store fees
        self.tx_fees.update(tx_fees)
    def get_history(self, domain=None):
        """Build the wallet history over `domain` as a list of
        (tx_hash, height, conf, timestamp, delta, balance) tuples,
        most recent first."""
        # get domain
        if domain is None:
            domain = self.get_addresses()
        # 1. Get the history of each address in the domain, maintain the
        # delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                # None (pruned) poisons the whole tx's delta.
                if delta is None or tx_deltas[tx_hash] is None:
                    tx_deltas[tx_hash] = None
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            height, conf, timestamp = self.get_tx_height(tx_hash)
            history.append((tx_hash, height, conf, timestamp, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]))
        history.reverse()
        # 3. add balance
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        # Walk from newest to oldest, attaching the running balance.
        for tx_hash, height, conf, timestamp, delta in history:
            h2.append((tx_hash, height, conf, timestamp, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        h2.reverse()
        # fixme: this may happen if history is incomplete
        if balance not in [None, 0]:
            self.print_error("Error: history not synchronized")
            return []
        return h2
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if label is '':
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
    def get_tx_status(self, tx_hash, height, conf, timestamp):
        """Map a tx's (height, conf) to a (status_code, status_string)
        pair for display; confirmed txs show their timestamp."""
        from util import format_time
        if conf == 0:
            tx = self.transactions.get(tx_hash)
            if not tx:
                return 3, 'unknown'
            is_final = tx and tx.is_final()
            fee = self.tx_fees.get(tx_hash)
            if fee and self.network and self.network.config.has_fee_estimates():
                # Raw tx is hex, so byte size is half the string length.
                size = len(tx.raw)/2
                low_fee = int(self.network.config.dynfee(0)*size/1000)
                is_lowfee = fee < low_fee * 0.5
            else:
                is_lowfee = False
            if height==0 and not is_final:
                status = 0
            elif height < 0:
                status = 1
            elif height == 0 and is_lowfee:
                status = 2
            elif height == 0:
                status = 3
            else:
                status = 4
        else:
            status = 4 + min(conf, 6)
        time_str = format_time(timestamp) if timestamp else _("unknown")
        status_str = TX_STATUS[status] if status < 5 else time_str
        return status, status_str
def relayfee(self):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = self.network.relay_fee if self.network and self.network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(self):
# Change <= dust threshold is added to the tx fee
return 182 * 3 * self.relayfee() / 1000
def get_tx_fee(self, tx):
# this method can be overloaded
return tx.get_fee()
    def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None):
        """Build an unsigned tx spending `inputs` to `outputs`.

        An output value of '!' means "spend max" (only one allowed).
        Raises NotEnoughFunds when there are no inputs at all.
        """
        # check outputs
        i_max = None
        for i, o in enumerate(outputs):
            _type, data, value = o
            if _type == TYPE_ADDRESS:
                if not is_address(data):
                    raise BaseException("Invalid bitcoin address:" + data)
            if value == '!':
                if i_max is not None:
                    raise BaseException("More than one output set to spend max")
                i_max = i
        # Avoid index-out-of-range with inputs[0] below
        if not inputs:
            raise NotEnoughFunds()
        for item in inputs:
            self.add_input_info(item)
        # change address
        if change_addr:
            change_addrs = [change_addr]
        else:
            addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
            if self.use_change and addrs:
                # New change addresses are created only after a few
                # confirmations. Select the unused addresses within the
                # gap limit; if none take one at random
                change_addrs = [addr for addr in addrs if
                                self.get_num_tx(addr) == 0]
                if not change_addrs:
                    change_addrs = [random.choice(addrs)]
            else:
                # No change chain: send change back to the first input.
                change_addrs = [inputs[0]['address']]
        # Fee estimator
        if fixed_fee is None:
            fee_estimator = partial(self.estimate_fee, config)
        else:
            fee_estimator = lambda size: fixed_fee
        if i_max is None:
            # Let the coin chooser select the coins to spend
            max_change = self.max_change_outputs if self.multiple_change else 1
            coin_chooser = coinchooser.get_coin_chooser(config)
            tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
                                      fee_estimator, self.dust_threshold())
        else:
            # "spend max": send everything minus the fee to output i_max.
            sendable = sum(map(lambda x:x['value'], inputs))
            _type, data, value = outputs[i_max]
            outputs[i_max] = (_type, data, 0)
            tx = Transaction.from_io(inputs, outputs[:])
            fee = fee_estimator(tx.estimated_size())
            amount = max(0, sendable - tx.output_value() - fee)
            outputs[i_max] = (_type, data, amount)
            tx = Transaction.from_io(inputs, outputs[:])
        # Sort the inputs and outputs deterministically
        tx.BIP_LI01_sort()
        # NOTE(review): presumably a proof-of-stake timestamp field — confirm.
        tx.postime = int(time.time())
        run_hook('make_unsigned_transaction', self, tx)
        return tx
def estimate_fee(self, config, size):
fee = int(config.fee_per_kb() * size / 1000.)
return fee
    def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
        # Build and sign a transaction spending coins from `domain`
        # (default: all spendable coins).
        coins = self.get_spendable_coins(domain)
        tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
        self.sign_transaction(tx, password)
        return tx
    def sweep(self, privkeys, network, config, recipient, fee=None, imax=100):
        """Build a signed tx sending everything spendable by `privkeys`
        (up to `imax` UTXOs) to `recipient`.  Returns None when there are
        no coins to sweep."""
        inputs = []
        keypairs = {}
        for privkey in privkeys:
            pubkey = public_key_from_private_key(privkey)
            address = address_from_private_key(privkey)
            # Ask the server for this address's unspent outputs.
            u = network.synchronous_get(('blockchain.address.listunspent', [address]))
            pay_script = Transaction.pay_script(TYPE_ADDRESS, address)
            for item in u:
                if len(inputs) >= imax:
                    break
                item['scriptPubKey'] = pay_script
                item['redeemPubkey'] = pubkey
                item['address'] = address
                item['prevout_hash'] = item['tx_hash']
                item['prevout_n'] = item['tx_pos']
                item['pubkeys'] = [pubkey]
                item['x_pubkeys'] = [pubkey]
                item['signatures'] = [None]
                item['num_sig'] = 1
                inputs.append(item)
                keypairs[pubkey] = privkey
        if not inputs:
            return
        total = sum(i.get('value') for i in inputs)
        if fee is None:
            # Estimate the fee from a draft tx of the full amount.
            outputs = [(TYPE_ADDRESS, recipient, total)]
            tx = Transaction.from_io(inputs, outputs)
            fee = self.estimate_fee(config, tx.estimated_size())
        outputs = [(TYPE_ADDRESS, recipient, total - fee)]
        tx = Transaction.from_io(inputs, outputs)
        tx.sign(keypairs)
        return tx
def is_frozen(self, addr):
return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses))
return True
return False
    def prepare_for_verifier(self):
        # review transactions that are in the history
        for addr, hist in self.history.items():
            for tx_hash, tx_height in hist:
                # add it in case it was previously unconfirmed
                self.add_unverified_tx(tx_hash, tx_height)
        # if we are on a pruning server, remove unverified transactions
        with self.lock:
            # NOTE: Python 2 — dict.keys() returns lists, so + concatenates.
            vr = self.verified_tx.keys() + self.unverified_tx.keys()
        # keys() snapshot (Python 2 list) allows popping during iteration.
        for tx_hash in self.transactions.keys():
            if tx_hash not in vr:
                self.print_error("removing transaction", tx_hash)
                self.transactions.pop(tx_hash)
    def start_threads(self, network):
        # Attach to the network: start the SPV verifier and synchronizer.
        self.network = network
        if self.network is not None:
            self.prepare_for_verifier()
            self.verifier = SPV(self.network, self)
            self.synchronizer = Synchronizer(self, network)
            network.add_jobs([self.verifier, self.synchronizer])
        else:
            self.verifier = None
            self.synchronizer = None
    def stop_threads(self):
        # Detach from the network and persist state to storage.
        if self.network:
            self.network.remove_jobs([self.synchronizer, self.verifier])
            self.synchronizer.release()
            self.synchronizer = None
            self.verifier = None
            # Now no references to the syncronizer or verifier
            # remain so they will be GC-ed
            self.storage.put('stored_height', self.get_local_height())
        self.save_transactions()
        self.storage.put('verified_tx3', self.verified_tx)
        self.storage.write()
    def wait_until_synchronized(self, callback=None):
        """Block until the wallet is fully synchronized (offline wallets
        just run one local synchronize pass).  `callback`, when given,
        receives progress messages."""
        def wait_for_wallet():
            self.set_up_to_date(False)
            while not self.is_up_to_date():
                if callback:
                    # NOTE(review): self.addresses() is not defined in this
                    # class — confirm against the full file.
                    msg = "%s\n%s %d"%(
                        _("Please wait..."),
                        _("Addresses generated:"),
                        len(self.addresses(True)))
                    callback(msg)
                time.sleep(0.1)
        def wait_for_network():
            while not self.network.is_connected():
                if callback:
                    msg = "%s \n" % (_("Connecting..."))
                    callback(msg)
                time.sleep(0.1)
        # wait until we are connected, because the user
        # might have selected another server
        if self.network:
            wait_for_network()
            wait_for_wallet()
        else:
            self.synchronize()
def can_export(self):
return not self.is_watching_only()
def is_used(self, address):
h = self.history.get(address,[])
c, u, x = self.get_addr_balance(address)
return len(h) > 0 and c + u + x == 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def bump_fee(self, tx, delta):
if tx.is_final():
raise BaseException(_("Cannot bump fee: transaction is final"))
inputs = copy.deepcopy(tx.inputs())
outputs = copy.deepcopy(tx.outputs())
for txin in inputs:
txin['signatures'] = [None] * len(txin['signatures'])
self.add_input_info(txin)
# use own outputs
s = filter(lambda x: self.is_mine(x[1]), outputs)
# ... unless there is none
if not s:
s = outputs
# prioritize low value outputs, to get rid of dust
s = sorted(s, key=lambda x: x[2])
for o in s:
i = outputs.index(o)
otype, address, value = o
if value - delta >= self.dust_threshold():
outputs[i] = otype, address, value - delta
delta = 0
break
else:
del outputs[i]
delta -= value
if delta > 0:
continue
if delta > 0:
raise BaseException(_('Cannot bump fee: cound not find suitable outputs'))
return Transaction.from_io(inputs, outputs)
    def add_input_info(self, txin):
        # Add address for utxo that are in wallet
        if txin.get('scriptSig') == '':
            coins = self.get_spendable_coins()
            for item in coins:
                if txin.get('prevout_hash') == item.get('prevout_hash') and txin.get('prevout_n') == item.get('prevout_n'):
                    txin['address'] = item.get('address')
        address = txin['address']
        # For our own addresses, attach signing metadata.
        if self.is_mine(address):
            self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
if k.can_sign(tx):
return True
return False
    def get_input_tx(self, tx_hash):
        # First look up an input transaction in the wallet where it
        # will likely be.  If co-signing a transaction it may not have
        # all the input txs, in which case we ask the network.
        tx = self.transactions.get(tx_hash)
        if not tx and self.network:
            request = ('blockchain.transaction.get', [tx_hash])
            tx = Transaction(self.network.synchronous_get(request))
        return tx
    def add_hw_info(self, tx):
        """Attach the extra metadata hardware wallets need to sign `tx`."""
        # add previous tx for hw wallets
        for txin in tx.inputs():
            tx_hash = txin['prevout_hash']
            txin['prev_tx'] = self.get_input_tx(tx_hash)
        # add output info for hw wallets, so they can recognize change.
        info = {}
        xpubs = self.get_master_public_keys()
        for txout in tx.outputs():
            _type, addr, amount = txout
            if self.is_change(addr):
                index = self.get_address_index(addr)
                pubkeys = self.get_public_keys(addr)
                # sort xpubs using the order of pubkeys
                sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
                info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None
        tx.output_info = info
    def sign_transaction(self, tx, password):
        """Sign `tx` in place with every keystore able to do so; a user
        cancelling one hardware device does not abort the others."""
        if self.is_watching_only():
            return
        # hardware wallets require extra info
        if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
            self.add_hw_info(tx)
        # sign
        for k in self.get_keystores():
            try:
                if k.can_sign(tx):
                    k.sign_transaction(tx, password)
            except UserCancelled:
                continue
def get_unused_addresses(self):
# fixme: use slots from expired requests
domain = self.get_receiving_addresses()
return [addr for addr in domain if not self.history.get(addr)
and addr not in self.receive_requests.keys()]
def get_unused_address(self):
addrs = self.get_unused_addresses()
if addrs:
return addrs[0]
def get_receiving_address(self):
# always return an address
domain = self.get_receiving_addresses()
choice = domain[0]
for addr in domain:
if not self.history.get(addr):
if addr not in self.receive_requests.keys():
return addr
else:
choice = addr
return choice
    def get_payment_status(self, address, amount):
        """Check whether `address` has received at least `amount`.

        Returns (paid, conf) where conf is the confirmation count of the
        last output counted towards the amount (best-confirmed first).
        """
        local_height = self.get_local_height()
        received, sent = self.get_addr_io(address)
        l = []
        for txo, x in received.items():
            h, v, is_cb = x
            txid, n = txo.split(':')
            info = self.verified_tx.get(txid)
            if info:
                tx_height, timestamp, pos = info
                conf = local_height - tx_height
            else:
                conf = 0
            l.append((conf, v))
        vsum = 0
        # Accumulate from most- to least-confirmed outputs.
        for conf, v in reversed(sorted(l)):
            vsum += v
            if vsum >= amount:
                return True, conf
        return False, None
    def get_payment_request(self, addr, config):
        """Assemble the outward view of a stored payment request: payment
        URI, live status and (when requests_dir is configured) the BIP70
        request/websocket URLs.  Returns None when no request exists."""
        import util
        r = self.receive_requests.get(addr)
        if not r:
            return
        out = copy.copy(r)
        out['URI'] = 'trumpcoin:' + addr + '?amount=' + util.format_satoshis(out.get('amount'))
        status, conf = self.get_request_status(addr)
        out['status'] = status
        if conf is not None:
            out['confirmations'] = conf
        # check if bip70 file exists
        rdir = config.get('requests_dir')
        if rdir:
            key = out.get('id', addr)
            # Requests are sharded by the first two key characters.
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if os.path.exists(path):
                baseurl = 'file://' + rdir
                rewrite = config.get('url_rewrite')
                if rewrite:
                    baseurl = baseurl.replace(*rewrite)
                out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
                out['URI'] += '&r=' + out['request_url']
                out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
                websocket_server_announce = config.get('websocket_server_announce')
                if websocket_server_announce:
                    out['websocket_server'] = websocket_server_announce
                else:
                    out['websocket_server'] = config.get('websocket_server', 'localhost')
                websocket_port_announce = config.get('websocket_port_announce')
                if websocket_port_announce:
                    out['websocket_port'] = websocket_port_announce
                else:
                    out['websocket_port'] = config.get('websocket_port', 9999)
        return out
def get_request_status(self, key):
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
    def make_payment_request(self, addr, amount, message, expiration):
        # Build a new request dict; the id is derived from address+timestamp.
        # NOTE: Python 2 — Hash(...) is a str and .encode('hex') hex-encodes it.
        timestamp = int(time.time())
        _id = Hash(addr + "%d"%timestamp).encode('hex')[0:10]
        r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
        return r
    def sign_payment_request(self, key, alias, alias_addr, password):
        # BIP70-sign the stored request with the private key of alias_addr.
        req = self.receive_requests.get(key)
        alias_privkey = self.get_private_key(alias_addr, password)[0]
        pr = paymentrequest.make_unsigned_request(req)
        paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
        req['name'] = pr.pki_data
        req['sig'] = pr.signature.encode('hex')
        self.receive_requests[key] = req
        self.storage.put('payment_requests', self.receive_requests)
    def add_payment_request(self, req, config):
        """Store the request, label the address, and (when requests_dir is
        configured and an amount given) write the BIP70 and JSON files."""
        import os
        addr = req['address']
        amount = req.get('amount')
        message = req.get('memo')
        self.receive_requests[addr] = req
        self.storage.put('payment_requests', self.receive_requests)
        self.set_label(addr, message) # should be a default label
        rdir = config.get('requests_dir')
        if rdir and amount is not None:
            key = req.get('id', addr)
            pr = paymentrequest.make_request(config, req)
            # Requests are sharded by the first two key characters.
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except OSError as exc:
                    if exc.errno != errno.EEXIST:
                        raise
            with open(os.path.join(path, key), 'w') as f:
                f.write(pr.SerializeToString())
            # reload
            req = self.get_payment_request(addr, config)
            with open(os.path.join(path, key + '.json'), 'w') as f:
                f.write(json.dumps(req))
        return req
    def remove_payment_request(self, addr, config):
        """Delete the request for `addr` and any BIP70/JSON files written
        for it.  Returns False when no such request exists."""
        if addr not in self.receive_requests:
            return False
        r = self.receive_requests.pop(addr)
        rdir = config.get('requests_dir')
        if rdir:
            key = r.get('id', addr)
            for s in ['.json', '']:
                n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
                if os.path.exists(n):
                    os.unlink(n)
        self.storage.put('payment_requests', self.receive_requests)
        return True
def get_sorted_requests(self, config):
def f(x):
try:
addr = x.get('address')
return self.get_address_index(addr)
except:
return -1, (0, 0)
return sorted(map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys()), key=f)
    def get_fingerprint(self):
        # Subclasses must provide a wallet fingerprint.
        raise NotImplementedError()
    def can_import_privkey(self):
        return False
    def can_import_address(self):
        return False
    def can_delete_address(self):
        return False
    def add_address(self, address):
        # Start tracking `address` and tell the synchronizer about it.
        if address not in self.history:
            self.history[address] = []
        if self.synchronizer:
            self.synchronizer.add(address)
    def has_password(self):
        return self.storage.get('use_encryption', False)
class Imported_Wallet(Abstract_Wallet):
    # wallet made of imported addresses
    wallet_type = 'imported'
    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
    def load_keystore(self):
        # No keys at all: this wallet tracks addresses only.
        pass
    def load_addresses(self):
        self.addresses = self.storage.get('addresses', [])
        self.receiving_addresses = self.addresses
        self.change_addresses = []
    def get_keystores(self):
        return []
    def has_password(self):
        return False
    def can_change_password(self):
        return False
    def can_import_address(self):
        return True
    def is_watching_only(self):
        return True
    def has_seed(self):
        return False
    def is_deterministic(self):
        return False
    def is_used(self, address):
        return False
    def get_master_public_keys(self):
        return []
    def is_beyond_limit(self, address, is_change):
        return False
    def get_fingerprint(self):
        return ''
    def get_addresses(self, include_change=False):
        return self.addresses
    def import_address(self, address):
        # Add a watch-only address; no-op (returns None) when present.
        if address in self.addresses:
            return
        self.addresses.append(address)
        self.storage.put('addresses', self.addresses)
        self.storage.write()
        self.add_address(address)
        return address
    def can_delete_address(self):
        return True
    def delete_address(self, address):
        if address not in self.addresses:
            return
        self.addresses.remove(address)
        self.storage.put('addresses', self.addresses)
        self.storage.write()
    def get_receiving_addresses(self):
        return self.addresses[:]
    def get_change_addresses(self):
        return []
    def add_input_sig_info(self, txin, address):
        # Watch-only: encode the address itself as an 'fd'-prefixed
        # x_pubkey placeholder, so a cosigner/hardware device can fill
        # in the real key.  NOTE: Python 2 str.encode('hex').
        addrtype, hash160 = bc_address_to_hash_160(address)
        xpubkey = 'fd' + (chr(addrtype) + hash160).encode('hex')
        txin['x_pubkeys'] = [ xpubkey ]
        txin['pubkeys'] = [ xpubkey ]
        txin['signatures'] = [None]
class P2PKH_Wallet(Abstract_Wallet):
    # Wallet whose addresses are pay-to-pubkey-hash of single keys.
    def pubkeys_to_address(self, pubkey):
        # NOTE: Python 2 — str.decode('hex') converts hex to raw bytes.
        return public_key_to_bc_address(pubkey.decode('hex'))
    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore')
    def get_pubkey(self, c, i):
        # c selects the change (truthy) or receiving chain; i is the index.
        pubkey_list = self.change_pubkeys if c else self.receiving_pubkeys
        return pubkey_list[i]
    def get_public_keys(self, address):
        return [self.get_public_key(address)]
    def add_input_sig_info(self, txin, address):
        # Fill single-signature metadata for one of our inputs.
        if not self.keystore.can_import():
            txin['derivation'] = derivation = self.get_address_index(address)
            x_pubkey = self.keystore.get_xpubkey(*derivation)
            pubkey = self.get_pubkey(*derivation)
        else:
            pubkey = self.get_public_key(address)
            assert pubkey is not None
            x_pubkey = pubkey
        txin['x_pubkeys'] = [x_pubkey]
        txin['pubkeys'] = [pubkey]
        txin['signatures'] = [None]
        txin['redeemPubkey'] = pubkey
        txin['num_sig'] = 1
    def sign_message(self, address, message, password):
        index = self.get_address_index(address)
        return self.keystore.sign_message(index, message, password)
    def decrypt_message(self, pubkey, message, password):
        index = self.get_pubkey_index(pubkey)
        return self.keystore.decrypt_message(index, message, password)
class Deterministic_Wallet(Abstract_Wallet):
    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        # Number of consecutive unused addresses to keep generated ahead.
        self.gap_limit = storage.get('gap_limit', 20)
    def has_seed(self):
        # Seed handling is delegated to the keystore.
        return self.keystore.has_seed()
    def is_deterministic(self):
        return self.keystore.is_deterministic()
    def get_receiving_addresses(self):
        return self.receiving_addresses
    def get_change_addresses(self):
        return self.change_addresses
    def get_seed(self, password):
        return self.keystore.get_seed(password)
    def add_seed(self, seed, pw):
        self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_pubkeys = self.receiving_pubkeys[0:n]
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_pubkeys()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for a in addresses[::-1]:
if self.history.get(a):break
k = k + 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.account.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if self.history.get(a):
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change):
pubkey_list = self.change_pubkeys if for_change else self.receiving_pubkeys
n = len(pubkey_list)
x = self.new_pubkeys(for_change, n)
pubkey_list.append(x)
self.save_pubkeys()
address = self.pubkeys_to_address(x)
addr_list = self.change_addresses if for_change else self.receiving_addresses
addr_list.append(address)
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if map(lambda a: self.address_is_old(a), addresses[-limit:] ) == limit*[False]:
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
if self.is_deterministic():
self.synchronize_sequence(False)
self.synchronize_sequence(True)
else:
if len(self.receiving_pubkeys) != len(self.keystore.keypairs):
self.receiving_pubkeys = self.keystore.keypairs.keys()
self.save_pubkeys()
self.receiving_addresses = map(self.pubkeys_to_address, self.receiving_pubkeys)
for addr in self.receiving_addresses:
self.add_address(addr)
def is_beyond_limit(self, address, is_change):
addr_list = self.get_change_addresses() if is_change else self.get_receiving_addresses()
i = addr_list.index(address)
prev_addresses = addr_list[:max(0, i)]
limit = self.gap_limit_for_change if is_change else self.gap_limit
if len(prev_addresses) < limit:
return False
prev_addresses = prev_addresses[max(0, i - limit):]
for addr in prev_addresses:
if self.history.get(addr):
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
class Standard_Wallet(Deterministic_Wallet, P2PKH_Wallet):
wallet_type = 'standard'
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def new_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def check_password(self, password):
self.keystore.check_password(password)
def update_password(self, old_pw, new_pw):
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.put('use_encryption', (new_pw is not None))
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def can_delete_address(self):
return self.keystore.can_import()
def delete_address(self, address):
pubkey = self.get_public_key(address)
self.keystore.delete_imported_key(pubkey)
self.save_keystore()
self.receiving_pubkeys.remove(pubkey)
self.receiving_addresses.remove(address)
self.storage.write()
def can_import_privkey(self):
return self.keystore.can_import()
def import_key(self, pk, pw):
pubkey = self.keystore.import_key(pk, pw)
self.save_keystore()
self.receiving_pubkeys.append(pubkey)
self.save_pubkeys()
addr = self.pubkeys_to_address(pubkey)
self.receiving_addresses.append(addr)
self.storage.write()
self.add_address(addr)
return addr
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
pubkey_list = self.change_pubkeys if c else self.receiving_pubkeys
return pubkey_list[i]
def redeem_script(self, c, i):
pubkeys = self.get_pubkeys(c, i)
return Transaction.multisig_script(sorted(pubkeys), self.m)
def pubkeys_to_address(self, pubkeys):
redeem_script = Transaction.multisig_script(sorted(pubkeys), self.m)
address = hash_160_to_bc_address(hash_160(redeem_script.decode('hex')), bitcoin.ADDRTYPE_P2SH)
return address
def new_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.put('use_encryption', (new_pw is not None))
def check_password(self, password):
self.keystore.check_password(password)
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
txin['derivation'] = derivation = self.get_address_index(address)
pubkeys = self.get_pubkeys(*derivation)
x_pubkeys = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
# sort pubkeys and x_pubkeys, using the order of pubkeys
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = list(pubkeys)
txin['x_pubkeys'] = list(x_pubkeys)
txin['signatures'] = [None] * len(pubkeys)
txin['redeemScript'] = self.redeem_script(*derivation)
txin['num_sig'] = self.m
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported': Imported_Wallet
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise RuntimeError("Unknown wallet type: " + wallet_type)
| true | true |
f7320dd67249784102fd2dc4e3484ed2393a91cc | 1,593 | py | Python | kf_cavatica/files.py | kids-first/kf-cavatica-python-tools | 5f821511685dc63df8785a54c1ac31caebc2cba2 | [
"Apache-2.0"
] | null | null | null | kf_cavatica/files.py | kids-first/kf-cavatica-python-tools | 5f821511685dc63df8785a54c1ac31caebc2cba2 | [
"Apache-2.0"
] | 10 | 2021-03-30T13:16:34.000Z | 2021-08-10T19:17:29.000Z | kf_cavatica/files.py | kids-first/kf-cavatica-python-tools | 5f821511685dc63df8785a54c1ac31caebc2cba2 | [
"Apache-2.0"
] | 1 | 2022-02-04T04:13:48.000Z | 2022-02-04T04:13:48.000Z | import sevenbridges as sbg
from pathlib import Path
# generate the list of files
def list_files_recursively(
api,
query,
parent,
files=[],
folder_name="",
):
"""List all the files in a project.
:param api: API object generated by sevenbridges.Api()
:type api: Sevenbridges API Object
:param query: api.files.query object
:type query: api.files.query
:param parent: parent of the files returned in the query. project if the
query is at the root of a project or file if the query is not at project
root
:type parent: sbg.models.project.Project or sbg.models.file.File
:param files: files returned by the function
:type files: list
:param folder_name: folder name of files within query
:type folder_name: string
:return: list of sevenbridges file objects
:rtype: list
"""
# type checking
if isinstance(parent, sbg.models.project.Project):
parent_id = parent.root_folder
elif isinstance(parent, sbg.models.file.File):
parent_id = parent.id
if not folder_name:
folder_name = Path(folder_name)
for file in query.all():
if not file.is_folder():
file.metadata["parent_file_name"] = folder_name
files.append(file)
else:
folder_name = folder_name / file.name
res = list_files_recursively(
api,
api.files.query(parent=file),
folder_name=folder_name,
parent=file,
)
folder_name = folder_name.parents[0]
return files
| 31.235294 | 77 | 0.638418 | import sevenbridges as sbg
from pathlib import Path
def list_files_recursively(
api,
query,
parent,
files=[],
folder_name="",
):
if isinstance(parent, sbg.models.project.Project):
parent_id = parent.root_folder
elif isinstance(parent, sbg.models.file.File):
parent_id = parent.id
if not folder_name:
folder_name = Path(folder_name)
for file in query.all():
if not file.is_folder():
file.metadata["parent_file_name"] = folder_name
files.append(file)
else:
folder_name = folder_name / file.name
res = list_files_recursively(
api,
api.files.query(parent=file),
folder_name=folder_name,
parent=file,
)
folder_name = folder_name.parents[0]
return files
| true | true |
f7320eb4b502e470801de71bcd1debc943db995d | 1,075 | py | Python | alfred_wrapper.py | fur6y/timely-alfred-workflow | 4ddfd2bd2f37becc78c45c6c07664d44fec5ccde | [
"MIT"
] | 1 | 2018-10-24T20:09:46.000Z | 2018-10-24T20:09:46.000Z | alfred_wrapper.py | fabianfetting/timely-alfred-workflow | 4ddfd2bd2f37becc78c45c6c07664d44fec5ccde | [
"MIT"
] | null | null | null | alfred_wrapper.py | fabianfetting/timely-alfred-workflow | 4ddfd2bd2f37becc78c45c6c07664d44fec5ccde | [
"MIT"
] | null | null | null | import subprocess
import sys
import json
def print_time(t):
output = {
'items': [
{
'uid': 'result',
'type': 'file',
'title': t,
'subtitle': sys.argv[1],
'arg': sys.argv[1],
'icon': {
'path': 'icon.png'
}
}
]
}
output_json = json.dumps(output)
sys.stdout.write(output_json)
def print_invalid():
output = {
'items': [
{
'uid': 'invalid',
'type': 'file',
'title': 'Invalid',
'subtitle': sys.argv[1],
'arg': sys.argv[1],
'icon': {
'path': 'icon.png'
}
}
]
}
output_json = json.dumps(output)
sys.stdout.write(output_json)
try:
result = subprocess.check_output(['./timely.py'] + sys.argv[1:])
except subprocess.CalledProcessError as e:
print_invalid()
exit(0)
print_time(result[:-1])
| 20.673077 | 68 | 0.413953 | import subprocess
import sys
import json
def print_time(t):
output = {
'items': [
{
'uid': 'result',
'type': 'file',
'title': t,
'subtitle': sys.argv[1],
'arg': sys.argv[1],
'icon': {
'path': 'icon.png'
}
}
]
}
output_json = json.dumps(output)
sys.stdout.write(output_json)
def print_invalid():
output = {
'items': [
{
'uid': 'invalid',
'type': 'file',
'title': 'Invalid',
'subtitle': sys.argv[1],
'arg': sys.argv[1],
'icon': {
'path': 'icon.png'
}
}
]
}
output_json = json.dumps(output)
sys.stdout.write(output_json)
try:
result = subprocess.check_output(['./timely.py'] + sys.argv[1:])
except subprocess.CalledProcessError as e:
print_invalid()
exit(0)
print_time(result[:-1])
| true | true |
f7320f0efd80d6927395a59cc57e82be5509efc8 | 594 | py | Python | Vivy/setup.py | yametetomete/EncodeScripts | 925c175b56cdbe1251cf7978620808b01cfca4e5 | [
"MIT"
] | 3 | 2020-08-01T09:39:37.000Z | 2021-12-05T07:31:34.000Z | Vivy/setup.py | yametetomete/EncodeScripts | 925c175b56cdbe1251cf7978620808b01cfca4e5 | [
"MIT"
] | null | null | null | Vivy/setup.py | yametetomete/EncodeScripts | 925c175b56cdbe1251cf7978620808b01cfca4e5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import setuptools
name = "vivy_common"
version = "0.0.0"
release = "0.0.0"
setuptools.setup(
name=name,
version=release,
author="louis",
author_email="louis@poweris.moe",
description="yametetomete vivy common module",
packages=["vivy_common"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data={
'vivy_common': ['py.typed', 'workraw-settings', 'final-settings'],
},
python_requires='>=3.8',
)
| 22.846154 | 74 | 0.619529 |
import setuptools
name = "vivy_common"
version = "0.0.0"
release = "0.0.0"
setuptools.setup(
name=name,
version=release,
author="louis",
author_email="louis@poweris.moe",
description="yametetomete vivy common module",
packages=["vivy_common"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data={
'vivy_common': ['py.typed', 'workraw-settings', 'final-settings'],
},
python_requires='>=3.8',
)
| true | true |
f73210129a2c3754a07a4faf0ce894d3104d954c | 5,446 | py | Python | losses.py | koba35/retinanet | 99820cde438a2fc14e38973437766de6fe6a94a3 | [
"Apache-2.0"
] | null | null | null | losses.py | koba35/retinanet | 99820cde438a2fc14e38973437766de6fe6a94a3 | [
"Apache-2.0"
] | null | null | null | losses.py | koba35/retinanet | 99820cde438a2fc14e38973437766de6fe6a94a3 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
class FocalLoss(nn.Module):
# def __init__(self):
def forward(self, classifications, regressions, anchors, annotations):
alpha = 0.25
gamma = 2.0
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
anchor = anchors[0, :, :]
anchor_widths = anchor[:, 2] - anchor[:, 0]
anchor_heights = anchor[:, 3] - anchor[:, 1]
anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights
for j in range(batch_size):
classification = classifications[j, :, :]
regression = regressions[j, :, :]
bbox_annotation = annotations[j, :, :]
bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
if bbox_annotation.shape[0] == 0:
regression_losses.append(torch.tensor(0).float().cuda())
classification_losses.append(torch.tensor(0).float().cuda())
continue
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations
IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1
# import pdb
# pdb.set_trace()
# compute the loss for classification
targets = torch.ones(classification.shape) * -1
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
alpha_factor = torch.ones(targets.shape).cuda() * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
# cls_loss = focal_weight * torch.pow(bce, gamma)
cls_loss = focal_weight * bce
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())
classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0))
# compute the loss for regression
if positive_indices.sum() > 0:
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# clip widths to 1
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))
targets = targets.t()
targets = targets / torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()
negative_indices = 1 - positive_indices
regression_diff = torch.abs(targets - regression[positive_indices, :])
regression_loss = torch.where(
torch.le(regression_diff, 1.0 / 9.0),
0.5 * 9.0 * torch.pow(regression_diff, 2),
regression_diff - 0.5 / 9.0
)
regression_losses.append(regression_loss.mean())
else:
regression_losses.append(torch.tensor(0).float().cuda())
return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0,
keepdim=True)
| 38.9 | 126 | 0.57051 | import numpy as np
import torch
import torch.nn as nn
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
class FocalLoss(nn.Module):
def forward(self, classifications, regressions, anchors, annotations):
alpha = 0.25
gamma = 2.0
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
anchor = anchors[0, :, :]
anchor_widths = anchor[:, 2] - anchor[:, 0]
anchor_heights = anchor[:, 3] - anchor[:, 1]
anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights
for j in range(batch_size):
classification = classifications[j, :, :]
regression = regressions[j, :, :]
bbox_annotation = annotations[j, :, :]
bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
if bbox_annotation.shape[0] == 0:
regression_losses.append(torch.tensor(0).float().cuda())
classification_losses.append(torch.tensor(0).float().cuda())
continue
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4])
IoU_max, IoU_argmax = torch.max(IoU, dim=1)
targets = torch.ones(classification.shape) * -1
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
alpha_factor = torch.ones(targets.shape).cuda() * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())
classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0))
if positive_indices.sum() > 0:
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))
targets = targets.t()
targets = targets / torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()
negative_indices = 1 - positive_indices
regression_diff = torch.abs(targets - regression[positive_indices, :])
regression_loss = torch.where(
torch.le(regression_diff, 1.0 / 9.0),
0.5 * 9.0 * torch.pow(regression_diff, 2),
regression_diff - 0.5 / 9.0
)
regression_losses.append(regression_loss.mean())
else:
regression_losses.append(torch.tensor(0).float().cuda())
return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0,
keepdim=True)
| true | true |
f73211257b07190070fdd8e364260ba0efeb2273 | 922 | py | Python | test/testCommentOptions.py | apiiro/lizard | 083c3a023559ddc9be44581a207bf5e16ce7e16b | [
"MIT"
] | null | null | null | test/testCommentOptions.py | apiiro/lizard | 083c3a023559ddc9be44581a207bf5e16ce7e16b | [
"MIT"
] | null | null | null | test/testCommentOptions.py | apiiro/lizard | 083c3a023559ddc9be44581a207bf5e16ce7e16b | [
"MIT"
] | null | null | null | import unittest
from .testHelpers import get_cpp_function_list
class TestCommentOptions(unittest.TestCase):
def test_function_with_comment_option_should_be_forgiven(self):
function_list = get_cpp_function_list("void foo(){/* #lizard forgives*/}")
self.assertEqual(0, len(function_list))
def test_function_with_comment_option_before_it_should_be_forgiven(self):
function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}")
self.assertEqual(0, len(function_list))
def test_function_after_comment_option_should_not_be_forgiven(self):
function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}void bar(){}")
self.assertEqual(1, len(function_list))
def test_generated_code_should_be_ignored(self):
function_list = get_cpp_function_list("/* GENERATED CODE */void foo(){}")
self.assertEqual(0, len(function_list))
| 40.086957 | 94 | 0.74295 | import unittest
from .testHelpers import get_cpp_function_list
class TestCommentOptions(unittest.TestCase):
def test_function_with_comment_option_should_be_forgiven(self):
function_list = get_cpp_function_list("void foo(){/* #lizard forgives*/}")
self.assertEqual(0, len(function_list))
def test_function_with_comment_option_before_it_should_be_forgiven(self):
function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}")
self.assertEqual(0, len(function_list))
def test_function_after_comment_option_should_not_be_forgiven(self):
function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}void bar(){}")
self.assertEqual(1, len(function_list))
def test_generated_code_should_be_ignored(self):
function_list = get_cpp_function_list("/* GENERATED CODE */void foo(){}")
self.assertEqual(0, len(function_list))
| true | true |
f7321235fc8b743ae4d4140233e088137b8036ee | 14,754 | py | Python | contextual_lenses/train_utils.py | googleinterns/protein-embedding-retrieval | be198b5f95d7b97a06ed04a6b131fc10573365fb | [
"Apache-2.0"
] | 31 | 2020-10-29T13:59:18.000Z | 2021-09-13T08:37:31.000Z | contextual_lenses/train_utils.py | amirshane/protein-embedding-retrieval | 388563d3206e1486fe5dbcfd8326be6f1185a00e | [
"Apache-2.0"
] | 7 | 2020-06-01T20:42:12.000Z | 2021-05-31T10:48:10.000Z | contextual_lenses/train_utils.py | amirshane/protein-embedding-retrieval | 388563d3206e1486fe5dbcfd8326be6f1185a00e | [
"Apache-2.0"
] | 7 | 2020-05-18T21:07:23.000Z | 2020-11-23T11:46:23.000Z | """Train utils
General tools for instantiating and training models.
"""
import flax
from flax import nn
from flax import optim
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from jax.config import config
config.enable_omnistaging()
import tensorflow as tf
import numpy as np
import functools
import copy
from google_research.protein_lm import models
# Data batching.
def create_data_iterator(df,
input_col,
output_col,
batch_size,
epochs=1,
buffer_size=None,
seed=0,
drop_remainder=False,
add_outputs=True,
as_numpy=True):
"""Creates iterator of batches of (inputs) or (inputs, outputs)."""
if buffer_size is None:
buffer_size = len(df)
inputs = list(df[input_col].values)
inputs = tf.data.Dataset.from_tensor_slices(inputs)
outputs = df[output_col].values
outputs = tf.data.Dataset.from_tensor_slices(outputs)
if add_outputs:
batches = tf.data.Dataset.zip(
(inputs, outputs)).shuffle(buffer_size=buffer_size,
seed=seed,
reshuffle_each_iteration=True)
else:
batches = inputs.shuffle(buffer_size=buffer_size,
seed=seed,
reshuffle_each_iteration=True)
batches = batches.repeat(epochs).batch(batch_size=batch_size,
drop_remainder=drop_remainder)
if as_numpy:
batches = batches.as_numpy_iterator()
return batches
def path_inclusion_filter_fn(path, param, layer):
"""Returns whether or not layer name is contained in path."""
return layer in path
def create_optimizer(model, learning_rate, weight_decay, layers=None):
"""Instantiates Adam multi-optimizer."""
if layers is None:
assert (
type(learning_rate) == type(weight_decay) == float
), 'Specify float values for moded learning rate and weight decay!'
optimizer_def = optim.Adam(learning_rate=learning_rate,
weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
else:
assert (
len(learning_rate) == len(weight_decay) == len(layers)
), 'Number of specified learning rates, weight decays, and layers must be equal!'
optimizers = []
for lr, wd, layer in zip(learning_rate, weight_decay, layers):
if lr > 0:
opt = optim.Adam(learning_rate=lr, weight_decay=wd)
filter_fn = functools.partial(path_inclusion_filter_fn,
layer=layer)
traversal = optim.ModelParamTraversal(filter_fn)
traversal_opt = (traversal, opt)
optimizers.append(traversal_opt)
optimizer_def = optim.MultiOptimizer(*optimizers)
optimizer = optimizer_def.create(model)
return optimizer
@functools.partial(jax.jit, static_argnums=(3, 4))
def train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs):
"""Trains model (optimizer.target) using specified loss function."""
def compute_loss_fn(model, X, Y, loss_fn, loss_fn_kwargs):
Y_hat = model(X)
loss = loss_fn(Y, Y_hat, **loss_fn_kwargs)
return loss
grad_fn = jax.value_and_grad(compute_loss_fn)
_, grad = grad_fn(optimizer.target, X, Y, loss_fn, loss_fn_kwargs)
optimizer = optimizer.apply_gradient(grad)
return optimizer
def get_p_train_step():
"""Wraps train_step with jax.pmap."""
p_train_step = jax.pmap(train_step,
axis_name='batch',
static_broadcasted_argnums=(3, 4))
return p_train_step
def train(model,
train_data,
loss_fn,
loss_fn_kwargs,
learning_rate=1e-4,
weight_decay=0.1,
layers=None,
restore_dir=None,
save_dir=None,
use_pmap=False):
"""Instantiates optimizer, applies train_step/p_train_step over training data."""
optimizer = create_optimizer(model,
learning_rate=learning_rate,
weight_decay=weight_decay,
layers=layers)
if restore_dir is not None:
optimizer = checkpoints.restore_checkpoint(ckpt_dir=restore_dir,
target=optimizer)
if use_pmap:
p_train_step = get_p_train_step()
optimizer = optimizer.replicate()
for batch in iter(train_data):
X, Y = batch
X, Y = common_utils.shard(X), common_utils.shard(Y)
optimizer = p_train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)
optimizer = optimizer.unreplicate()
else:
for batch in iter(train_data):
X, Y = batch
optimizer = train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)
if save_dir is not None:
state = optimizer.state
if type(state) == list:
step = [sub_state.step for sub_state in state]
else:
step = state.step
checkpoints.save_checkpoint(ckpt_dir=save_dir,
target=optimizer,
step=step)
return optimizer
def load_params(params,
encoder_fn_params=None,
reduce_fn_params=None,
predict_fn_params=None):
"""Updates randomly initialized parameters using loaded parameters."""
loaded_params = copy.deepcopy(params)
fn_names = list(loaded_params.keys())
num_learnable_layers = len([
params_dict for params_dict in
[encoder_fn_params, reduce_fn_params, predict_fn_params]
if params_dict is not None
])
if encoder_fn_params is not None:
encoder_fn_ind = '_0'
if reduce_fn_params is not None:
reduce_fn_ind = '_1'
predict_fn_ind = '_2'
else:
predict_fn_ind = '_1'
else:
if reduce_fn_params is not None:
reduce_fn_ind = '_0'
predict_fn_ind = '_1'
else:
predict_fn_ind = '_0'
assert (len(loaded_params.keys()) >= num_learnable_layers
), 'Model encoder and lens architecture incorrectly specified!'
encoder_fn_name = None
if encoder_fn_params is not None:
for fn_name in fn_names:
if encoder_fn_ind in fn_name:
if encoder_fn_name is not None:
raise ValueError(
'Multiple instances of encoder_fn detected. %s' %
fn_name)
encoder_fn_name = fn_name
loaded_params[encoder_fn_name] = encoder_fn_params
reduce_fn_name = None
if reduce_fn_params is not None:
for fn_name in fn_names:
if reduce_fn_ind in fn_name:
if reduce_fn_name is not None:
raise ValueError(
'Multiple instances of reduce_fn detected. %s' %
fn_name)
reduce_fn_name = fn_name
loaded_params[reduce_fn_name] = reduce_fn_params
predict_fn_name = None
if predict_fn_params is not None:
for fn_name in fn_names:
if predict_fn_ind in fn_name:
if predict_fn_name is not None:
raise ValueError(
'Multiple instances of predict_fn detected. %s' %
fn_name)
predict_fn_name = fn_name
loaded_params[predict_fn_name] = predict_fn_params
return loaded_params
class RepresentationModel(nn.Module):
def apply(self,
x,
encoder_fn,
encoder_fn_kwargs,
reduce_fn,
reduce_fn_kwargs,
num_categories,
output_features,
output='prediction',
use_transformer=False,
padding_mask=None):
"""Computes padding mask, encodes indices using embeddings,
applies lensing operation, predicts scalar value.
"""
outputs = dict()
if padding_mask is None:
padding_mask = jnp.expand_dims(jnp.where(x < num_categories - 1, 1,
0),
axis=2)
if not use_transformer:
x = encoder_fn(x,
num_categories=num_categories,
**encoder_fn_kwargs)
else:
x = encoder_fn(x)
rep = reduce_fn(x, padding_mask=padding_mask, **reduce_fn_kwargs)
outputs['embedding'] = rep
out = nn.Dense(rep,
output_features,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6))
outputs['prediction'] = out
return outputs[output]
def create_representation_model(encoder_fn,
                                encoder_fn_kwargs,
                                reduce_fn,
                                reduce_fn_kwargs,
                                num_categories,
                                output_features,
                                output='prediction',
                                key=random.PRNGKey(0),
                                encoder_fn_params=None,
                                reduce_fn_params=None,
                                predict_fn_params=None):
    """Instantiates a RepresentationModel object.

    Builds the module, initializes its parameters from a (1, 1) dummy
    input, then overwrites individual layer parameter collections with any
    of `encoder_fn_params` / `reduce_fn_params` / `predict_fn_params`
    supplied (see `load_params`).

    Note: `key=random.PRNGKey(0)` is evaluated once at definition time, so
    the default initialization is deterministic across calls.
    """
    module = RepresentationModel.partial(encoder_fn=encoder_fn,
                                         encoder_fn_kwargs=encoder_fn_kwargs,
                                         reduce_fn=reduce_fn,
                                         reduce_fn_kwargs=reduce_fn_kwargs,
                                         num_categories=num_categories,
                                         output_features=output_features,
                                         output=output,
                                         use_transformer=False)
    # Shape-based init: parameters are created without real data.
    _, initial_params = RepresentationModel.init_by_shape(
        key,
        input_specs=[((1, 1), jnp.float32)],
        encoder_fn=encoder_fn,
        encoder_fn_kwargs=encoder_fn_kwargs,
        reduce_fn=reduce_fn,
        reduce_fn_kwargs=reduce_fn_kwargs,
        num_categories=num_categories,
        output_features=output_features,
        output=output,
        use_transformer=False)
    # Splice in any pretrained per-layer parameters.
    loaded_params = load_params(initial_params, encoder_fn_params,
                                reduce_fn_params, predict_fn_params)
    model = nn.Model(module, loaded_params)
    return model
def create_transformer_representation_model(transformer_kwargs,
                                            reduce_fn,
                                            reduce_fn_kwargs,
                                            num_categories,
                                            output_features,
                                            bidirectional=False,
                                            output='prediction',
                                            key=random.PRNGKey(0),
                                            encoder_fn_params=None,
                                            reduce_fn_params=None,
                                            predict_fn_params=None):
    """Instantiates a RepresentationModel object with Transformer encoder.

    The encoder is a protein_lm FlaxLM (causal) or FlaxBERT
    (`bidirectional=True`) configured to emit per-position embeddings
    (`output_head='output_emb'`) instead of logits.
    """
    if not bidirectional:
        transformer = models.FlaxLM(**transformer_kwargs)
    else:
        transformer = models.FlaxBERT(**transformer_kwargs)
    # NOTE(review): reaches into the private `_optimizer` attribute of the
    # protein_lm model to recover the (unreplicated) underlying module.
    transformer_optimizer = transformer._optimizer
    transformer_model = models.jax_utils.unreplicate(
        transformer_optimizer.target)
    transformer_encoder = transformer_model.module.partial(
        output_head='output_emb')
    module = RepresentationModel.partial(encoder_fn=transformer_encoder,
                                         encoder_fn_kwargs={},
                                         reduce_fn=reduce_fn,
                                         reduce_fn_kwargs=reduce_fn_kwargs,
                                         num_categories=num_categories,
                                         output_features=output_features,
                                         output=output,
                                         use_transformer=True)
    # Shape-based init with a (1, 1) dummy input.
    _, initial_params = RepresentationModel.init_by_shape(
        key,
        input_specs=[((1, 1), jnp.float32)],
        encoder_fn=transformer_encoder,
        encoder_fn_kwargs={},
        reduce_fn=reduce_fn,
        reduce_fn_kwargs=reduce_fn_kwargs,
        num_categories=num_categories,
        output_features=output_features,
        output=output,
        use_transformer=True)
    # Splice in pretrained per-layer parameters (see load_params).
    loaded_params = load_params(initial_params, encoder_fn_params,
                                reduce_fn_params, predict_fn_params)
    model = nn.Model(module, loaded_params)
    return model
def architecture_to_layers(encoder_fn_name, reduce_fn_name):
    """Maps encoder/lens names to the trainable flax layer names.

    Layer suffixes (_0, _1, _2) reflect creation order: a trainable
    encoder comes first, then a trainable lens, then the dense
    prediction head.

    Args:
      encoder_fn_name: one of None, 'transformer', 'one_hot',
        'cnn_one_hot'.
      reduce_fn_name: one of 'mean_pool', 'max_pool', 'linear_mean_pool',
        'linear_max_pool', 'gated_conv'.

    Returns:
      Tuple (layer_names, trainable_encoder).

    Raises:
      ValueError: for an unrecognized encoder or lens name.
    """
    layers = []

    # Encoder stage: plain one-hot encoding has no trainable parameters.
    if encoder_fn_name is None or encoder_fn_name == 'transformer':
        encoder_layer = 'Transformer_0'
    elif encoder_fn_name == 'one_hot':
        encoder_layer = None
    elif encoder_fn_name == 'cnn_one_hot':
        encoder_layer = 'CNN_0'
    else:
        raise ValueError('Incorrect encoder name specified.')
    trainable_encoder = encoder_layer is not None
    if trainable_encoder:
        layers.append(encoder_layer)

    # Lens stage: its layer index is 0 unless a trainable encoder
    # precedes it.
    lens_index = int(trainable_encoder)
    if reduce_fn_name in ('mean_pool', 'max_pool'):
        trainable_lens = False
    elif reduce_fn_name in ('linear_mean_pool', 'linear_max_pool'):
        trainable_lens = True
        layers.append('Dense_%d' % lens_index)
    elif reduce_fn_name == 'gated_conv':
        trainable_lens = True
        layers.append('GatedConv_%d' % lens_index)
    else:
        raise ValueError('Incorrect lens name specified.')

    # Prediction head: index equals the number of trainable Dense-bearing
    # stages before it.
    layers.append('Dense_%d' % (int(trainable_encoder) + int(trainable_lens)))
    return layers, trainable_encoder
| 34.232019 | 89 | 0.560729 |
import flax
from flax import nn
from flax import optim
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from jax.config import config
config.enable_omnistaging()
import tensorflow as tf
import numpy as np
import functools
import copy
from google_research.protein_lm import models
def create_data_iterator(df,
                         input_col,
                         output_col,
                         batch_size,
                         epochs=1,
                         buffer_size=None,
                         seed=0,
                         drop_remainder=False,
                         add_outputs=True,
                         as_numpy=True):
    """Builds a shuffled, batched tf.data pipeline from two DataFrame columns.

    Args:
      df: pandas DataFrame (presumably — inferred from `.values` usage).
      input_col: column holding model inputs.
      output_col: column holding targets; ignored when add_outputs=False.
      batch_size: examples per batch.
      epochs: number of passes over the data.
      buffer_size: shuffle buffer; defaults to the full dataset size
        (perfect shuffle).
      seed: shuffle seed (reshuffled each iteration).
      drop_remainder: drop the final short batch if True.
      add_outputs: when True, yield (input, output) pairs; else inputs only.
      as_numpy: when True, return a numpy iterator instead of a tf Dataset.

    Returns:
      A tf.data.Dataset, or its numpy iterator when `as_numpy`.
    """
    if buffer_size is None:
        buffer_size = len(df)
    inputs = list(df[input_col].values)
    inputs = tf.data.Dataset.from_tensor_slices(inputs)
    outputs = df[output_col].values
    outputs = tf.data.Dataset.from_tensor_slices(outputs)
    if add_outputs:
        # Zip keeps input/output alignment through the shuffle.
        batches = tf.data.Dataset.zip(
            (inputs, outputs)).shuffle(buffer_size=buffer_size,
                                       seed=seed,
                                       reshuffle_each_iteration=True)
    else:
        batches = inputs.shuffle(buffer_size=buffer_size,
                                 seed=seed,
                                 reshuffle_each_iteration=True)
    batches = batches.repeat(epochs).batch(batch_size=batch_size,
                                           drop_remainder=drop_remainder)
    if as_numpy:
        batches = batches.as_numpy_iterator()
    return batches
def path_inclusion_filter_fn(path, param, layer):
    """Returns True when `layer` occurs in the parameter `path`.

    Used (via functools.partial with `layer=` bound) as the predicate for
    optim.ModelParamTraversal in create_optimizer; `param` is required by
    that callback signature but unused here.
    """
    return layer in path
def create_optimizer(model, learning_rate, weight_decay, layers=None):
    """Creates an Adam optimizer for `model`, optionally per layer.

    Args:
      model: flax nn.Model to optimize.
      learning_rate: float, or a sequence of floats (one per `layers`
        entry) when `layers` is given.
      weight_decay: float, or a sequence matching `learning_rate`.
      layers: optional sequence of layer-name substrings. When given, each
        layer gets its own Adam optimizer restricted to parameters whose
        path contains the layer name; entries with a non-positive learning
        rate are frozen (no optimizer is created for them).

    Returns:
      A flax optimizer (Adam or MultiOptimizer) created over `model`.
    """
    if layers is None:
        # Fixed error message: previously read "moded" instead of "model".
        assert (
            type(learning_rate) == type(weight_decay) == float
        ), 'Specify float values for model learning rate and weight decay!'
        optimizer_def = optim.Adam(learning_rate=learning_rate,
                                   weight_decay=weight_decay)
        optimizer = optimizer_def.create(model)
    else:
        assert (
            len(learning_rate) == len(weight_decay) == len(layers)
        ), 'Number of specified learning rates, weight decays, and layers must be equal!'
        optimizers = []
        for lr, wd, layer in zip(learning_rate, weight_decay, layers):
            # A non-positive learning rate freezes this layer entirely.
            if lr > 0:
                opt = optim.Adam(learning_rate=lr, weight_decay=wd)
                # Restrict this sub-optimizer to parameters whose path
                # contains the layer name.
                filter_fn = functools.partial(path_inclusion_filter_fn,
                                              layer=layer)
                traversal = optim.ModelParamTraversal(filter_fn)
                optimizers.append((traversal, opt))
        optimizer_def = optim.MultiOptimizer(*optimizers)
        optimizer = optimizer_def.create(model)
    return optimizer
@functools.partial(jax.jit, static_argnums=(3, 4))
def train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs):
    """Performs one jitted gradient step and returns the updated optimizer.

    `loss_fn` and `loss_fn_kwargs` are static jit arguments, so they must
    be hashable and each new value triggers recompilation — NOTE(review):
    a plain dict for `loss_fn_kwargs` is unhashable; callers presumably
    pass a hashable mapping. TODO confirm.
    """
    def compute_loss_fn(model, X, Y, loss_fn, loss_fn_kwargs):
        # Scalar loss of the model's predictions against targets Y.
        Y_hat = model(X)
        loss = loss_fn(Y, Y_hat, **loss_fn_kwargs)
        return loss
    grad_fn = jax.value_and_grad(compute_loss_fn)
    # Loss value is discarded; only the gradient is applied.
    _, grad = grad_fn(optimizer.target, X, Y, loss_fn, loss_fn_kwargs)
    optimizer = optimizer.apply_gradient(grad)
    return optimizer
def get_p_train_step():
    """Returns a pmapped train_step for multi-device (data-parallel) training.

    `loss_fn` and `loss_fn_kwargs` (argnums 3, 4) are broadcast statically
    to all devices; gradients are combined along the 'batch' axis.
    """
    p_train_step = jax.pmap(train_step,
                            axis_name='batch',
                            static_broadcasted_argnums=(3, 4))
    return p_train_step
def train(model,
          train_data,
          loss_fn,
          loss_fn_kwargs,
          learning_rate=1e-4,
          weight_decay=0.1,
          layers=None,
          restore_dir=None,
          save_dir=None,
          use_pmap=False):
    """Trains `model` over `train_data` and returns the final optimizer.

    Args:
      model: flax nn.Model to train.
      train_data: iterable of (X, Y) batches.
      loss_fn, loss_fn_kwargs: passed through to train_step (static jit
        args — see train_step).
      learning_rate, weight_decay, layers: forwarded to create_optimizer
        (per-layer optimizers when `layers` is given).
      restore_dir: if set, restore optimizer state from this checkpoint dir.
      save_dir: if set, save a checkpoint after training.
      use_pmap: if True, shard batches across devices and use a pmapped
        train step.
    """
    optimizer = create_optimizer(model,
                                 learning_rate=learning_rate,
                                 weight_decay=weight_decay,
                                 layers=layers)
    if restore_dir is not None:
        optimizer = checkpoints.restore_checkpoint(ckpt_dir=restore_dir,
                                                   target=optimizer)
    if use_pmap:
        p_train_step = get_p_train_step()
        # Replicate optimizer state to all devices, shard each batch.
        optimizer = optimizer.replicate()
        for batch in iter(train_data):
            X, Y = batch
            X, Y = common_utils.shard(X), common_utils.shard(Y)
            optimizer = p_train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)
        optimizer = optimizer.unreplicate()
    else:
        for batch in iter(train_data):
            X, Y = batch
            optimizer = train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)
    if save_dir is not None:
        state = optimizer.state
        # MultiOptimizer (per-layer training) keeps a list of states,
        # hence a list of step counters.
        if type(state) == list:
            step = [sub_state.step for sub_state in state]
        else:
            step = state.step
        checkpoints.save_checkpoint(ckpt_dir=save_dir,
                                    target=optimizer,
                                    step=step)
    return optimizer
def _overwrite_unique(loaded_params, fn_names, suffix, layer_param, error_fmt):
    # Find the single key containing `suffix` and overwrite its parameter
    # collection; a second match means the architecture is ambiguous.
    match = None
    for fn_name in fn_names:
        if suffix in fn_name:
            if match is not None:
                raise ValueError(error_fmt % fn_name)
            match = fn_name
    loaded_params[match] = layer_param


def load_params(params,
                encoder_fn_params=None,
                reduce_fn_params=None,
                predict_fn_params=None):
    """Returns a deep copy of `params` with per-layer collections replaced.

    Layer keys are matched by positional suffix: among the supplied
    (non-None) parameter sets, the first present one matches the key
    containing '_0', the next '_1', and so on — mirroring flax layer
    creation order (encoder, then lens, then prediction head).

    Raises:
      ValueError: if a suffix matches more than one key in `params`.
    """
    loaded_params = copy.deepcopy(params)
    fn_names = list(loaded_params.keys())
    layer_params = (encoder_fn_params, reduce_fn_params, predict_fn_params)
    num_learnable_layers = len(
        [layer for layer in layer_params if layer is not None])
    assert (len(loaded_params.keys()) >= num_learnable_layers
            ), 'Model encoder and lens architecture incorrectly specified!'
    error_fmts = ('Multiple instances of encoder_fn detected. %s',
                  'Multiple instances of reduce_fn detected. %s',
                  'Multiple instances of predict_fn detected. %s')
    position = 0
    for layer_param, error_fmt in zip(layer_params, error_fmts):
        if layer_param is None:
            continue
        # Suffix index = number of learnable layers created before this one.
        suffix = '_%d' % position
        position += 1
        _overwrite_unique(loaded_params, fn_names, suffix, layer_param,
                          error_fmt)
    return loaded_params
class RepresentationModel(nn.Module):
    """Embedding encoder + lens (reduce) + dense prediction head.

    Flax pre-Linen module; `apply` is the forward pass.
    """

    def apply(self,
              x,
              encoder_fn,
              encoder_fn_kwargs,
              reduce_fn,
              reduce_fn_kwargs,
              num_categories,
              output_features,
              output='prediction',
              use_transformer=False,
              padding_mask=None):
        """Encodes `x`, pools with `reduce_fn`, and applies a dense head.

        Returns the tensor named by `output`: 'embedding' (pooled
        representation) or 'prediction' (dense head output).
        """
        outputs = dict()
        if padding_mask is None:
            # 1 for real tokens, 0 for padding; index num_categories - 1
            # (and above) is treated as padding — TODO confirm tokenizer.
            padding_mask = jnp.expand_dims(jnp.where(x < num_categories - 1, 1,
                                                     0),
                                           axis=2)
        if not use_transformer:
            x = encoder_fn(x,
                           num_categories=num_categories,
                           **encoder_fn_kwargs)
        else:
            # Transformer encoders take only the token indices.
            x = encoder_fn(x)
        rep = reduce_fn(x, padding_mask=padding_mask, **reduce_fn_kwargs)
        outputs['embedding'] = rep
        out = nn.Dense(rep,
                       output_features,
                       kernel_init=nn.initializers.xavier_uniform(),
                       bias_init=nn.initializers.normal(stddev=1e-6))
        outputs['prediction'] = out
        return outputs[output]
def create_representation_model(encoder_fn,
                                encoder_fn_kwargs,
                                reduce_fn,
                                reduce_fn_kwargs,
                                num_categories,
                                output_features,
                                output='prediction',
                                key=random.PRNGKey(0),
                                encoder_fn_params=None,
                                reduce_fn_params=None,
                                predict_fn_params=None):
    """Builds a RepresentationModel with optionally preloaded layer params.

    Initializes from a (1, 1) dummy input, then splices in any supplied
    per-layer parameter collections via `load_params`. The default `key`
    is fixed at definition time, so initialization is deterministic.
    """
    module = RepresentationModel.partial(encoder_fn=encoder_fn,
                                         encoder_fn_kwargs=encoder_fn_kwargs,
                                         reduce_fn=reduce_fn,
                                         reduce_fn_kwargs=reduce_fn_kwargs,
                                         num_categories=num_categories,
                                         output_features=output_features,
                                         output=output,
                                         use_transformer=False)
    # Shape-based parameter initialization (no real data needed).
    _, initial_params = RepresentationModel.init_by_shape(
        key,
        input_specs=[((1, 1), jnp.float32)],
        encoder_fn=encoder_fn,
        encoder_fn_kwargs=encoder_fn_kwargs,
        reduce_fn=reduce_fn,
        reduce_fn_kwargs=reduce_fn_kwargs,
        num_categories=num_categories,
        output_features=output_features,
        output=output,
        use_transformer=False)
    loaded_params = load_params(initial_params, encoder_fn_params,
                                reduce_fn_params, predict_fn_params)
    model = nn.Model(module, loaded_params)
    return model
def create_transformer_representation_model(transformer_kwargs,
                                            reduce_fn,
                                            reduce_fn_kwargs,
                                            num_categories,
                                            output_features,
                                            bidirectional=False,
                                            output='prediction',
                                            key=random.PRNGKey(0),
                                            encoder_fn_params=None,
                                            reduce_fn_params=None,
                                            predict_fn_params=None):
    """Builds a RepresentationModel whose encoder is a protein_lm transformer.

    Uses FlaxLM (causal) or FlaxBERT (`bidirectional=True`) configured to
    emit per-position embeddings (`output_head='output_emb'`).
    """
    if not bidirectional:
        transformer = models.FlaxLM(**transformer_kwargs)
    else:
        transformer = models.FlaxBERT(**transformer_kwargs)
    # NOTE(review): accesses the private `_optimizer` attribute to recover
    # the unreplicated transformer module.
    transformer_optimizer = transformer._optimizer
    transformer_model = models.jax_utils.unreplicate(
        transformer_optimizer.target)
    transformer_encoder = transformer_model.module.partial(
        output_head='output_emb')
    module = RepresentationModel.partial(encoder_fn=transformer_encoder,
                                         encoder_fn_kwargs={},
                                         reduce_fn=reduce_fn,
                                         reduce_fn_kwargs=reduce_fn_kwargs,
                                         num_categories=num_categories,
                                         output_features=output_features,
                                         output=output,
                                         use_transformer=True)
    # Shape-based init from a (1, 1) dummy input.
    _, initial_params = RepresentationModel.init_by_shape(
        key,
        input_specs=[((1, 1), jnp.float32)],
        encoder_fn=transformer_encoder,
        encoder_fn_kwargs={},
        reduce_fn=reduce_fn,
        reduce_fn_kwargs=reduce_fn_kwargs,
        num_categories=num_categories,
        output_features=output_features,
        output=output,
        use_transformer=True)
    loaded_params = load_params(initial_params, encoder_fn_params,
                                reduce_fn_params, predict_fn_params)
    model = nn.Model(module, loaded_params)
    return model
def architecture_to_layers(encoder_fn_name, reduce_fn_name):
    """Maps encoder/lens names to trainable flax layer names.

    Returns (layer_names, trainable_encoder); raises ValueError for an
    unknown encoder or lens name. Numeric suffixes follow layer creation
    order: trainable encoder, then trainable lens, then prediction head.
    """
    # (layer name or None, encoder is trainable) keyed by encoder name.
    encoder_table = {
        None: ('Transformer_0', True),
        'transformer': ('Transformer_0', True),
        'one_hot': (None, False),
        'cnn_one_hot': ('CNN_0', True),
    }
    if encoder_fn_name not in encoder_table:
        raise ValueError('Incorrect encoder name specified.')
    encoder_layer, trainable_encoder = encoder_table[encoder_fn_name]

    # Layer-name prefix per lens; None means no trainable parameters.
    lens_table = {
        'mean_pool': None,
        'max_pool': None,
        'linear_mean_pool': 'Dense',
        'linear_max_pool': 'Dense',
        'gated_conv': 'GatedConv',
    }
    if reduce_fn_name not in lens_table:
        raise ValueError('Incorrect lens name specified.')
    lens_prefix = lens_table[reduce_fn_name]
    trainable_lens = lens_prefix is not None

    layers = []
    if trainable_encoder:
        layers.append(encoder_layer)
    if trainable_lens:
        # The lens index is 0 unless a trainable encoder precedes it.
        layers.append('%s_%d' % (lens_prefix, int(trainable_encoder)))
    # Prediction head index = number of trainable stages before it.
    layers.append('Dense_%d' % (int(trainable_encoder) + int(trainable_lens)))
    return layers, trainable_encoder
| true | true |
f73212a4f983365554182ac75a527d12007f372c | 130 | py | Python | DappurMake/core/__init__.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | DappurMake/core/__init__.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | DappurMake/core/__init__.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | from .variable import variable
from .make import make
from .rule import rule
__all__ = ["variable", "make", "rule"]
print(dir()) | 18.571429 | 38 | 0.715385 | from .variable import variable
from .make import make
from .rule import rule
__all__ = ["variable", "make", "rule"]
print(dir()) | true | true |
f73212cf0f4772ea728789c0c196c35ce4427f28 | 6,015 | py | Python | tests/test_30_store.py | arista-netdevops-community/runAM | c461b0fada8ddb22ed1607eb5773cd6aef43dbf9 | [
"BSD-3-Clause"
] | null | null | null | tests/test_30_store.py | arista-netdevops-community/runAM | c461b0fada8ddb22ed1607eb5773cd6aef43dbf9 | [
"BSD-3-Clause"
] | 3 | 2021-01-15T08:06:41.000Z | 2021-02-17T13:23:11.000Z | tests/test_30_store.py | arista-netdevops-community/runAM | c461b0fada8ddb22ed1607eb5773cd6aef43dbf9 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import os
import sys
import runAM
import json
# Insert the project directory into sys.path so the `runAM` package
# imports resolve when the tests run from the repository checkout.
test_file = os.path.realpath(__file__)
test_dir = os.path.dirname(test_file)
project_dir = os.path.dirname(test_dir)
sys.path.append(project_dir)
# Test fixture: the classic JSONPath "bookstore" document, used to seed
# the JSONStore tables in the ordered tests below.
bookstore_json = {"store": {
    "book": [
        {
            "category": "reference",
            "author": "Nigel Rees",
            "title": "Sayings of the Century",
            "price": 8.95,
        },
        {
            "category": "fiction",
            "author": "Evelyn Waugh",
            "title": "Sword of Honour",
            "price": 12.99,
        },
        {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": ["adventure", "fiction", "1851"]
        },
        {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": ["fantasy", "fiction", "1954"]
        }
    ],
    "bicycle": [
        {
            "color": "red",
            "price": 19.95
        }
    ]
}
}
def test_000_can_assert_true():
    """Sanity check that pytest itself runs and can assert."""
    # before any test verify if PyTest is working and can assert True
    assert True
def test_010_store_open_store_write():
    """A JSONStore can be created and persisted to disk."""
    # init store and confirm that we have write access
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    assert store.write()
def test_020_drop_table():
    """Dropping every table leaves the store empty.

    NOTE(review): the tests in this module are order-dependent — this one
    cleans up state for the inserts that follow.
    """
    # drop all tables in the document
    all_tables_clean = True
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    for table_name in store.db.keys():
        table_content = store.drop_table(table_name)
        if table_content:  # if table is not empty, change the flag to false
            all_tables_clean = False
    store.write()
    assert all_tables_clean
def test_030_insert_documents():
    """Inserted docs get sequential ids; explicit doc_id is honored."""
    # insert documents into book and bicycle table
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    book_doc_id_list = list()
    bicycle_doc_id_list = list()
    for book in bookstore_json['store']['book']:
        doc_id = store.insert_doc(data=book, table_name='book')
        book_doc_id_list.append(doc_id)
    for bicycle in bookstore_json['store']['bicycle']:
        # explicit doc_id '42' instead of an auto-assigned one
        doc_id = store.insert_doc(data=bicycle, doc_id='42', table_name='bicycle')
        bicycle_doc_id_list.append(doc_id)
    store.write()
    assert (
        book_doc_id_list == ['1', '2', '3', '4']
    ) and (
        bicycle_doc_id_list == ['42']
    )
def test_040_get_table():
    """table() returns the full doc_id -> document mapping."""
    # get table content
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    assert store.table('bicycle') == {"42": {"color": "red", "price": 19.95}}
def test_060_jq():
    """jq() selects documents matching a jq expression (books with tags)."""
    # test basic jq query: find all books with tags
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    value = store.jq(table_name='book', query_expression='..|select(.tags?!=null)')
    assert value == [
        {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": ["adventure", "fiction", "1851"]
        },
        {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": ["fantasy", "fiction", "1954"]
        }
    ]
def test_070_jq_path():
    """jq_path() returns the path (doc ids) of every jq match."""
    # find the path to every value matched by jq
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    path_list = store.jq_path(table_name='book', query_expression='..|select(.tags?!=null)')
    assert path_list == [['3'], ['4']]
def test_080_delete_doc():
    """delete_doc() removes a document and reports the deleted ids."""
    # delete a document from a table
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    deleted_docs_list = store.delete_doc(table_name='bicycle', doc_id='42')
    store.write()
    assert deleted_docs_list == ['42']
def test_090_get_value():
    """get_val() resolves a path (doc id, key, index) to its value."""
    # find a value that corresponds to the path
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    value = store.get_val(path_list=['4', 'tags', 0], table_name='book')
    assert value == 'fantasy'
def test_100_update_path():
    """update_path() replaces the value at a path and returns the table."""
    # update value in a table based on specified path
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    updated_table = store.update_path(path=['4', 'tags', 2], data='year-1954', table_name='book')
    store.write()
    assert updated_table == {
        "1": {
            "category": "reference",
            "author": "Nigel Rees",
            "title": "Sayings of the Century",
            "price": 8.95
        },
        "2": {
            "category": "fiction",
            "author": "Evelyn Waugh",
            "title": "Sword of Honour",
            "price": 12.99
        },
        "3": {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": [
                "adventure",
                "fiction",
                "1851"
            ]
        },
        "4": {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": [
                "fantasy",
                "fiction",
                "year-1954"
            ]
        }
    }
| 33.232044 | 103 | 0.556608 | import pytest
import os
import sys
import runAM
import json
# Insert the project directory into sys.path so `runAM` imports resolve.
test_file = os.path.realpath(__file__)
test_dir = os.path.dirname(test_file)
project_dir = os.path.dirname(test_dir)
sys.path.append(project_dir)
# Test fixture: the classic JSONPath "bookstore" document.
bookstore_json = {"store": {
    "book": [
        {
            "category": "reference",
            "author": "Nigel Rees",
            "title": "Sayings of the Century",
            "price": 8.95,
        },
        {
            "category": "fiction",
            "author": "Evelyn Waugh",
            "title": "Sword of Honour",
            "price": 12.99,
        },
        {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": ["adventure", "fiction", "1851"]
        },
        {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": ["fantasy", "fiction", "1954"]
        }
    ],
    "bicycle": [
        {
            "color": "red",
            "price": 19.95
        }
    ]
}
}
def test_000_can_assert_true():
    """Sanity check that pytest itself runs and can assert."""
    assert True
def test_010_store_open_store_write():
    """A JSONStore can be created and persisted to disk."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    assert store.write()
def test_020_drop_table():
    """Dropping every table leaves the store empty (cleans test state)."""
    all_tables_clean = True
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    for table_name in store.db.keys():
        table_content = store.drop_table(table_name)
        if table_content:
            # A non-empty return means the table still held documents.
            all_tables_clean = False
    store.write()
    assert all_tables_clean
def test_030_insert_documents():
    """Inserted docs get sequential ids; explicit doc_id '42' is honored."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    book_doc_id_list = list()
    bicycle_doc_id_list = list()
    for book in bookstore_json['store']['book']:
        doc_id = store.insert_doc(data=book, table_name='book')
        book_doc_id_list.append(doc_id)
    for bicycle in bookstore_json['store']['bicycle']:
        doc_id = store.insert_doc(data=bicycle, doc_id='42', table_name='bicycle')
        bicycle_doc_id_list.append(doc_id)
    store.write()
    assert (
        book_doc_id_list == ['1', '2', '3', '4']
    ) and (
        bicycle_doc_id_list == ['42']
    )
def test_040_get_table():
    """table() returns the full doc_id -> document mapping."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    assert store.table('bicycle') == {"42": {"color": "red", "price": 19.95}}
def test_060_jq():
    """jq() selects the documents matching a jq expression (books with tags)."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    value = store.jq(table_name='book', query_expression='..|select(.tags?!=null)')
    assert value == [
        {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": ["adventure", "fiction", "1851"]
        },
        {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": ["fantasy", "fiction", "1954"]
        }
    ]
def test_070_jq_path():
    """jq_path() returns the path (doc ids) of every jq match."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    path_list = store.jq_path(table_name='book', query_expression='..|select(.tags?!=null)')
    assert path_list == [['3'], ['4']]
def test_080_delete_doc():
    """delete_doc() removes a document and reports the deleted ids."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    deleted_docs_list = store.delete_doc(table_name='bicycle', doc_id='42')
    store.write()
    assert deleted_docs_list == ['42']
def test_090_get_value():
    """get_val() resolves a path (doc id, key, index) to its value."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    value = store.get_val(path_list=['4', 'tags', 0], table_name='book')
    assert value == 'fantasy'
def test_100_update_path():
    """update_path() replaces the value at a path and returns the table."""
    store = runAM.db.JSONStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
    updated_table = store.update_path(path=['4', 'tags', 2], data='year-1954', table_name='book')
    store.write()
    assert updated_table == {
        "1": {
            "category": "reference",
            "author": "Nigel Rees",
            "title": "Sayings of the Century",
            "price": 8.95
        },
        "2": {
            "category": "fiction",
            "author": "Evelyn Waugh",
            "title": "Sword of Honour",
            "price": 12.99
        },
        "3": {
            "category": "fiction",
            "author": "Herman Melville",
            "title": "Moby Dick",
            "isbn": "0-553-21311-3",
            "price": 8.99,
            "tags": [
                "adventure",
                "fiction",
                "1851"
            ]
        },
        "4": {
            "category": "fiction",
            "author": "J. R. R. Tolkien",
            "title": "The Lord of the Rings",
            "isbn": "0-395-19395-8",
            "price": 22.99,
            "tags": [
                "fantasy",
                "fiction",
                "year-1954"
            ]
        }
    }
| true | true |
f732135ed8fd5cb28d543d0c92d4201d25d4de6a | 5,547 | py | Python | algorand/TmplSig.py | cryptorites-scotti/wormhole | 2e220a6f76a1ec03364fa2fac2e571b9824744f8 | [
"Apache-2.0"
] | null | null | null | algorand/TmplSig.py | cryptorites-scotti/wormhole | 2e220a6f76a1ec03364fa2fac2e571b9824744f8 | [
"Apache-2.0"
] | 36 | 2022-02-21T13:31:14.000Z | 2022-03-28T04:47:23.000Z | algorand/TmplSig.py | cryptorites-scotti/wormhole | 2e220a6f76a1ec03364fa2fac2e571b9824744f8 | [
"Apache-2.0"
] | null | null | null | from time import time, sleep
from typing import List, Tuple, Dict, Any, Optional, Union
from base64 import b64decode
import base64
import random
import hashlib
import uuid
import sys
import json
import uvarint
import pprint
from local_blob import LocalBlob
from algosdk.v2client.algod import AlgodClient
from algosdk.kmd import KMDClient
from algosdk import account, mnemonic
from algosdk.encoding import decode_address
from algosdk.future import transaction
from pyteal import compileTeal, Mode, Expr
from pyteal import *
from algosdk.logic import get_application_address
from algosdk.future.transaction import LogicSigAccount
class TmplSig:
    """KeySig class reads in a json map containing assembly details of a
    template smart signature and allows you to populate it with the variables.

    In this case we are only interested in a single variable, the key which
    is a byte string to make the address unique. In this demo we're using
    random strings but in practice you can choose something meaningful to
    your application.
    """

    def __init__(self, name):
        # Read the source map
        # with open("{}.json".format(name)) as f:
        #     self.map = json.loads(f.read())
        #
        # The assembled-bytecode map is inlined here instead of loaded from
        # "<name>.json"; `name` is currently unused.
        # Each template label records the byte `position` of its
        # placeholder in the bytecode and whether it is a bytes (length-
        # prefixed) or integer (uvarint) value.
        self.map = {"name": "lsig.teal", "version": 6, "source": "", "bytecode": "BiABAYEASIAASDEQgQYSRDEZIhJEMRiBABJEMSCAABJEMQGBABJEMQkyAxJEMRUyAxJEIg==",
                    "template_labels": {
                        "TMPL_ADDR_IDX": {"source_line": 3, "position": 5, "bytes": False},
                        "TMPL_EMITTER_ID": {"source_line": 5, "position": 8, "bytes": True},
                        "TMPL_APP_ID": {"source_line": 16, "position": 24, "bytes": False},
                        "TMPL_APP_ADDRESS": {"source_line": 20, "position": 30, "bytes": True}
                    },
                    "label_map": {}, "line_map": [0, 1, 4, 6, 7, 9, 10, 12, 14, 15, 16, 18, 19, 20, 21, 23, 25, 26, 27, 29, 31, 32, 33, 35, 37, 38, 39, 41, 43, 44, 45, 47, 49, 50, 51]
                    }

        self.src = base64.b64decode(self.map["bytecode"])
        # Labels sorted by byte position so patching can proceed
        # left-to-right while tracking the cumulative size shift.
        self.sorted = dict(
            sorted(
                self.map["template_labels"].items(),
                key=lambda item: item[1]["position"],
            )
        )

    def populate(self, values: Dict[str, Union[str, int]]) -> LogicSigAccount:
        """populate uses the map to fill in the variable of the bytecode and
        returns a logic sig with the populated bytecode.

        Bytes values must be hex strings; ints are uvarint-encoded. Each
        placeholder in the template is a single 0x00 byte, so `shift`
        accumulates how much every later position moves as placeholders are
        replaced with longer encodings.
        """
        # Get the template source
        contract = list(base64.b64decode(self.map["bytecode"]))

        shift = 0
        for k, v in self.sorted.items():
            if k in values:
                pos = v["position"] + shift
                if v["bytes"]:
                    val = bytes.fromhex(values[k])
                    lbyte = uvarint.encode(len(val))
                    # -1 to account for the existing 00 byte for length
                    shift += (len(lbyte) - 1) + len(val)
                    # +1 to overwrite the existing 00 byte for length
                    contract[pos: pos + 1] = lbyte + val
                else:
                    val = uvarint.encode(values[k])
                    # -1 to account for existing 00 byte
                    shift += len(val) - 1
                    # +1 to overwrite existing 00 byte
                    contract[pos: pos + 1] = val

        # Create a new LogicSigAccount given the populated bytecode,
        # pprint.pprint({"values": values, "contract": bytes(contract).hex()})
        return LogicSigAccount(bytes(contract))

    def get_bytecode_chunk(self, idx: int) -> Bytes:
        """Returns the template bytecode between placeholders idx-1 and idx,
        wrapped as a pyteal Bytes constant."""
        start = 0
        if idx > 0:
            start = list(self.sorted.values())[idx - 1]["position"] + 1

        stop = len(self.src)
        if idx < len(self.sorted):
            stop = list(self.sorted.values())[idx]["position"]

        chunk = self.src[start:stop]
        return Bytes(chunk)

    def get_bytecode_raw(self, idx: int):
        """Same slice as get_bytecode_chunk but returned as raw bytes."""
        start = 0
        if idx > 0:
            start = list(self.sorted.values())[idx - 1]["position"] + 1

        stop = len(self.src)
        if idx < len(self.sorted):
            stop = list(self.sorted.values())[idx]["position"]

        chunk = self.src[start:stop]
        return chunk

    def get_sig_tmpl(self):
        """Compiles and returns the TEAL source of the template signature.

        The signature only approves a fee-less OptIn ApplicationCall to
        TMPL_APP_ID that rekeys the account to TMPL_APP_ADDRESS.
        """
        def sig_tmpl():
            admin_app_id = ScratchVar()
            admin_address = ScratchVar()

            return Seq(
                # Just putting adding this as a tmpl var to make the address
                # unique and deterministic; we don't actually care what the
                # value is, pop it
                Pop(Tmpl.Int("TMPL_ADDR_IDX")),
                Pop(Tmpl.Bytes("TMPL_EMITTER_ID")),
                Assert(Txn.type_enum() == TxnType.ApplicationCall),
                Assert(Txn.on_completion() == OnComplete.OptIn),
                Assert(Txn.application_id() == Tmpl.Int("TMPL_APP_ID")),
                Assert(Txn.rekey_to() == Tmpl.Bytes("TMPL_APP_ADDRESS")),
                # Fee must be 0 (covered by fee pooling) and the account may
                # not be closed out.
                Assert(Txn.fee() == Int(0)),
                Assert(Txn.close_remainder_to() == Global.zero_address()),
                Assert(Txn.asset_close_to() == Global.zero_address()),
                Approve()
            )

        return compileTeal(sig_tmpl(), mode=Mode.Signature, version=6, assembleConstants=True)
if __name__ == '__main__':
    # Emit the compiled template signature TEAL to sig.tmpl.teal.
    core = TmplSig("sig")
    # client = AlgodClient("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "http://localhost:4001")
    # pprint.pprint(client.compile( core.get_sig_tmpl()))
    with open("sig.tmpl.teal", "w") as f:
        f.write(core.get_sig_tmpl())
| 38.520833 | 149 | 0.589508 | from time import time, sleep
from typing import List, Tuple, Dict, Any, Optional, Union
from base64 import b64decode
import base64
import random
import hashlib
import uuid
import sys
import json
import uvarint
import pprint
from local_blob import LocalBlob
from algosdk.v2client.algod import AlgodClient
from algosdk.kmd import KMDClient
from algosdk import account, mnemonic
from algosdk.encoding import decode_address
from algosdk.future import transaction
from pyteal import compileTeal, Mode, Expr
from pyteal import *
from algosdk.logic import get_application_address
from algosdk.future.transaction import LogicSigAccount
class TmplSig:
    """Template smart-signature helper: holds the assembled bytecode map and
    patches template placeholders with concrete values."""

    def __init__(self, name):
        # Inlined bytecode map (not loaded from "<name>.json"; `name` is
        # currently unused). Each template label records the byte position
        # of its placeholder and whether it is bytes or an integer.
        self.map = {"name": "lsig.teal", "version": 6, "source": "", "bytecode": "BiABAYEASIAASDEQgQYSRDEZIhJEMRiBABJEMSCAABJEMQGBABJEMQkyAxJEMRUyAxJEIg==",
                    "template_labels": {
                        "TMPL_ADDR_IDX": {"source_line": 3, "position": 5, "bytes": False},
                        "TMPL_EMITTER_ID": {"source_line": 5, "position": 8, "bytes": True},
                        "TMPL_APP_ID": {"source_line": 16, "position": 24, "bytes": False},
                        "TMPL_APP_ADDRESS": {"source_line": 20, "position": 30, "bytes": True}
                    },
                    "label_map": {}, "line_map": [0, 1, 4, 6, 7, 9, 10, 12, 14, 15, 16, 18, 19, 20, 21, 23, 25, 26, 27, 29, 31, 32, 33, 35, 37, 38, 39, 41, 43, 44, 45, 47, 49, 50, 51]
                    }

        self.src = base64.b64decode(self.map["bytecode"])
        # Labels sorted by byte position so patching proceeds left-to-right.
        self.sorted = dict(
            sorted(
                self.map["template_labels"].items(),
                key=lambda item: item[1]["position"],
            )
        )

    def populate(self, values: Dict[str, Union[str, int]]) -> LogicSigAccount:
        """Fills the bytecode placeholders with `values` and returns a
        LogicSigAccount. Bytes values are hex strings (length-prefixed in
        the bytecode); ints are uvarint-encoded. `shift` tracks how much
        later positions move as 1-byte placeholders grow."""
        contract = list(base64.b64decode(self.map["bytecode"]))

        shift = 0
        for k, v in self.sorted.items():
            if k in values:
                pos = v["position"] + shift
                if v["bytes"]:
                    val = bytes.fromhex(values[k])
                    lbyte = uvarint.encode(len(val))
                    # -1/+1: the template already holds a single 00 byte.
                    shift += (len(lbyte) - 1) + len(val)
                    contract[pos: pos + 1] = lbyte + val
                else:
                    val = uvarint.encode(values[k])
                    shift += len(val) - 1
                    contract[pos: pos + 1] = val

        return LogicSigAccount(bytes(contract))

    def get_bytecode_chunk(self, idx: int) -> Bytes:
        """Template bytecode between placeholders idx-1 and idx, as a
        pyteal Bytes constant."""
        start = 0
        if idx > 0:
            start = list(self.sorted.values())[idx - 1]["position"] + 1

        stop = len(self.src)
        if idx < len(self.sorted):
            stop = list(self.sorted.values())[idx]["position"]

        chunk = self.src[start:stop]
        return Bytes(chunk)

    def get_bytecode_raw(self, idx: int):
        """Same slice as get_bytecode_chunk but returned as raw bytes."""
        start = 0
        if idx > 0:
            start = list(self.sorted.values())[idx - 1]["position"] + 1

        stop = len(self.src)
        if idx < len(self.sorted):
            stop = list(self.sorted.values())[idx]["position"]

        chunk = self.src[start:stop]
        return chunk

    def get_sig_tmpl(self):
        """Compiles the template signature to TEAL: approves only a fee-less
        OptIn ApplicationCall to TMPL_APP_ID that rekeys to
        TMPL_APP_ADDRESS."""
        def sig_tmpl():
            admin_app_id = ScratchVar()
            admin_address = ScratchVar()

            return Seq(
                # Popped tmpl vars only make the program (address) unique.
                Pop(Tmpl.Int("TMPL_ADDR_IDX")),
                Pop(Tmpl.Bytes("TMPL_EMITTER_ID")),
                Assert(Txn.type_enum() == TxnType.ApplicationCall),
                Assert(Txn.on_completion() == OnComplete.OptIn),
                Assert(Txn.application_id() == Tmpl.Int("TMPL_APP_ID")),
                Assert(Txn.rekey_to() == Tmpl.Bytes("TMPL_APP_ADDRESS")),
                Assert(Txn.fee() == Int(0)),
                Assert(Txn.close_remainder_to() == Global.zero_address()),
                Assert(Txn.asset_close_to() == Global.zero_address()),
                Approve()
            )

        return compileTeal(sig_tmpl(), mode=Mode.Signature, version=6, assembleConstants=True)
if __name__ == '__main__':
    # Emit the compiled template signature TEAL to sig.tmpl.teal.
    core = TmplSig("sig")
    # client = AlgodClient("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "http://localhost:4001")
    # pprint.pprint(client.compile( core.get_sig_tmpl()))
    with open("sig.tmpl.teal", "w") as f:
        f.write(core.get_sig_tmpl())
| true | true |
f7321391696b0402752b89e18571a7b90492e6b0 | 1,713 | py | Python | date_sniff/tests.py | nanvel/date-sniff | 5a142861656b985fdbb9029cdf4a541455e16b9a | [
"MIT"
] | null | null | null | date_sniff/tests.py | nanvel/date-sniff | 5a142861656b985fdbb9029cdf4a541455e16b9a | [
"MIT"
] | null | null | null | date_sniff/tests.py | nanvel/date-sniff | 5a142861656b985fdbb9029cdf4a541455e16b9a | [
"MIT"
] | null | null | null | from date_sniff.sniffer import DateSniffer
def test_years_separation():
    """Sentences containing the target year are returned keyed by sentence.

    With only a year configured, a bare day list ([]) is expected because no
    month/day was matched.
    """
    sniffer = DateSniffer(year=2019)
    assert sniffer.sniff('2019') == {'2019': []}
    assert sniffer.sniff('prefix 2019 and long text') == {'prefix 2019 and long text': []}
    res = {'prefix 2019 and long text another 2019': []}
    assert sniffer.sniff('prefix 2019 and long text another 2019') == res
    # Two occurrences of the year in one sentence still yield one entry.
    assert sniffer.sniff('2019 two 2019') == {'2019 two 2019': []}
def test_month_search():
    """When a month is configured, the year alone is no longer a match."""
    sniffer = DateSniffer(year=2019, month=1)
    assert sniffer.sniff('prefix 2019') == {}
    # Month by name matches with no day; ISO date also extracts the day.
    assert sniffer.sniff('prefix January 2019') == {'prefix January 2019': []}
    assert sniffer.sniff('prefix 2019-01-10') == {'prefix 2019-01-10': [10]}
    sniffer = DateSniffer(year=2019, month=3)
    # Year without the configured month must not match ...
    res = sniffer.sniff('EXPANSION PLAN Germany Finland Denmark 2019 Norway Egypt UAE France Spain 2021')
    assert res == {}
    # ... but an abbreviated month name ("March.") before the year does.
    res = sniffer.sniff('EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021')
    assert res == {'EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021': []}
def test_find_isolated():
    """find_isolated must not match '10' embedded in the longer token '101'."""
    sniffer = DateSniffer(year=2019, month=3)
    res = sniffer.find_isolated('10', '2019-03-04 101')
    assert res == []
def test_keyword_search():
    """With a keyword configured, only sentences containing it are returned."""
    sniffer = DateSniffer(year=2019, month=1, keyword='test')
    # Matching date but no keyword: filtered out.
    assert sniffer.sniff('prefix 2019-01-10') == {}
    # Matching date and keyword: returned with the extracted day.
    # (Removed a stray debug print() that was left in this test.)
    assert sniffer.sniff('prefix 2019-01-10 test') == {'prefix 2019-01-10 test': [10]}
def test_days():
    """An ISO date in the target year/month reports its day-of-month."""
    sniffer = DateSniffer(year=2019, month=3)
    res = sniffer.sniff('2019-03-04 101')
    assert res == {'2019-03-04 101': [4]}
| 39.837209 | 114 | 0.663164 | from date_sniff.sniffer import DateSniffer
def test_years_separation():
sniffer = DateSniffer(year=2019)
assert sniffer.sniff('2019') == {'2019': []}
assert sniffer.sniff('prefix 2019 and long text') == {'prefix 2019 and long text': []}
res = {'prefix 2019 and long text another 2019': []}
assert sniffer.sniff('prefix 2019 and long text another 2019') == res
assert sniffer.sniff('2019 two 2019') == {'2019 two 2019': []}
def test_month_search():
sniffer = DateSniffer(year=2019, month=1)
assert sniffer.sniff('prefix 2019') == {}
assert sniffer.sniff('prefix January 2019') == {'prefix January 2019': []}
assert sniffer.sniff('prefix 2019-01-10') == {'prefix 2019-01-10': [10]}
sniffer = DateSniffer(year=2019, month=3)
res = sniffer.sniff('EXPANSION PLAN Germany Finland Denmark 2019 Norway Egypt UAE France Spain 2021')
assert res == {}
res = sniffer.sniff('EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021')
assert res == {'EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021': []}
def test_find_isolated():
sniffer = DateSniffer(year=2019, month=3)
res = sniffer.find_isolated('10', '2019-03-04 101')
assert res == []
def test_keyword_search():
    """With a keyword configured, only sentences containing it are returned."""
    sniffer = DateSniffer(year=2019, month=1, keyword='test')
    assert sniffer.sniff('prefix 2019-01-10') == {}
    # (Removed a stray debug print() that was left in this test.)
    assert sniffer.sniff('prefix 2019-01-10 test') == {'prefix 2019-01-10 test': [10]}
def test_days():
sniffer = DateSniffer(year=2019, month=3)
res = sniffer.sniff('2019-03-04 101')
assert res == {'2019-03-04 101': [4]}
| true | true |
f73216568bc0065366c2d6d33f8c92b81662b090 | 6,286 | py | Python | lte/gateway/python/scripts/config_stateless_agw.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/scripts/config_stateless_agw.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/scripts/config_stateless_agw.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Script to trigger pre and post start commands for the Sctpd systemd unit
"""
import argparse
import os
import subprocess
import sys
import shlex
import time
from enum import Enum
from magma.configuration.service_configs import (
load_override_config,
load_service_config,
save_override_config,
)
# Process exit codes used by this tool: STATELESS=0, STATEFUL=1,
# CORRUPT=2 (mixed configuration), INVALID=3 (usage/permission error).
return_codes = Enum(
    "return_codes", "STATELESS STATEFUL CORRUPT INVALID", start=0
)

# (service name, config key, value that key must have for stateless mode).
# A gateway is stateless only when every one of these overrides is set.
STATELESS_SERVICE_CONFIGS = [
    ("mme", "use_stateless", True),
    ("mobilityd", "persist_to_redis", True),
    ("pipelined", "clean_restart", False),
    ("pipelined", "redis_enabled", True),
    ("sessiond", "support_stateless", True),
]
def check_stateless_service_config(service, config_name, config_value):
    """Check one service option against its expected stateless value.

    Prints a STATELESS/STATEFUL line for the option and returns the
    corresponding return_codes member.
    """
    current = load_service_config(service).get(config_name)
    if current != config_value:
        print("STATEFUL\t%s -> %s" % (service, config_name))
        return return_codes.STATEFUL
    print("STATELESS\t%s -> %s" % (service, config_name))
    return return_codes.STATELESS
def check_stateless_services():
    """Classify the gateway from its per-service stateless options.

    Returns STATELESS when every option matches, STATEFUL when none do,
    and CORRUPT for a mixed configuration.
    """
    stateful_count = sum(
        1
        for service, config, value in STATELESS_SERVICE_CONFIGS
        if check_stateless_service_config(service, config, value)
        == return_codes.STATEFUL
    )
    if stateful_count == 0:
        res = return_codes.STATELESS
    elif stateful_count == len(STATELESS_SERVICE_CONFIGS):
        res = return_codes.STATEFUL
    else:
        res = return_codes.CORRUPT
    print("Check returning", res)
    return res
def check_stateless_agw():
    """Exit with the gateway's stateless classification as the status code."""
    sys.exit(check_stateless_services().value)
def clear_redis_state():
    """Stop MME and delete all Magma service state from local Redis.

    Must run as root. Stopping magma@mme also brings down mobilityd,
    pipelined and sessiond, so their state keys can be deleted safely.
    """
    if os.getuid() != 0:
        print("Need to run as root to clear Redis state.")
        # Use .value for a real integer exit status, consistent with the
        # other sys.exit() calls in this module (sys.exit(enum) would
        # print the enum and exit 1 regardless of the member).
        sys.exit(return_codes.INVALID.value)
    # stop MME, which in turn stops mobilityd, pipelined and sessiond
    subprocess.call("service magma@mme stop".split())
    # delete all keys from Redis which capture service state
    for key_regex in [
        "*_state",
        "IMSI*",
        "mobilityd:assigned_ip_blocks",
        "mobilityd:ip_states:*",
        "NO_VLAN:mobilityd_gw_info",
        "QosManager",
        "s1ap_imsi_map",
    ]:
        redis_cmd = (
            "redis-cli -p 6380 KEYS '"
            + key_regex
            + "' | xargs redis-cli -p 6380 DEL"
        )
        # This is a shell pipeline, so it must go through a shell.
        # The previous shlex.split() + shell=False invocation passed '|'
        # and 'xargs' as literal arguments to redis-cli, so no keys were
        # ever deleted. key_regex values are fixed constants above, so
        # shell=True introduces no injection surface here.
        subprocess.call(
            redis_cmd,
            shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
def flushall_redis():
    """Stop all Magma services and wipe every key in the local Redis.

    Must run as root. Redis is started just long enough to FLUSHALL.
    """
    if os.getuid() != 0:
        print("Need to run as root to clear Redis state.")
        # NOTE(review): sys.exit() on an Enum member prints it and exits
        # with status 1, not INVALID's value 3 — confirm intended.
        sys.exit(return_codes.INVALID)
    print("Flushing all content in Redis")
    subprocess.call("service magma@* stop".split())
    subprocess.call("service magma@redis start".split())
    subprocess.call("redis-cli -p 6380 flushall".split())
    subprocess.call("service magma@redis stop".split())
def start_magmad():
    """Start the magmad service. Must run as root."""
    if os.getuid() != 0:
        print("Need to run as root to start magmad.")
        # NOTE(review): sys.exit() on an Enum member exits with status 1,
        # not INVALID's value 3 — confirm intended.
        sys.exit(return_codes.INVALID)
    subprocess.call("service magma@magmad start".split())
def restart_sctpd():
    """Restart sctpd (resets eNB connections), then wait for services.

    Must run as root. The fixed 30 s sleep gives Magma and OVS services
    time to come back up before the caller proceeds.
    """
    if os.getuid() != 0:
        print("Need to run as root to restart sctpd.")
        sys.exit(return_codes.INVALID)
    print("Restarting sctpd")
    subprocess.call("service sctpd restart".split())
    # delay return after restarting so that Magma and OVS services come up
    time.sleep(30)
def enable_stateless_agw():
    """Write the stateless overrides for every service and restart sctpd.

    No-op (exits STATELESS) if the gateway is already stateless. Exits
    with the post-change classification as the status code.
    """
    if check_stateless_services() == return_codes.STATELESS:
        print("Nothing to enable, AGW is stateless")
        sys.exit(return_codes.STATELESS.value)
    for service, config, value in STATELESS_SERVICE_CONFIGS:
        cfg = load_override_config(service) or {}
        cfg[config] = value
        save_override_config(service, cfg)
    # restart Sctpd so that eNB connections are reset and local state cleared
    restart_sctpd()
    sys.exit(check_stateless_services().value)
def disable_stateless_agw():
    """Remove the stateless overrides for every service and restart sctpd.

    No-op (exits STATEFUL) if the gateway is already stateful. Exits
    with the post-change classification as the status code.
    """
    if check_stateless_services() == return_codes.STATEFUL:
        print("Nothing to disable, AGW is stateful")
        sys.exit(return_codes.STATEFUL.value)
    for service, config, value in STATELESS_SERVICE_CONFIGS:
        cfg = load_override_config(service) or {}
        # remove the stateless override
        cfg.pop(config, None)
        save_override_config(service, cfg)
    # restart Sctpd so that eNB connections are reset and local state cleared
    restart_sctpd()
    sys.exit(check_stateless_services().value)
def sctpd_pre_start():
    """Sctpd unit ExecStartPre hook: clear Redis state in stateless mode.

    In stateful mode there is no shared state to clear. Always exits 0 so
    the unit start proceeds.
    """
    if check_stateless_services() == return_codes.STATEFUL:
        # switching from stateless to stateful
        print("AGW is stateful, nothing to be done")
    else:
        clear_redis_state()
    sys.exit(0)
def sctpd_post_start():
    """Sctpd unit ExecStartPost hook: start the dependent Magma services.

    The four systemctl starts are fired without waiting on them (Popen),
    then this hook exits 0.
    """
    subprocess.Popen("/bin/systemctl start magma@mme".split())
    subprocess.Popen("/bin/systemctl start magma@pipelined".split())
    subprocess.Popen("/bin/systemctl start magma@sessiond".split())
    subprocess.Popen("/bin/systemctl start magma@mobilityd".split())
    sys.exit(0)
def clear_redis_and_restart():
    """Clear Magma state keys from Redis, then restart dependent services.

    Note: sctpd_post_start() already calls sys.exit(0), so the trailing
    exit here is never reached.
    """
    clear_redis_state()
    sctpd_post_start()
    sys.exit(0)
def flushall_redis_and_restart():
    """Wipe Redis entirely, then bring magmad back up and restart sctpd."""
    flushall_redis()
    start_magmad()
    restart_sctpd()
    sys.exit(0)
# CLI command -> handler dispatch table; every handler terminates the
# process via sys.exit().
STATELESS_FUNC_DICT = {
    "check": check_stateless_agw,
    "enable": enable_stateless_agw,
    "disable": disable_stateless_agw,
    "sctpd_pre": sctpd_pre_start,
    "sctpd_post": sctpd_post_start,
    "clear_redis": clear_redis_and_restart,
    "flushall_redis": flushall_redis_and_restart,
}
def main():
    """Parse the single positional command and dispatch to its handler."""
    parser = argparse.ArgumentParser()
    parser.add_argument("command", choices=STATELESS_FUNC_DICT.keys())
    args = parser.parse_args()
    func = STATELESS_FUNC_DICT[args.command]
    func()
# Allow running this module directly as a CLI tool.
if __name__ == "__main__":
    main()
| 28.703196 | 77 | 0.685173 |
import argparse
import os
import subprocess
import sys
import shlex
import time
from enum import Enum
from magma.configuration.service_configs import (
load_override_config,
load_service_config,
save_override_config,
)
return_codes = Enum(
"return_codes", "STATELESS STATEFUL CORRUPT INVALID", start=0
)
STATELESS_SERVICE_CONFIGS = [
("mme", "use_stateless", True),
("mobilityd", "persist_to_redis", True),
("pipelined", "clean_restart", False),
("pipelined", "redis_enabled", True),
("sessiond", "support_stateless", True),
]
def check_stateless_service_config(service, config_name, config_value):
service_config = load_service_config(service)
if service_config.get(config_name) == config_value:
print("STATELESS\t%s -> %s" % (service, config_name))
return return_codes.STATELESS
print("STATEFUL\t%s -> %s" % (service, config_name))
return return_codes.STATEFUL
def check_stateless_services():
num_stateful = 0
for service, config, value in STATELESS_SERVICE_CONFIGS:
if (
check_stateless_service_config(service, config, value)
== return_codes.STATEFUL
):
num_stateful += 1
if num_stateful == 0:
res = return_codes.STATELESS
elif num_stateful == len(STATELESS_SERVICE_CONFIGS):
res = return_codes.STATEFUL
else:
res = return_codes.CORRUPT
print("Check returning", res)
return res
def check_stateless_agw():
sys.exit(check_stateless_services().value)
def clear_redis_state():
    """Stop MME and delete all Magma service state from local Redis.

    Must run as root. Stopping magma@mme also brings down mobilityd,
    pipelined and sessiond, so their state keys can be deleted safely.
    """
    if os.getuid() != 0:
        print("Need to run as root to clear Redis state.")
        # Use .value for a real integer exit status, consistent with the
        # other sys.exit() calls in this module.
        sys.exit(return_codes.INVALID.value)
    subprocess.call("service magma@mme stop".split())
    for key_regex in [
        "*_state",
        "IMSI*",
        "mobilityd:assigned_ip_blocks",
        "mobilityd:ip_states:*",
        "NO_VLAN:mobilityd_gw_info",
        "QosManager",
        "s1ap_imsi_map",
    ]:
        redis_cmd = (
            "redis-cli -p 6380 KEYS '"
            + key_regex
            + "' | xargs redis-cli -p 6380 DEL"
        )
        # This is a shell pipeline, so it must go through a shell.
        # shlex.split() + shell=False passed '|' and 'xargs' as literal
        # arguments to redis-cli, so no keys were ever deleted. The
        # key_regex values are fixed constants, so shell=True introduces
        # no injection surface here.
        subprocess.call(
            redis_cmd,
            shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
def flushall_redis():
if os.getuid() != 0:
print("Need to run as root to clear Redis state.")
sys.exit(return_codes.INVALID)
print("Flushing all content in Redis")
subprocess.call("service magma@* stop".split())
subprocess.call("service magma@redis start".split())
subprocess.call("redis-cli -p 6380 flushall".split())
subprocess.call("service magma@redis stop".split())
def start_magmad():
if os.getuid() != 0:
print("Need to run as root to start magmad.")
sys.exit(return_codes.INVALID)
subprocess.call("service magma@magmad start".split())
def restart_sctpd():
if os.getuid() != 0:
print("Need to run as root to restart sctpd.")
sys.exit(return_codes.INVALID)
print("Restarting sctpd")
subprocess.call("service sctpd restart".split())
time.sleep(30)
def enable_stateless_agw():
if check_stateless_services() == return_codes.STATELESS:
print("Nothing to enable, AGW is stateless")
sys.exit(return_codes.STATELESS.value)
for service, config, value in STATELESS_SERVICE_CONFIGS:
cfg = load_override_config(service) or {}
cfg[config] = value
save_override_config(service, cfg)
restart_sctpd()
sys.exit(check_stateless_services().value)
def disable_stateless_agw():
if check_stateless_services() == return_codes.STATEFUL:
print("Nothing to disable, AGW is stateful")
sys.exit(return_codes.STATEFUL.value)
for service, config, value in STATELESS_SERVICE_CONFIGS:
cfg = load_override_config(service) or {}
cfg.pop(config, None)
save_override_config(service, cfg)
restart_sctpd()
sys.exit(check_stateless_services().value)
def sctpd_pre_start():
if check_stateless_services() == return_codes.STATEFUL:
print("AGW is stateful, nothing to be done")
else:
clear_redis_state()
sys.exit(0)
def sctpd_post_start():
subprocess.Popen("/bin/systemctl start magma@mme".split())
subprocess.Popen("/bin/systemctl start magma@pipelined".split())
subprocess.Popen("/bin/systemctl start magma@sessiond".split())
subprocess.Popen("/bin/systemctl start magma@mobilityd".split())
sys.exit(0)
def clear_redis_and_restart():
clear_redis_state()
sctpd_post_start()
sys.exit(0)
def flushall_redis_and_restart():
flushall_redis()
start_magmad()
restart_sctpd()
sys.exit(0)
STATELESS_FUNC_DICT = {
"check": check_stateless_agw,
"enable": enable_stateless_agw,
"disable": disable_stateless_agw,
"sctpd_pre": sctpd_pre_start,
"sctpd_post": sctpd_post_start,
"clear_redis": clear_redis_and_restart,
"flushall_redis": flushall_redis_and_restart,
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("command", choices=STATELESS_FUNC_DICT.keys())
args = parser.parse_args()
func = STATELESS_FUNC_DICT[args.command]
func()
if __name__ == "__main__":
main()
| true | true |
f732168707cd1e2c13f9b34495a0213b31ec4c9d | 2,164 | py | Python | flow/benchmarks/rllib/ars_runner.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 16 | 2018-05-25T06:30:28.000Z | 2020-08-08T00:03:47.000Z | flow/benchmarks/rllib/ars_runner.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 46 | 2018-05-22T21:32:55.000Z | 2019-06-12T13:10:02.000Z | flow/benchmarks/rllib/ars_runner.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 6 | 2018-06-22T14:59:14.000Z | 2019-08-29T06:00:34.000Z | """
Runner script for environments located in flow/benchmarks.
The environment file can be modified in the imports to change the environment
this runner script is executed on. Furthermore, the rllib specific algorithm/
parameters can be specified here once and used on multiple environments.
"""
import json
import ray
import ray.rllib.ars as ars
from ray.tune import run_experiments, grid_search
from ray.tune.registry import register_env
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder
# use this to specify the environment to run
from flow.benchmarks.figureeight2 import flow_params
# number of rollouts per training iteration
N_ROLLOUTS = 25
# number of parallel workers
PARALLEL_ROLLOUTS = 25
if __name__ == "__main__":
    # get the env name and a creator for the environment
    create_env, env_name = make_create_env(params=flow_params, version=0)

    # attach to an existing ray cluster via its redis head node
    ray.init(redis_address="localhost:6379", redirect_output=True)

    # ARS hyperparameters; grid_search values make ray.tune expand the
    # experiment into one trial per combination.
    config = ars.DEFAULT_CONFIG.copy()
    config["num_workers"] = PARALLEL_ROLLOUTS
    config["num_deltas"] = N_ROLLOUTS
    config["deltas_used"] = grid_search([25, 50])
    config["sgd_stepsize"] = .01
    config["delta_std"] = grid_search([.01, .02])
    config['policy'] = 'Linear'
    config["observation_filter"] = "NoFilter"
    config['eval_rollouts'] = PARALLEL_ROLLOUTS

    # save the flow params for replay
    flow_json = json.dumps(flow_params, cls=FlowParamsEncoder, sort_keys=True,
                           indent=4)
    config['env_config']['flow_params'] = flow_json

    # Register as rllib env
    register_env(env_name, create_env)

    trials = run_experiments({
        flow_params["exp_tag"]: {
            "run": "ARS",
            "env": env_name,
            "config": {
                **config
            },
            "checkpoint_freq": 5,
            "max_failures": 999,
            "stop": {"training_iteration": 500},
            "repeat": 3,
            "trial_resources": {
                "cpu": 1,
                "gpu": 0,
                # reserve the remaining CPUs for the rollout workers
                "extra_cpu": PARALLEL_ROLLOUTS - 1,
            },
        },
    })
| 30.914286 | 78 | 0.657116 | import json
import ray
import ray.rllib.ars as ars
from ray.tune import run_experiments, grid_search
from ray.tune.registry import register_env
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder
from flow.benchmarks.figureeight2 import flow_params
N_ROLLOUTS = 25
PARALLEL_ROLLOUTS = 25
if __name__ == "__main__":
create_env, env_name = make_create_env(params=flow_params, version=0)
ray.init(redis_address="localhost:6379", redirect_output=True)
config = ars.DEFAULT_CONFIG.copy()
config["num_workers"] = PARALLEL_ROLLOUTS
config["num_deltas"] = N_ROLLOUTS
config["deltas_used"] = grid_search([25, 50])
config["sgd_stepsize"] = .01
config["delta_std"] = grid_search([.01, .02])
config['policy'] = 'Linear'
config["observation_filter"] = "NoFilter"
config['eval_rollouts'] = PARALLEL_ROLLOUTS
flow_json = json.dumps(flow_params, cls=FlowParamsEncoder, sort_keys=True,
indent=4)
config['env_config']['flow_params'] = flow_json
register_env(env_name, create_env)
trials = run_experiments({
flow_params["exp_tag"]: {
"run": "ARS",
"env": env_name,
"config": {
**config
},
"checkpoint_freq": 5,
"max_failures": 999,
"stop": {"training_iteration": 500},
"repeat": 3,
"trial_resources": {
"cpu": 1,
"gpu": 0,
"extra_cpu": PARALLEL_ROLLOUTS - 1,
},
},
})
| true | true |
f732169581c39351b8a2047a34b4fbc17e58829f | 1,325 | py | Python | test/integration/modules/test_sfp_comodo.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | test/integration/modules/test_sfp_comodo.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | test/integration/modules/test_sfp_comodo.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | import pytest
import unittest
from modules.sfp_comodo import sfp_comodo
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleIntegrationcomodo(unittest.TestCase):
    """Integration test for the sfp_comodo SpiderFoot module."""

    def test_handleEvent_event_data_safe_internet_name_not_blocked_should_not_return_event(self):
        """A domain not blocked by Comodo must produce no new event."""
        # self.default_options is presumably injected by a pytest fixture
        # (see @pytest.mark.usefixtures) — it is not defined in this class.
        sf = SpiderFoot(self.default_options)

        module = sfp_comodo()
        module.setup(sf, dict())

        target_value = 'spiderfoot.net'
        target_type = 'INTERNET_NAME'
        target = SpiderFootTarget(target_value, target_type)
        module.setTarget(target)

        # Replace notifyListeners so any emitted event fails the test loudly.
        def new_notifyListeners(self, event):
            raise Exception(f"Raised event {event.eventType}: {event.data}")

        module.notifyListeners = new_notifyListeners.__get__(module, sfp_comodo)

        # Build a ROOT event and chain an INTERNET_NAME event off it.
        event_type = 'ROOT'
        event_data = 'example data'
        event_module = ''
        source_event = ''
        evt = SpiderFootEvent(event_type, event_data, event_module, source_event)

        event_type = 'INTERNET_NAME'
        event_data = 'comodo.com'
        event_module = 'example module'
        source_event = evt
        evt = SpiderFootEvent(event_type, event_data, event_module, source_event)

        result = module.handleEvent(evt)
        self.assertIsNone(result)
| 30.813953 | 97 | 0.703396 | import pytest
import unittest
from modules.sfp_comodo import sfp_comodo
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleIntegrationcomodo(unittest.TestCase):
def test_handleEvent_event_data_safe_internet_name_not_blocked_should_not_return_event(self):
sf = SpiderFoot(self.default_options)
module = sfp_comodo()
module.setup(sf, dict())
target_value = 'spiderfoot.net'
target_type = 'INTERNET_NAME'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
def new_notifyListeners(self, event):
raise Exception(f"Raised event {event.eventType}: {event.data}")
module.notifyListeners = new_notifyListeners.__get__(module, sfp_comodo)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
event_type = 'INTERNET_NAME'
event_data = 'comodo.com'
event_module = 'example module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
| true | true |
f7321779e9a3a21168aeabf948fff5b7c3a72cf1 | 4,197 | py | Python | python/ray/serve/tests/test_router.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | 1 | 2021-09-20T15:45:59.000Z | 2021-09-20T15:45:59.000Z | python/ray/serve/tests/test_router.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | 53 | 2021-10-06T20:08:04.000Z | 2022-03-21T20:17:25.000Z | python/ray/serve/tests/test_router.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | 1 | 2022-03-27T09:01:59.000Z | 2022-03-27T09:01:59.000Z | """
Unit tests for the router class. Please don't add any test that will involve
controller or the actual replica wrapper, use mock if necessary.
"""
import asyncio
import pytest
import ray
from ray.serve.common import RunningReplicaInfo
from ray.serve.router import Query, ReplicaSet, RequestMetadata
from ray._private.test_utils import SignalActor
pytestmark = pytest.mark.asyncio
@pytest.fixture
def ray_instance():
    """Start a local Ray instance for one test and shut it down after."""
    # Note(simon):
    # This line should be not turned on on master because it leads to very
    # spammy and not useful log in case of a failure in CI.
    # To run locally, please use this instead.
    # SERVE_LOG_DEBUG=1 pytest -v -s test_api.py
    # os.environ["SERVE_LOG_DEBUG"] = "1" <- Do not uncomment this.
    ray.init(num_cpus=16)
    yield
    ray.shutdown()
def mock_task_runner():
    """Return a handle to a mock replica actor that records its queries.

    The actor mimics a replica wrapper's interface: handle_request()
    stores each Query and returns (b"", "DONE"); the getter methods let
    tests inspect what was received.
    """
    @ray.remote(num_cpus=0)
    class TaskRunnerMock:
        def __init__(self):
            # Most recent Query and the full history of received Queries.
            self.query = None
            self.queries = []

        @ray.method(num_returns=2)
        async def handle_request(self, request_metadata, *args, **kwargs):
            self.query = Query(args, kwargs, request_metadata)
            self.queries.append(self.query)
            return b"", "DONE"

        def get_recent_call(self):
            return self.query

        def get_all_calls(self):
            return self.queries

        def clear_calls(self):
            self.queries = []

        async def reconfigure(self, user_config):
            # Reconfiguration is a no-op for the mock.
            return

    return TaskRunnerMock.remote()
@pytest.fixture
def task_runner_mock_actor():
    """Provide a fresh mock replica actor handle per test."""
    yield mock_task_runner()
async def test_replica_set(ray_instance):
    """Exercise ReplicaSet backpressure with two replicas of capacity 1.

    Each MockWorker blocks on a SignalActor, so queries stay in flight
    until the test releases them; a third query must queue until a slot
    frees up.
    """
    signal = SignalActor.remote()

    @ray.remote(num_cpus=0)
    class MockWorker:
        _num_queries = 0

        @ray.method(num_returns=2)
        async def handle_request(self, request):
            self._num_queries += 1
            await signal.wait.remote()
            return b"", "DONE"

        async def num_queries(self):
            return self._num_queries

    # We will test a scenario with two replicas in the replica set.
    rs = ReplicaSet(
        "my_deployment",
        asyncio.get_event_loop(),
    )
    replicas = [
        RunningReplicaInfo(
            deployment_name="my_deployment",
            replica_tag=str(i),
            actor_handle=MockWorker.remote(),
            max_concurrent_queries=1,
        )
        for i in range(2)
    ]
    rs.update_running_replicas(replicas)

    # Send two queries. They should go through the router but be blocked
    # by the signal actor inside each replica.
    query = Query([], {}, RequestMetadata("request-id", "endpoint"))
    first_ref = await rs.assign_replica(query)
    second_ref = await rs.assign_replica(query)

    # These should be blocked by signal actor.
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get([first_ref, second_ref], timeout=1)

    # Each replica should have exactly one inflight query. Make sure the
    # queries actually arrived there before proceeding.
    for replica in replicas:
        while await replica.actor_handle.num_queries.remote() != 1:
            await asyncio.sleep(1)

    # Let's try to send another query.
    third_ref_pending_task = asyncio.get_event_loop().create_task(
        rs.assign_replica(query)
    )

    # We should fail to assign a replica, so this coroutine should still be
    # pending after some time.
    await asyncio.sleep(0.2)
    assert not third_ref_pending_task.done()

    # Let's unblock the two replicas
    await signal.send.remote()
    assert await first_ref == "DONE"
    assert await second_ref == "DONE"

    # The third request should now be unblocked and assigned to a replica,
    # which means we should be able to get its object ref.
    third_ref = await third_ref_pending_task

    # Now we got the object ref, let's get its result.
    await signal.send.remote()
    assert await third_ref == "DONE"

    # Finally, make sure that one of the replicas processed the third query.
    num_queries_set = {
        (await replica.actor_handle.num_queries.remote()) for replica in replicas
    }
    assert num_queries_set == {2, 1}
# Allow running this test file directly, outside of a pytest invocation.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-s", __file__]))
| 29.145833 | 81 | 0.65928 | import asyncio
import pytest
import ray
from ray.serve.common import RunningReplicaInfo
from ray.serve.router import Query, ReplicaSet, RequestMetadata
from ray._private.test_utils import SignalActor
pytestmark = pytest.mark.asyncio
@pytest.fixture
def ray_instance():
ray.init(num_cpus=16)
yield
ray.shutdown()
def mock_task_runner():
@ray.remote(num_cpus=0)
class TaskRunnerMock:
def __init__(self):
self.query = None
self.queries = []
@ray.method(num_returns=2)
async def handle_request(self, request_metadata, *args, **kwargs):
self.query = Query(args, kwargs, request_metadata)
self.queries.append(self.query)
return b"", "DONE"
def get_recent_call(self):
return self.query
def get_all_calls(self):
return self.queries
def clear_calls(self):
self.queries = []
async def reconfigure(self, user_config):
return
return TaskRunnerMock.remote()
@pytest.fixture
def task_runner_mock_actor():
yield mock_task_runner()
async def test_replica_set(ray_instance):
signal = SignalActor.remote()
@ray.remote(num_cpus=0)
class MockWorker:
_num_queries = 0
@ray.method(num_returns=2)
async def handle_request(self, request):
self._num_queries += 1
await signal.wait.remote()
return b"", "DONE"
async def num_queries(self):
return self._num_queries
rs = ReplicaSet(
"my_deployment",
asyncio.get_event_loop(),
)
replicas = [
RunningReplicaInfo(
deployment_name="my_deployment",
replica_tag=str(i),
actor_handle=MockWorker.remote(),
max_concurrent_queries=1,
)
for i in range(2)
]
rs.update_running_replicas(replicas)
query = Query([], {}, RequestMetadata("request-id", "endpoint"))
first_ref = await rs.assign_replica(query)
second_ref = await rs.assign_replica(query)
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get([first_ref, second_ref], timeout=1)
for replica in replicas:
while await replica.actor_handle.num_queries.remote() != 1:
await asyncio.sleep(1)
third_ref_pending_task = asyncio.get_event_loop().create_task(
rs.assign_replica(query)
)
# We should fail to assign a replica, so this coroutine should still be
# pending after some time.
await asyncio.sleep(0.2)
assert not third_ref_pending_task.done()
# Let's unblock the two replicas
await signal.send.remote()
assert await first_ref == "DONE"
assert await second_ref == "DONE"
third_ref = await third_ref_pending_task
await signal.send.remote()
assert await third_ref == "DONE"
# Finally, make sure that one of the replica processed the third query.
num_queries_set = {
(await replica.actor_handle.num_queries.remote()) for replica in replicas
}
assert num_queries_set == {2, 1}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| true | true |
f73217e8aa5f2356b52094b22ac6b982820b535f | 1,605 | py | Python | module-3/app/service/mythicalMysfitsService.py | kpiljoong/aws-modern-application-workshop | 9eb05451ecb28d01fbcf875d1fd9432c4e3aa8d5 | [
"Apache-2.0"
] | 17 | 2019-11-05T05:30:16.000Z | 2021-11-25T01:20:16.000Z | module-3/app/service/mythicalMysfitsService.py | kpiljoong/aws-modern-application-workshop | 9eb05451ecb28d01fbcf875d1fd9432c4e3aa8d5 | [
"Apache-2.0"
] | null | null | null | module-3/app/service/mythicalMysfitsService.py | kpiljoong/aws-modern-application-workshop | 9eb05451ecb28d01fbcf875d1fd9432c4e3aa8d5 | [
"Apache-2.0"
] | 17 | 2019-11-04T12:27:17.000Z | 2021-12-13T05:41:12.000Z | from flask import Flask, jsonify, json, Response, request
from flask_cors import CORS
import mysfitsTableClient
# A very basic API created using Flask that has two possible routes for requests.
# Flask app with compact JSON output and CORS enabled for all routes.
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
CORS(app)
# The service basepath has a short response just to ensure that healthchecks
# sent to the service root will receive a healthy response.
@app.route("/")
def healthCheckResponse():
    """Health-check endpoint: always returns a small JSON message."""
    return jsonify({"message" : "Nothing here, used for health check. Try /mysfits instead."})
# Returns the data for all of the Mysfits to be displayed on
# the website. If no filter query string is provided, all mysfits are retrived
# and returned. If a querystring filter is provided, only those mysfits are queried.
@app.route("/mysfits")
def getMysfits():
    """Return mysfits as a JSON response.

    With ?filter=<category>&value=<v> query parameters, only matching
    mysfits are queried; otherwise all mysfits are returned.
    """
    filter_category = request.args.get('filter')
    if filter_category:
        # a filter query string was found, query only for those mysfits.
        query_param = {
            'filter': filter_category,
            'value': request.args.get('value'),
        }
        service_response = mysfitsTableClient.queryMysfits(query_param)
    else:
        # no filter was found, retrieve all mysfits.
        service_response = mysfitsTableClient.getAllMysfits()

    flask_response = Response(service_response)
    flask_response.headers["Content-Type"] = "application/json"
    return flask_response
# Run the service on the local server it has been deployed to,
# listening on port 8080.
# Run the development server, listening on all interfaces on port 8080.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
| 35.666667 | 94 | 0.720249 | from flask import Flask, jsonify, json, Response, request
from flask_cors import CORS
import mysfitsTableClient
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
CORS(app)
@app.route("/")
def healthCheckResponse():
return jsonify({"message" : "Nothing here, used for health check. Try /mysfits instead."})
@app.route("/mysfits")
def getMysfits():
filterCategory = request.args.get('filter')
if filterCategory:
filterValue = request.args.get('value')
queryParam = {
'filter': filterCategory,
'value': filterValue
}
serviceResponse = mysfitsTableClient.queryMysfits(queryParam)
else:
serviceResponse = mysfitsTableClient.getAllMysfits()
flaskResponse = Response(serviceResponse)
flaskResponse.headers["Content-Type"] = "application/json"
return flaskResponse
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| true | true |
f73218bda9737f9a09039fb3e086c4956b2a87d2 | 1,903 | py | Python | get_url.py | tracysmith/RGAPepPipe | f334c2a58f41d0b38c0d5884a430e24a21788304 | [
"MIT"
] | 3 | 2017-08-06T18:01:43.000Z | 2018-06-20T04:54:49.000Z | get_url.py | tracysmith/RGAPepPipe | f334c2a58f41d0b38c0d5884a430e24a21788304 | [
"MIT"
] | 28 | 2015-01-05T18:00:48.000Z | 2016-09-06T18:30:29.000Z | otherScripts/get_url.py | pepperell-lab/RGAPepPipe | 0122dca9aca75756ad412599c7922bf08edc7f6d | [
"MIT"
] | 2 | 2017-07-27T14:07:51.000Z | 2018-07-25T15:00:05.000Z | #!/usr/bin/python
import sys, argparse, os
from subprocess import call
from multiprocessing.dummy import Pool as ThreadPool
###################################################################
#This is a phython script to download fastq files from ENA
#You can use this directly with the enaFileParser output
###################################################################
class FullPaths(argparse.Action):
    """Expand user- and relative-paths.

    argparse Action that stores the argument as an absolute path with
    '~' expanded.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest,
                os.path.abspath(os.path.expanduser(values)))
def is_file(filename):
    """argparse type-checker: return *filename* if it is an existing file.

    Raises argparse.ArgumentTypeError otherwise, so argparse reports a
    clean usage error.
    """
    if os.path.isfile(filename):
        return filename
    raise argparse.ArgumentTypeError("{0} is not a file".format(filename))
def get_args():
    """Parse command line arguments.

    Positional: urlFile — a download list produced by enaFileParser.py
    (validated to exist and expanded to an absolute path).
    Optional: -t/--threads — download thread count (default 1).
    """
    parser = argparse.ArgumentParser(description='Download fastq files from ENA')
    parser.add_argument("urlFile", help="ERPXXXXXX_download.txt generated from enaFileParser.py", action=FullPaths,
        type=is_file)
    parser.add_argument("-t", "--threads",
        help="Number of threads to use (default: 1)",
        type=int, default=1)
    return parser.parse_args()
def make_urlList(urlFile):
    """Read one URL per line from *urlFile* and return them as a list.

    Blank lines (including the trailing newline at end of file) are
    skipped; the original version appended empty strings for them, which
    produced bogus wget invocations downstream.
    """
    with open(urlFile, 'r') as infile:
        return [line.strip() for line in infile if line.strip()]
def download_url(url):
    """Download one fastq file from *url* with wget, then gunzip it.

    The local filename is the last path component of the URL.
    """
    # List form avoids shell interpretation of special characters in the URL.
    call(['wget', url])
    filename = url.split("/")[-1]
    # BUG FIX: the previous command string contained no {filename}
    # placeholder, so gunzip was invoked without a target file.
    call(['gunzip', filename])
# Entry point: parse arguments, read the URL list, and download in parallel.
args = get_args()
urls = make_urlList(args.urlFile)
#Make the Pool of workers
pool = ThreadPool(args.threads)
#Open the urls in their own threads and return the results
pool.map(download_url, urls)
# Wait for every worker to finish before exiting.
pool.close()
pool.join()
| 31.716667 | 115 | 0.636889 |
import sys, argparse, os
from subprocess import call
from multiprocessing.dummy import Pool as ThreadPool
| true | true |
f73218c79517c1e795e724cc6b80bf59bad88d37 | 1,586 | py | Python | tempest/api/compute/servers/test_virtual_interfaces_negative.py | BeenzSyed/tempest | 7a64ee1216d844f6b99928b53f5c665b84cb8719 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_virtual_interfaces_negative.py | BeenzSyed/tempest | 7a64ee1216d844f6b99928b53f5c665b84cb8719 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_virtual_interfaces_negative.py | BeenzSyed/tempest | 7a64ee1216d844f6b99928b53f5c665b84cb8719 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
    """Negative tests for the virtual-interfaces API via the JSON interface."""
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        # For this test no network resources are needed
        cls.set_network_resources()
        super(VirtualInterfacesNegativeTestJSON, cls).setUpClass()
        cls.client = cls.servers_client

    @test.attr(type=['negative', 'gate'])
    def test_list_virtual_interfaces_invalid_server_id(self):
        # Negative test: Should not be able to GET virtual interfaces
        # for an invalid server_id; a random UUID is guaranteed not to exist.
        invalid_server_id = str(uuid.uuid4())
        self.assertRaises(exceptions.NotFound,
                          self.client.list_virtual_interfaces,
                          invalid_server_id)
class VirtualInterfacesNegativeTestXML(VirtualInterfacesNegativeTestJSON):
    """Run the same negative tests through the XML interface."""
    _interface = 'xml'
| 35.244444 | 78 | 0.716267 |
import uuid
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
cls.set_network_resources()
super(VirtualInterfacesNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
@test.attr(type=['negative', 'gate'])
def test_list_virtual_interfaces_invalid_server_id(self):
invalid_server_id = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound,
self.client.list_virtual_interfaces,
invalid_server_id)
class VirtualInterfacesNegativeTestXML(VirtualInterfacesNegativeTestJSON):
_interface = 'xml'
| true | true |
f7321a754102fe3085336a9211af18b93a46aa05 | 49,633 | py | Python | geetools/ui/ipymap.py | guy1ziv2/gee_tools | 22a0fabe0be4f0a206a0c09e28638562bb36055f | [
"MIT"
] | null | null | null | geetools/ui/ipymap.py | guy1ziv2/gee_tools | 22a0fabe0be4f0a206a0c09e28638562bb36055f | [
"MIT"
] | null | null | null | geetools/ui/ipymap.py | guy1ziv2/gee_tools | 22a0fabe0be4f0a206a0c09e28638562bb36055f | [
"MIT"
] | null | null | null | # coding=utf-8
""" This module is designed to use ONLY in the Jupyter Notebook. It is
inspired on Tyler Erickson's contribution on
https://github.com/gee-community/ee-jupyter-contrib/blob/master/examples/getting-started/display-interactive-map.ipynb
"""
import ipyleaflet
from ipywidgets import HTML, Tab, Accordion, HBox, SelectMultiple, Select,\
Button, VBox, RadioButtons, Dropdown, Layout, \
FloatRangeSlider
from IPython.display import display
from traitlets import Dict, observe
import ee
if not ee.data._initialized: ee.Initialize()
from collections import OrderedDict
from .. import tools
from .maptool import inverse_coordinates, get_image_tile, get_geojson_tile, \
get_bounds, get_zoom, feature_properties_output
from . import maptool, ipytools
import threading
from copy import copy
import traceback
import sys
class Map(ipyleaflet.Map):
    # Trait mapping tab name -> widget shown inside that tab.
    tab_children_dict = Dict()
    # Trait mapping layer name -> EELayer dict ('type', 'object', 'visParams',
    # 'layer'); observed by _ob_EELayers to rebuild the map tiles.
    EELayers = Dict()

    def __init__(self, tabs=('Inspector', 'Layers', 'Assets', 'Tasks'),
                 **kwargs):
        """ An ipyleaflet Map extended with Earth Engine layers and a set of
        companion tab widgets.

        :param tabs: names of the tabs to create; each must be one of
            'Inspector', 'Layers', 'Assets', 'Tasks'
        :type tabs: tuple
        """
        # Change defaults
        kwargs.setdefault('center', [0, 0])
        kwargs.setdefault('zoom', 2)
        kwargs.setdefault('scroll_wheel_zoom', True)
        kwargs.setdefault('max_zoom', 22)
        super(Map, self).__init__(**kwargs)
        # Guard so show() only adds controls the first time.
        self.is_shown = False

        # Correct base layer name
        baselayer = self.layers[0]
        baselayer.name = 'OpenStreetMap'
        self.layers = (baselayer,)

        # Dictionary of map's handlers
        self.handlers = {}

        # Dictonary to hold tab's widgets
        # (tab's name:widget)
        self.tab_names = []
        self.tab_children = []
        self.tab_children_dict = OrderedDict(zip(self.tab_names,
                                                 self.tab_children))

        # TABS
        # Tab widget
        self.tab_widget = Tab()
        # Handler for Tab
        self.tab_widget.observe(self.handle_change_tab)

        self.tabs = tabs
        if len(tabs) > 0:
            # TODO: create widgets only if are in tuple
            # Inspector Widget (Accordion)
            self.inspector_wid = CustomInspector()
            self.inspector_wid.main.selected_index = None  # this will unselect all

            # Task Manager Widget
            task_manager = ipytools.TaskManager()

            # Asset Manager Widget
            asset_manager = ipytools.AssetManager(self)

            # Layers
            self.layers_widget = LayersWidget(map=self)

            widgets = {'Inspector': self.inspector_wid,
                       'Layers': self.layers_widget,
                       'Assets': asset_manager,
                       'Tasks': task_manager,
                       }
            handlers = {'Inspector': self.handle_inspector,
                        'Layers': None,
                        'Assets': None,
                        'Tasks': None,
                        }

            # Add tabs and handlers
            for tab in tabs:
                if tab in widgets.keys():
                    widget = widgets[tab]
                    handler = handlers[tab]
                    self.addTab(tab, handler, widget)
                else:
                    raise ValueError('Tab {} is not recognized. Choose one of {}'.format(tab, widgets.keys()))

            # First handler: Inspector
            self.on_interaction(self.handlers[tabs[0]])

        # As I cannot create a Geometry with a GeoJSON string I do a workaround
        self.draw_types = {'Polygon': ee.Geometry.Polygon,
                           'Point': ee.Geometry.Point,
                           'LineString': ee.Geometry.LineString,
                           }
        # create EELayers
        self.EELayers = OrderedDict()
def _add_EELayer(self, name, data):
''' add a pair of name, data to EELayers '''
copyEELayers = copy(self.EELayers)
copyEELayers[name] = data
self.EELayers = copyEELayers
def _remove_EELayer(self, name):
''' remove layer from EELayers '''
copyEELayers = copy(self.EELayers)
if name in copyEELayers:
copyEELayers.pop(name)
self.EELayers = copyEELayers
def move(self, layer_name, direction='up'):
''' Move one step up a layer '''
names = self.EELayers.keys()
values = self.EELayers.values()
if direction == 'up':
dir = 1
elif direction == 'down':
dir = -1
else:
dir = 0
if layer_name in names: # if layer exists
# index and value of layer to move
i = names.index(layer_name)
condition = (i < len(names)-1) if dir == 1 else (i > 0)
if condition: # if layer is not in the edge
ival = values[i]
# new index for layer
newi = i+dir
# get index and value that already exist in the new index
iname_before = names[newi]
ival_before = values[newi]
# Change order
# set layer and value in the new index
names[newi] = layer_name
values[newi] = ival
# set replaced layer and its value in the index of moving layer
names[i] = iname_before
values[i] = ival_before
newlayers = OrderedDict(zip(names, values))
self.EELayers = newlayers
    @observe('EELayers')
    def _ob_EELayers(self, change):
        """ Rebuild the ipyleaflet layer stack and refresh the Inspector and
        Layers selectors whenever the EELayers trait is replaced. """
        new = change['new']
        # Keep the base (OpenStreetMap) layer, then stack EE layers in order.
        proxy_layers = [self.layers[0]]
        for val in new.values():
            layer = val['layer']
            proxy_layers.append(layer)
        self.layers = tuple(proxy_layers)

        # UPDATE INSPECTOR
        # Clear options
        self.inspector_wid.selector.options = {}
        # Add layer to the Inspector Widget
        self.inspector_wid.selector.options = new  # self.EELayers

        # UPDATE LAYERS WIDGET
        # update Layers Widget
        self.layers_widget.selector.options = {}
        self.layers_widget.selector.options = new  # self.EELayers
@property
def added_images(self):
return sum(
[1 for val in self.EELayers.values() if val['type'] == 'Image'])
@property
def added_geometries(self):
return sum(
[1 for val in self.EELayers.values() if val['type'] == 'Geometry'])
    def task_widget(self):
        # NOTE(review): this loops forever re-fetching the task list, discards
        # every result, and shadows the builtin `list`. `self.tasksWid` is not
        # defined anywhere visible in this file. Looks like unfinished work —
        # confirm before relying on it.
        with self.tasksWid:
            while True:
                list = ee.data.getTaskList()
    def show(self, tabs=True, layer_control=True, draw_control=False):
        """ Show the Map on the Notebook.

        :param tabs: also display the tab widget below the map
        :param layer_control: add an ipyleaflet LayersControl (first call only)
        :param draw_control: add an ipyleaflet DrawControl wired to
            handle_draw (first call only)
        """
        if not self.is_shown:
            if layer_control:
                # Layers Control
                lc = ipyleaflet.LayersControl()
                self.add_control(lc)
            if draw_control:
                # Draw Control
                dc = ipyleaflet.DrawControl(# edit=False,
                                            # marker={'shapeOptions': {}}
                                            )
                dc.on_draw(self.handle_draw)
                self.add_control(dc)
            if tabs:
                display(self, self.tab_widget)
            else:
                display(self)
        else:
            # Already shown once: just re-display; controls were added before.
            # if len(tabs) > 0:
            if tabs:
                display(self, self.tab_widget)
            else:
                display(self)
        self.is_shown = True
def show_tab(self, name):
""" Show only a Tab Widget by calling its name. This is useful mainly
in Jupyter Lab where you can see outputs in different tab_widget
:param name: the name of the tab to show
:type name: str
"""
try:
widget = self.tab_children_dict[name]
display(widget)
except:
print('Tab not found')
    def addImage(self, image, visParams=None, name=None, show=True,
                 opacity=None, replace=True):
        """ Build the EELayer dict for an ee.Image (tile layer included).

        :param image: Image to add to Map
        :type image: ee.Image
        :param visParams: visualization parameters. Can have the
            following arguments: bands, min, max.
        :type visParams: dict
        :param name: name for the layer
        :type name: str
        :param replace: when False and *name* already exists, do nothing
        :type replace: bool
        :return: the EELayer dict ('type', 'object', 'visParams', 'layer');
            note it is NOT registered in EELayers here — callers such as
            addLayer do that
        :rtype: dict
        """
        # Check if layer exists
        if name in self.EELayers.keys():
            if not replace:
                msg = "Image with name '{}' exists already, please choose " \
                      "another name"
                print(msg.format(name))
                return
            else:
                # Get URL, attribution & vis params
                params = get_image_tile(image, visParams, show, opacity)

                # Remove Layer
                self.removeLayer(name)
        else:
            # Get URL, attribution & vis params
            params = get_image_tile(image, visParams, show, opacity)

        layer = ipyleaflet.TileLayer(url=params['url'],
                                     attribution=params['attribution'],
                                     name=name)
        EELayer = {'type': 'Image',
                   'object': image,
                   'visParams': params['visParams'],
                   'layer': layer}
        # self._add_EELayer(name, EELayer)
        # return name
        return EELayer
def addMarker(self, marker, visParams=None, name=None, show=True,
opacity=None, replace=True,
inspect={'data':None, 'reducer':None, 'scale':None}):
''' General method to add Geometries, Features or FeatureCollections
as Markers '''
if isinstance(marker, ee.Geometry):
self.addGeometry(marker, visParams, name, show, opacity, replace,
inspect)
elif isinstance(marker, ee.Feature):
self.addFeature(marker, visParams, name, show, opacity, replace,
inspect)
elif isinstance(marker, ee.FeatureCollection):
geometry = marker.geometry()
self.addGeometry(marker, visParams, name, show, opacity, replace,
inspect)
    def addFeature(self, feature, visParams=None, name=None, show=True,
                   opacity=None, replace=True,
                   inspect={'data':None, 'reducer':None, 'scale':None}):
        """ Add a Feature to the Map as a GeoJSON layer.

        :param feature: the Feature to add to Map
        :type feature: ee.Feature
        :param visParams:
        :type visParams: dict
        :param name: name for the layer
        :type name: str
        :param inspect: when adding a geometry or a feature you can pop up data
            from a desired layer. Params are:
            :data: the EEObject where to get the data from
            :reducer: the reducer to use
            :scale: the scale to reduce
        :type inspect: dict
        :return: the name of the added layer
        :rtype: str
        """
        # NOTE(review): `inspect` is a mutable default argument; safe only if
        # get_geojson_tile never mutates it — confirm.
        thename = name if name else 'Feature {}'.format(self.added_geometries)

        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("Layer with name '{}' exists already, please choose another name".format(thename))
                return
            else:
                self.removeLayer(thename)

        params = get_geojson_tile(feature, thename, inspect)
        layer = ipyleaflet.GeoJSON(data=params['geojson'],
                                   name=thename,
                                   popup=HTML(params['pop']))
        self._add_EELayer(thename, {'type': 'Feature',
                                    'object': feature,
                                    'visParams': None,
                                    'layer': layer})
        return thename
    def addGeometry(self, geometry, visParams=None, name=None, show=True,
                    opacity=None, replace=True,
                    inspect={'data':None, 'reducer':None, 'scale':None}):
        """ Add a Geometry to the Map as a GeoJSON layer.

        :param geometry: the Geometry to add to Map
        :type geometry: ee.Geometry
        :param visParams:
        :type visParams: dict
        :param name: name for the layer
        :type name: str
        :param inspect: when adding a geometry or a feature you can pop up data
            from a desired layer. Params are:
            :data: the EEObject where to get the data from
            :reducer: the reducer to use
            :scale: the scale to reduce
        :type inspect: dict
        :return: the name of the added layer
        :rtype: str
        """
        # NOTE(review): `inspect` is a mutable default argument; safe only if
        # get_geojson_tile never mutates it — confirm.
        thename = name if name else 'Geometry {}'.format(self.added_geometries)

        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("Layer with name '{}' exists already, please choose another name".format(thename))
                return
            else:
                self.removeLayer(thename)

        params = get_geojson_tile(geometry, thename, inspect)
        layer = ipyleaflet.GeoJSON(data=params['geojson'],
                                   name=thename,
                                   popup=HTML(params['pop']))
        self._add_EELayer(thename, {'type': 'Geometry',
                                    'object': geometry,
                                    'visParams': None,
                                    'layer': layer})
        return thename
    def addFeatureLayer(self, feature, visParams=None, name=None, show=True,
                        opacity=None, replace=True):
        """ Paint a Feature on the map (as rendered tiles), but the layer
        underneath is the actual added Feature.

        :param feature: feature or collection to paint
        :type feature: ee.Feature or ee.FeatureCollection
        :param visParams: may contain 'fill_color', 'outline_color' (or
            'border_color') and 'outline' (line width, default 2)
        :type visParams: dict
        :return: the name of the added layer, or None when *feature* has an
            unsupported type
        :rtype: str
        """
        visParams = visParams if visParams else {}
        if isinstance(feature, ee.Feature):
            ty = 'Feature'
        elif isinstance(feature, ee.FeatureCollection):
            ty = 'FeatureCollection'
        else:
            print('The object is not a Feature or FeatureCollection')
            return

        fill_color = visParams.get('fill_color', None)
        # 'outline_color' wins over 'border_color'; default is black.
        if 'outline_color' in visParams:
            out_color = visParams['outline_color']
        elif 'border_color' in visParams:
            out_color = visParams['border_color']
        else:
            out_color = 'black'
        outline = visParams.get('outline', 2)

        # The painted RGB image is what gets tiled; the feature itself is
        # stored as the layer's object.
        proxy_layer = maptool.paint(feature, out_color, fill_color, outline)
        thename = name if name else '{} {}'.format(ty, self.added_geometries)
        img_params = {'bands':['vis-red', 'vis-green', 'vis-blue'],
                      'min': 0, 'max':255}

        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("{} with name '{}' exists already, please choose another name".format(ty, thename))
                return
            else:
                # Get URL, attribution & vis params
                params = get_image_tile(proxy_layer, img_params, show, opacity)
                # Remove Layer
                self.removeLayer(thename)
        else:
            # Get URL, attribution & vis params
            params = get_image_tile(proxy_layer, img_params, show, opacity)

        layer = ipyleaflet.TileLayer(url=params['url'],
                                     attribution=params['attribution'],
                                     name=thename)
        self._add_EELayer(thename, {'type': ty,
                                    'object': feature,
                                    'visParams': visParams,
                                    'layer': layer})
        return thename
def addMosaic(self, collection, visParams=None, name=None, show=False,
opacity=None, replace=True):
''' Add an ImageCollection to EELayer and its mosaic to the Map.
When using the inspector over this layer, it will print all values from
the collection '''
proxy = ee.ImageCollection(collection).sort('system:time_start')
mosaic = ee.Image(proxy.mosaic())
EELayer = self.addImage(mosaic, visParams, name, show, opacity, replace)
# modify EELayer
EELayer['type'] = 'ImageCollection'
EELayer['object'] = ee.ImageCollection(collection)
return EELayer
    def addImageCollection(self, collection, visParams=None, nametags=['id'],
                           show=False, opacity=None):
        """ Add every Image of an ImageCollection to the Map as its own layer.

        :param collection: the ImageCollection
        :type collection: ee.ImageCollection
        :param visParams: visualization parameter for each image. See `addImage`
        :type visParams: dict
        :param nametags: tags that will be the name for each image. It must be
            a list in which each element is a string. Each string can be any
            Image property, or one of the following:
            - system_date: the name will be the date of each Image
            - id: the name will be the ID of each Image (Default)
        :type nametags: list
        :param show: If True, adds and shows the Image, otherwise only add it
        :type show: bool
        """
        # NOTE(review): `nametags` is a mutable default argument; it is only
        # iterated here, so this is benign.
        size = collection.size().getInfo()
        collist = collection.toList(size)
        separation = ' '
        for inx in range(size):
            img = ee.Image(collist.get(inx))
            name = ''
            properties = img.propertyNames().getInfo()
            # Concatenate one name fragment per requested tag.
            for nametag in nametags:
                if nametag == 'id':
                    newname = img.id().getInfo()
                elif nametag == 'system_date':
                    newname = ee.Date(img.date()).format('YYYY-MM-dd').getInfo()
                elif nametag in properties:
                    newname = "{}:{}{}".format(nametag, img.get(nametag).getInfo(), separation)
                else:
                    # Unknown tag: fall back to the image id.
                    newname = img.id().getInfo()
                name += newname
            self.addLayer(img, visParams, str(name), show, opacity)
    def addLayer(self, eeObject, visParams=None, name=None, show=True,
                 opacity=None, replace=True, **kwargs):
        """ Adds a given EE object to the map as a layer.

        :param eeObject: Earth Engine object to add to map
        :type eeObject: ee.Image || ee.Geometry || ee.Feature ||
            ee.FeatureCollection || ee.ImageCollection
        :param replace: if True, if there is a layer with the same name, this
            replace that layer.
        :type replace: bool

        For ee.Image and ee.ImageCollection see `addImage`,
        for ee.Geometry and ee.Feature see `addGeometry`.
        """
        visParams = visParams if visParams else {}

        # CASE: ee.Image
        if isinstance(eeObject, ee.Image):
            image_name = name if name else 'Image {}'.format(self.added_images)
            EELayer = self.addImage(eeObject, visParams=visParams,
                                    name=image_name, show=show,
                                    opacity=opacity, replace=replace)
            self._add_EELayer(image_name, EELayer)
            added_layer = EELayer
        # CASE: ee.Geometry
        elif isinstance(eeObject, ee.Geometry):
            geom = eeObject if isinstance(eeObject, ee.Geometry) else eeObject.geometry()
            kw = {'visParams':visParams, 'name':name, 'show':show, 'opacity':opacity}
            # forward an optional inspect dict to addGeometry
            if kwargs.get('inspect'): kw.setdefault('inspect', kwargs.get('inspect'))
            added_layer = self.addGeometry(geom, replace=replace, **kw)
        # CASE: ee.Feature & ee.FeatureCollection
        elif isinstance(eeObject, ee.Feature) or isinstance(eeObject, ee.FeatureCollection):
            feat = eeObject
            kw = {'visParams':visParams, 'name':name, 'show':show, 'opacity':opacity}
            added_layer = self.addFeatureLayer(feat, replace=replace, **kw)
        # CASE: ee.ImageCollection
        elif isinstance(eeObject, ee.ImageCollection):
            '''
            proxy = eeObject.sort('system:time_start')
            mosaic = ee.Image(proxy.mosaic())
            added_layer = self.addImage(mosaic, visParams=visParams, name=thename,
                                        show=show, opacity=opacity, replace=replace)
            '''
            thename = name if name else 'ImageCollection {}'.format(self.added_images)
            EELayer = self.addMosaic(eeObject, visParams, thename, show,
                                     opacity, replace)
            self._add_EELayer(thename, EELayer)
            added_layer = EELayer
        else:
            added_layer = None
            print("`addLayer` doesn't support adding {} objects to the map".format(type(eeObject)))
        # return added_layer
def removeLayer(self, name):
""" Remove a layer by its name """
if name in self.EELayers.keys():
self._remove_EELayer(name)
else:
print('Layer {} is not present in the map'.format(name))
return
def getLayer(self, name):
""" Get a layer by its name
:param name: the name of the layer
:type name: str
:return: The complete EELayer which is a dict of
:type: the type of the layer
:object: the EE Object associated with the layer
:visParams: the visualization parameters of the layer
:layer: the TileLayer added to the Map (ipyleaflet.Map)
:rtype: dict
"""
if name in self.EELayers:
layer = self.EELayers[name]
return layer
else:
print('Layer {} is not present in the map'.format(name))
return
def getObject(self, name):
''' Get the EE Object from a layer's name '''
obj = self.getLayer(name)['object']
return obj
def getVisParams(self, name):
''' Get the Visualization Parameters from a layer's name '''
vis = self.getLayer(name)['visParams']
return vis
def centerObject(self, eeObject, zoom=None, method=1):
""" Center an eeObject
:param eeObject:
:param zoom:
:param method: experimetal methods to estimate zoom for fitting bounds
Currently: 1 or 2
:type: int
"""
bounds = get_bounds(eeObject)
if bounds:
try:
inverse = inverse_coordinates(bounds)
centroid = ee.Geometry.Polygon(inverse)\
.centroid().getInfo()['coordinates']
except:
centroid = [0, 0]
self.center = inverse_coordinates(centroid)
if zoom:
self.zoom = zoom
else:
self.zoom = get_zoom(bounds, method)
def getCenter(self):
""" Returns the coordinates at the center of the map.
No arguments.
Returns: Geometry.Point
:return:
"""
center = self.center
coords = inverse_coordinates(center)
return ee.Geometry.Point(coords)
def getBounds(self, asGeoJSON=True):
""" Returns the bounds of the current map view, as a list in the
format [west, south, east, north] in degrees.
Arguments:
asGeoJSON (Boolean, optional):
If true, returns map bounds as GeoJSON.
Returns: GeoJSONGeometry|List<Number>|String
"""
bounds = inverse_coordinates(self.bounds)
if asGeoJSON:
return ee.Geometry.Rectangle(bounds)
else:
return bounds
def _update_tab_children(self):
""" Update Tab children from tab_children_dict """
# Set tab_widget children
self.tab_widget.children = tuple(self.tab_children_dict.values())
# Set tab_widget names
for i, name in enumerate(self.tab_children_dict.keys()):
self.tab_widget.set_title(i, name)
    def addTab(self, name, handler=None, widget=None):
        """ Add a Tab to the Panel. The handler is for the Map.

        :param name: name for the new tab
        :type name: str
        :param handler: handle function for the new tab. Arguments of the
            function are:
            - type: the type of the event (click, mouseover, etc..)
            - coordinates: coordinates where the event occurred [lon, lat]
            - widget: the widget inside the Tab
            - map: the Map instance
        :param widget: widget inside the Tab. Defaults to HTML('')
        :type widget: ipywidgets.Widget
        """
        # Widget
        wid = widget if widget else HTML('')
        # Get tab's children as a list
        # tab_children = list(self.tab_widget.children)
        tab_children = self.tab_children_dict.values()
        # Get a list of tab's titles
        # titles = [self.tab_widget.get_title(i) for i, child in enumerate(tab_children)]
        titles = self.tab_children_dict.keys()
        # Check if tab already exists
        if name not in titles:
            ntabs = len(tab_children)

            # UPDATE DICTS
            # Add widget as a new children
            self.tab_children_dict[name] = wid

            # Set the handler for the new tab
            if handler:
                def proxy_handler(f):
                    # Wrap the user handler so it also receives the tab's
                    # widget, inverted coordinates and the map itself.
                    def wrap(**kwargs):
                        # Add widget to handler arguments
                        kwargs['widget'] = self.tab_children_dict[name]
                        coords = kwargs['coordinates']
                        kwargs['coordinates'] = inverse_coordinates(coords)
                        kwargs['map'] = self
                        return f(**kwargs)
                    return wrap
                self.handlers[name] = proxy_handler(handler)
            else:
                self.handlers[name] = handler

            # Update tab children
            self._update_tab_children()
        else:
            print('Tab {} already exists, please choose another name'.format(name))
    def handle_change_tab(self, change):
        """ Handle function to trigger when the selected tab changes: detach
        every map interaction handler, then attach the one belonging to the
        newly selected tab (if any). """
        if change['name'] == 'selected_index':
            old = change['old']
            new = change['new']
            old_name = self.tab_widget.get_title(old)
            new_name = self.tab_widget.get_title(new)
            # Remove all handlers
            for handl in self.handlers.values():
                # second argument True means "remove this callback"
                self.on_interaction(handl, True)
            # Set new handler if not None
            if new_name in self.handlers.keys():
                handler = self.handlers[new_name]
                if handler:
                    self.on_interaction(handler)
    def handle_inspector(self, **change):
        """ Handle function for the Inspector Widget.

        On a map click, builds an Accordion with one row per layer selected
        in the inspector selector, showing the pixel/feature values at the
        clicked point. Errors for a layer become an ErrorAccordion row
        instead of aborting the whole inspection.

        :param change: interaction event; uses 'type', 'coordinates' and
            'widget' (injected by addTab's proxy handler)
        """
        # Get click coordinates
        coords = change['coordinates']

        event = change['type']  # event type
        if event == 'click':  # If the user clicked
            # create a point where the user clicked
            point = ee.Geometry.Point(coords)

            # Get widget
            thewidget = change['widget'].main  # Accordion

            # First Accordion row text (name)
            first = 'Point {} at {} zoom'.format(coords, self.zoom)
            namelist = [first]
            wids4acc = [HTML('')]  # first row has no content

            # Get only Selected Layers in the Inspector Selector
            selected_layers = dict(zip(self.inspector_wid.selector.label,
                                       self.inspector_wid.selector.value))

            length = len(selected_layers.keys())
            i = 1

            for name, obj in selected_layers.items():  # for every added layer
                # Clear children // Loading
                thewidget.children = [HTML('wait a second please..')]
                thewidget.set_title(0, 'Loading {} of {}...'.format(i, length))
                i += 1

                # Image
                if obj['type'] == 'Image':
                    # Get the image's values
                    try:
                        image = obj['object']
                        values = tools.image.get_value(image, point, scale=1,
                                                       side='client')
                        values = tools.dictionary.sort(values)
                        # Create the content
                        img_html = ''
                        for band, value in values.items():
                            img_html += '<b>{}</b>: {}</br>'.format(band,
                                                                    value)
                        wid = HTML(img_html)
                        # append widget to list of widgets
                        wids4acc.append(wid)
                        namelist.append(name)
                    except Exception as e:
                        # wid = HTML(str(e).replace('<','{').replace('>','}'))
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))

                # ImageCollection
                if obj['type'] == 'ImageCollection':
                    # Get the values from all images
                    try:
                        collection = obj['object']
                        values = tools.image.get_values(collection, point, scale=1,
                                                        properties=['system:time_start'],
                                                        side='client')
                        # header: union of all band names over all images
                        allbands = [val.keys() for bands, val in values.items()]
                        bands = []
                        for bandlist in allbands:
                            for band in bandlist:
                                if band not in bands:
                                    bands.append(band)
                        header = ['image']+bands
                        # rows: one per image, columns aligned with header
                        rows = []
                        for imgid, val in values.items():
                            row = ['']*len(header)
                            row[0] = str(imgid)
                            for bandname, bandvalue in val.items():
                                # NOTE(review): `if pos:` skips column 0; only
                                # an issue if a band were named 'image'.
                                pos = header.index(bandname) if bandname in header else None
                                if pos:
                                    row[pos] = str(bandvalue)
                            rows.append(row)
                        # Create the content
                        html = maptool.create_html_table(header, rows)
                        wid = HTML(html)
                        # append widget to list of widgets
                        wids4acc.append(wid)
                        namelist.append(name)
                    except Exception as e:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))

                # Features
                if obj['type'] == 'Feature':
                    try:
                        feat = obj['object']
                        feat_geom = feat.geometry()
                        # only show the row when the feature contains the click
                        if feat_geom.contains(point).getInfo():
                            info = feature_properties_output(feat)
                            wid = HTML(info)
                            # append widget to list of widgets
                            wids4acc.append(wid)
                            namelist.append(name)
                    except Exception as e:
                        # wid = HTML(str(e).replace('<','{').replace('>','}'))
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))

                # FeatureCollections
                if obj['type'] == 'FeatureCollection':
                    try:
                        fc = obj['object']
                        filtered = fc.filterBounds(point)
                        # show the first feature that intersects the click
                        if filtered.size().getInfo() > 0:
                            feat = ee.Feature(filtered.first())
                            info = feature_properties_output(feat)
                            wid = HTML(info)
                            # append widget to list of widgets
                            wids4acc.append(wid)
                            namelist.append(name)
                    except Exception as e:
                        wid = HTML(str(e).replace('<','{').replace('>','}'))
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))

            # Set children and children's name of inspector widget
            thewidget.children = wids4acc
            for i, n in enumerate(namelist):
                thewidget.set_title(i, n)
    def handle_object_inspector(self, **change):
        """ Handle function for the Object Inspector Widget.

        On click, fills the widget with one accordion per layer showing the
        result of getInfo() on the layer's EE object.

        DEPRECATED
        """
        event = change['type']  # event type
        thewidget = change['widget']
        if event == 'click':  # If the user clicked
            # Clear children // Loading
            thewidget.children = [HTML('wait a second please..')]
            thewidget.set_title(0, 'Loading...')

            widgets = []
            i = 0

            for name, obj in self.EELayers.items():  # for every added layer
                the_object = obj['object']
                try:
                    properties = the_object.getInfo()
                    wid = ipytools.create_accordion(properties)  # Accordion
                    wid.selected_index = None  # this will unselect all
                except Exception as e:
                    # a failing layer becomes a plain error message row
                    wid = HTML(str(e))
                widgets.append(wid)
                thewidget.set_title(i, name)
                i += 1

            thewidget.children = widgets
def handle_draw(self, dc_widget, action, geo_json):
""" Handles drawings """
ty = geo_json['geometry']['type']
coords = geo_json['geometry']['coordinates']
geom = self.draw_types[ty](coords)
if action == 'created':
self.addGeometry(geom)
elif action == 'deleted':
for key, val in self.EELayers.items():
if geom == val:
self.removeLayer(key)
class CustomInspector(HBox):
    """ Horizontal box pairing a multi-layer selector with an Accordion that
    displays the inspected values. """

    def __init__(self, **kwargs):
        super(CustomInspector, self).__init__(
            description='Select one or more layers', **kwargs)
        self.selector = SelectMultiple()
        self.main = Accordion()
        self.children = [self.selector, self.main]
class ErrorAccordion(Accordion):
    """ Accordion with two panes showing an exception and its traceback.

    Angle brackets are swapped for braces so the text displays inside an
    HTML widget instead of being parsed as markup.
    """

    def __init__(self, error, traceback, **kwargs):
        super(ErrorAccordion, self).__init__(**kwargs)
        self.error = '{}'.format(error).replace('<','{').replace('>','}')

        # Skip the first traceback entry and join the rest with HTML breaks.
        pieces = []
        for trace in traceback[1:]:
            pieces.append('{}'.format(trace).replace('<','{').replace('>','}'))
            pieces.append('</br>')
        self.traceback = ''.join(pieces)

        self.errorWid = HTML(self.error)
        self.traceWid = HTML(self.traceback)
        self.children = (self.errorWid, self.traceWid)
        self.set_title(0, 'ERROR')
        self.set_title(1, 'TRACEBACK')
class LayersWidget(ipytools.RealBox):
    def __init__(self, map=None, **kwargs):
        """ Widget listing the map's layers with buttons to center, remove,
        inspect, re-style and reorder the selected layer.

        :param map: the Map instance this widget controls
        """
        super(LayersWidget, self).__init__(**kwargs)
        self.map = map

        self.selector = Select()
        # define init EELayer
        self.EELayer = None

        # Buttons
        self.center = Button(description='Center')
        self.center.on_click(self.on_click_center)

        self.remove = Button(description='Remove')
        self.remove.on_click(self.on_click_remove)

        self.show_prop = Button(description='Show Object')
        self.show_prop.on_click(self.on_click_show_object)

        # NOTE(review): handle_selection later overwrites self.vis with the
        # selected layer's visParams, losing this Button reference (the
        # button itself stays alive inside group1) — confirm intent.
        self.vis = Button(description='Visualization')
        self.vis.on_click(self.on_click_vis)

        self.move_up = Button(description='Move up')
        self.move_up.on_click(self.on_up)

        self.move_down = Button(description='Move down')
        self.move_down.on_click(self.on_down)

        # Buttons Group 1
        self.group1 = VBox([self.center, self.remove,
                            self.vis, self.show_prop])
        # Buttons Group 2
        self.group2 = VBox([self.move_up, self.move_down])

        # self.children = [self.selector, self.group1]
        self.items = [[self.selector, self.group1, self.group2]]

        self.selector.observe(self.handle_selection, names='value')
    def on_up(self, button=None):
        # Move the currently selected layer one step up in the map ordering.
        if self.EELayer:
            self.map.move(self.layer.name, 'up')

    def on_down(self, button=None):
        # Move the currently selected layer one step down in the map ordering.
        if self.EELayer:
            self.map.move(self.layer.name, 'down')
    def handle_selection(self, change):
        # Cache the newly selected EELayer and its parts for the button
        # handlers; reset the widget to its default layout.
        new = change['new']
        self.EELayer = new
        # set original display
        self.items = [[self.selector, self.group1, self.group2]]
        if new:
            self.layer = new['layer']
            self.obj = new['object']
            self.ty = new['type']
            # NOTE(review): this overwrites the 'Visualization' Button stored
            # in self.vis by __init__ — confirm intent.
            self.vis = new['visParams']
    def on_click_show_object(self, button=None):
        # Show the selected layer's EE object asynchronously: display a
        # loading placeholder and let a worker thread fill the widget.
        if self.EELayer:
            loading = HTML('Loading <b>{}</b>...'.format(self.layer.name))
            widget = VBox([loading])
            # widget = ipytools.create_object_output(self.obj)
            thread = threading.Thread(target=ipytools.create_async_output,
                                      args=(self.obj, widget))
            self.items = [[self.selector, self.group1],
                          [widget]]
            thread.start()
    def on_click_center(self, button=None):
        # Center the map on the selected layer's EE object.
        if self.EELayer:
            self.map.centerObject(self.obj)

    def on_click_remove(self, button=None):
        # Remove the selected layer from the map.
        if self.EELayer:
            self.map.removeLayer(self.layer.name)
def on_click_vis(self, button=None):
if self.EELayer:
# options
selector = self.selector
group1 = self.group1
# map
map = self.map
layer_name = self.layer.name
image = self.obj
# Image Bands
try:
info = self.obj.getInfo()
except Exception as e:
self.items = [[self.selector, self.group1],
[HTML(str(e))]]
return
# IMAGES
if self.ty == 'Image':
### image data ###
bands = info['bands']
imbands = [band['id'] for band in bands]
bands_type = [band['data_type']['precision'] for band in bands]
bands_min = []
bands_max = []
# as float bands don't hava an specific range, reduce region to get the
# real range
if 'float' in bands_type:
try:
minmax = image.reduceRegion(ee.Reducer.minMax())
for band in bands:
bandname = band['id']
try:
tmin = minmax.get('{}_min'.format(bandname)).getInfo() # 0
tmax = minmax.get('{}_max'.format(bandname)).getInfo() # 1
except:
tmin = 0
tmax = 1
bands_min.append(tmin)
bands_max.append(tmax)
except:
for band in bands:
dt = band['data_type']
try:
tmin = dt['min']
tmax = dt['max']
except:
tmin = 0
tmax = 1
bands_min.append(tmin)
bands_max.append(tmax)
else:
for band in bands:
dt = band['data_type']
try:
tmin = dt['min']
tmax = dt['max']
except:
tmin = 0
tmax = 1
bands_min.append(tmin)
bands_max.append(tmax)
# dict of {band: min} and {band:max}
min_dict = dict(zip(imbands, bands_min))
max_dict = dict(zip(imbands, bands_max))
######
# Layer data
layer_data = self.map.EELayers[layer_name]
visParams = layer_data['visParams']
# vis bands
visBands = visParams['bands'].split(',')
# vis min
visMin = visParams['min']
if isinstance(visMin, str):
visMin = [float(vis) for vis in visMin.split(',')]
else:
visMin = [visMin]
# vis max
visMax = visParams['max']
if isinstance(visMax, str):
visMax = [float(vis) for vis in visMax.split(',')]
else:
visMax = [visMax]
# dropdown handler
def handle_dropdown(band_slider):
def wrap(change):
new = change['new']
band_slider.min = min_dict[new]
band_slider.max = max_dict[new]
return wrap
def slider_1band(float=False, name='band'):
''' Create the widget for one band '''
# get params to set in slider and dropdown
vismin = visMin[0]
vismax = visMax[0]
band = visBands[0]
drop = Dropdown(description=name, options=imbands, value=band)
if float:
slider = ipytools.FloatBandWidget(min=min_dict[drop.value],
max=max_dict[drop.value])
else:
slider = FloatRangeSlider(min=min_dict[drop.value],
max=max_dict[drop.value],
value=[vismin, vismax],
step=0.01)
# set handler
drop.observe(handle_dropdown(slider), names=['value'])
# widget for band selector + slider
band_slider = HBox([drop, slider])
# return VBox([band_slider], layout=Layout(width='500px'))
return band_slider
def slider_3bands(float=False):
''' Create the widget for one band '''
# get params to set in slider and dropdown
if len(visMin) == 1:
visminR = visminG = visminB = visMin[0]
else:
visminR = visMin[0]
visminG = visMin[1]
visminB = visMin[2]
if len(visMax) == 1:
vismaxR = vismaxG = vismaxB = visMax[0]
else:
vismaxR = visMax[0]
vismaxG = visMax[1]
vismaxB = visMax[2]
if len(visBands) == 1:
visbandR = visbandG = visbandB = visBands[0]
else:
visbandR = visBands[0]
visbandG = visBands[1]
visbandB = visBands[2]
drop = Dropdown(description='red', options=imbands, value=visbandR)
drop2 = Dropdown(description='green', options=imbands, value=visbandG)
drop3 = Dropdown(description='blue', options=imbands, value=visbandB)
slider = FloatRangeSlider(min=min_dict[drop.value],
max=max_dict[drop.value],
value=[visminR, vismaxR],
step=0.01)
slider2 = FloatRangeSlider(min=min_dict[drop2.value],
max=max_dict[drop2.value],
value=[visminG, vismaxG],
step=0.01)
slider3 = FloatRangeSlider(min=min_dict[drop3.value],
max=max_dict[drop3.value],
value=[visminB, vismaxB],
step=0.01)
# set handlers
drop.observe(handle_dropdown(slider), names=['value'])
drop2.observe(handle_dropdown(slider2), names=['value'])
drop3.observe(handle_dropdown(slider3), names=['value'])
# widget for band selector + slider
band_slider = HBox([drop, slider])
band_slider2 = HBox([drop2, slider2])
band_slider3 = HBox([drop3, slider3])
return VBox([band_slider, band_slider2, band_slider3],
layout=Layout(width='700px'))
# Create widget for 1 or 3 bands
bands = RadioButtons(options=['1 band', '3 bands'],
layout=Layout(width='80px'))
# Create widget for band, min and max selection
selection = slider_1band()
# Apply button
apply = Button(description='Apply', layout=Layout(width='100px'))
# new row
new_row = [bands, selection, apply]
# update row of widgets
def update_row_items(new_row):
self.items = [[selector, group1],
new_row]
# handler for radio button (1 band / 3 bands)
def handle_radio_button(change):
new = change['new']
if new == '1 band':
# create widget
selection = slider_1band() # TODO
# update row of widgets
update_row_items([bands, selection, apply])
else:
red = slider_1band(name='red') # TODO
green = slider_1band(name='green')
blue = slider_1band(name='blue')
selection = VBox([red, green, blue])
# selection = slider_3bands()
update_row_items([bands, selection, apply])
def handle_apply(button):
radio = self.items[1][0].value # radio button
vbox = self.items[1][1]
print('vbox', vbox)
if radio == '1 band': # 1 band
hbox_band = vbox.children[0].children
band = hbox_band[0].value
min = hbox_band[1].value[0]
max = hbox_band[1].value[1]
map.addLayer(image, {'bands':[band], 'min':min, 'max':max},
layer_name)
else: # 3 bands
hbox_bandR = vbox.children[0].children
hbox_bandG = vbox.children[1].children
hbox_bandB = vbox.children[2].children
bandR = hbox_bandR[0].value
bandG = hbox_bandG[0].value
bandB = hbox_bandB[0].value
minR = hbox_bandR[1].value[0]
minG = hbox_bandG[1].value[0]
minB = hbox_bandB[1].value[0]
maxR = hbox_bandR[1].value[1]
maxG = hbox_bandG[1].value[1]
maxB = hbox_bandB[1].value[1]
map.addLayer(image, {'bands':[bandR, bandG, bandB],
'min':[float(minR), float(minG), float(minB)],
'max':[float(maxR), float(maxG), float(maxB)]},
layer_name)
bands.observe(handle_radio_button, names='value')
update_row_items(new_row)
apply.on_click(handle_apply) | 39.611333 | 118 | 0.499829 |
import ipyleaflet
from ipywidgets import HTML, Tab, Accordion, HBox, SelectMultiple, Select,\
Button, VBox, RadioButtons, Dropdown, Layout, \
FloatRangeSlider
from IPython.display import display
from traitlets import Dict, observe
import ee
if not ee.data._initialized: ee.Initialize()
from collections import OrderedDict
from .. import tools
from .maptool import inverse_coordinates, get_image_tile, get_geojson_tile, \
get_bounds, get_zoom, feature_properties_output
from . import maptool, ipytools
import threading
from copy import copy
import traceback
import sys
class Map(ipyleaflet.Map):
    """ipyleaflet Map subclass that tracks Earth Engine layers.

    Keeps an ordered registry of EE layers (`EELayers`) and a companion
    `Tab` widget with Inspector / Layers / Assets / Tasks panels. The
    registry is a traitlets Dict, so reassigning it triggers
    `_ob_EELayers`, which rebuilds the leaflet layer stack and the
    selector widgets.
    """
    # traitlets so changes to these dicts can be observed
    tab_children_dict = Dict()
    EELayers = Dict()

    def __init__(self, tabs=('Inspector', 'Layers', 'Assets', 'Tasks'),
                 **kwargs):
        """Create the map and the requested tab widgets.

        :param tabs: names of the panels to create; each must be one of
            'Inspector', 'Layers', 'Assets', 'Tasks'
        :raises ValueError: if a tab name is not recognized
        """
        kwargs.setdefault('center', [0, 0])
        kwargs.setdefault('zoom', 2)
        kwargs.setdefault('scroll_wheel_zoom', True)
        kwargs.setdefault('max_zoom', 22)
        super(Map, self).__init__(**kwargs)
        self.is_shown = False
        # keep only the base layer, and give it a readable name
        baselayer = self.layers[0]
        baselayer.name = 'OpenStreetMap'
        self.layers = (baselayer,)
        self.handlers = {}
        # Dictionary to hold tab's widgets
        self.tab_names = []
        self.tab_children = []
        self.tab_children_dict = OrderedDict(zip(self.tab_names,
                                                 self.tab_children))
        # TABS
        # Tab widget
        self.tab_widget = Tab()
        # Handler for Tab
        self.tab_widget.observe(self.handle_change_tab)
        self.tabs = tabs
        if len(tabs) > 0:
            # TODO: create widgets only if are in tuple
            # Inspector Widget (Accordion)
            self.inspector_wid = CustomInspector()
            self.inspector_wid.main.selected_index = None  # this will unselect all
            # Task Manager Widget
            task_manager = ipytools.TaskManager()
            # Asset Manager Widget
            asset_manager = ipytools.AssetManager(self)
            # Layers
            self.layers_widget = LayersWidget(map=self)
            widgets = {'Inspector': self.inspector_wid,
                       'Layers': self.layers_widget,
                       'Assets': asset_manager,
                       'Tasks': task_manager,
                       }
            handlers = {'Inspector': self.handle_inspector,
                        'Layers': None,
                        'Assets': None,
                        'Tasks': None,
                        }
            # Add tabs and handlers
            for tab in tabs:
                if tab in widgets.keys():
                    widget = widgets[tab]
                    handler = handlers[tab]
                    self.addTab(tab, handler, widget)
                else:
                    raise ValueError('Tab {} is not recognized. Choose one of {}'.format(tab, widgets.keys()))
            # First handler: Inspector
            self.on_interaction(self.handlers[tabs[0]])
        # As I cannot create a Geometry with a GeoJSON string I do a workaround
        self.draw_types = {'Polygon': ee.Geometry.Polygon,
                           'Point': ee.Geometry.Point,
                           'LineString': ee.Geometry.LineString,
                           }
        # create EELayers
        self.EELayers = OrderedDict()

    def _add_EELayer(self, name, data):
        """Register a layer; reassigning EELayers fires `_ob_EELayers`."""
        # copy so traitlets sees a *new* dict and triggers the observer
        copyEELayers = copy(self.EELayers)
        copyEELayers[name] = data
        self.EELayers = copyEELayers

    def _remove_EELayer(self, name):
        """Unregister a layer (no-op if absent); triggers `_ob_EELayers`."""
        copyEELayers = copy(self.EELayers)
        if name in copyEELayers:
            copyEELayers.pop(name)
        self.EELayers = copyEELayers

    def move(self, layer_name, direction='up'):
        """Swap a layer with its neighbor in the stacking order.

        :param direction: 'up' or 'down'; anything else leaves order as-is

        NOTE(review): this looks like Python 2 code — in Python 3,
        `dict.keys()`/`.values()` return views that support neither
        `.index()` nor item assignment, so this method would raise.
        Wrapping both in `list(...)` appears to be the intended fix —
        confirm against the project's supported Python version.
        """
        names = self.EELayers.keys()
        values = self.EELayers.values()
        if direction == 'up':
            dir = 1
        elif direction == 'down':
            dir = -1
        else:
            dir = 0
        if layer_name in names:  # if layer exists
            # index and value of layer to move
            i = names.index(layer_name)
            condition = (i < len(names)-1) if dir == 1 else (i > 0)
            if condition:  # if layer is not in the edge
                ival = values[i]
                # new index for layer
                newi = i+dir
                # get index and value that already exist in the new index
                iname_before = names[newi]
                ival_before = values[newi]
                # Change order
                # set layer and value in the new index
                names[newi] = layer_name
                values[newi] = ival
                # set replaced layer and its value in the index of moving layer
                names[i] = iname_before
                values[i] = ival_before
                newlayers = OrderedDict(zip(names, values))
                self.EELayers = newlayers

    @observe('EELayers')
    def _ob_EELayers(self, change):
        """React to EELayers reassignment: rebuild leaflet layers and
        refresh the Inspector and Layers selector widgets."""
        new = change['new']
        # base layer always stays first
        proxy_layers = [self.layers[0]]
        for val in new.values():
            layer = val['layer']
            proxy_layers.append(layer)
        self.layers = tuple(proxy_layers)
        # UPDATE INSPECTOR
        # Clear options
        self.inspector_wid.selector.options = {}
        # Add layer to the Inspector Widget
        self.inspector_wid.selector.options = new  # self.EELayers
        # UPDATE LAYERS WIDGET
        # update Layers Widget
        self.layers_widget.selector.options = {}
        self.layers_widget.selector.options = new  # self.EELayers

    @property
    def added_images(self):
        """Number of registered layers of type 'Image'."""
        return sum(
            [1 for val in self.EELayers.values() if val['type'] == 'Image'])

    @property
    def added_geometries(self):
        """Number of registered layers of type 'Geometry'."""
        return sum(
            [1 for val in self.EELayers.values() if val['type'] == 'Geometry'])

    def task_widget(self):
        """NOTE(review): appears unfinished — `self.tasksWid` is never
        defined in this class, and the loop polls the task list forever
        without a sleep or a break. Verify before calling."""
        with self.tasksWid:
            while True:
                list = ee.data.getTaskList()

    def show(self, tabs=True, layer_control=True, draw_control=False):
        """Display the map (and optionally its tab widget) in the notebook.

        On the first call, optionally attaches a LayersControl and a
        DrawControl; subsequent calls just re-display.
        """
        if not self.is_shown:
            if layer_control:
                # Layers Control
                lc = ipyleaflet.LayersControl()
                self.add_control(lc)
            if draw_control:
                # Draw Control
                dc = ipyleaflet.DrawControl(# edit=False,
                                            # marker={'shapeOptions': {}}
                                           )
                dc.on_draw(self.handle_draw)
                self.add_control(dc)
            if tabs:
                display(self, self.tab_widget)
            else:
                display(self)
        else:
            # if len(tabs) > 0:
            if tabs:
                display(self, self.tab_widget)
            else:
                display(self)
        self.is_shown = True

    def show_tab(self, name):
        """Display a single tab widget by its name."""
        try:
            widget = self.tab_children_dict[name]
            display(widget)
        except:
            print('Tab not found')

    def addImage(self, image, visParams=None, name=None, show=True,
                 opacity=None, replace=True):
        """Build the EELayer dict for an ee.Image (does NOT register it).

        :return: dict with keys 'type', 'object', 'visParams', 'layer',
            or None if `name` exists and `replace` is False
        """
        # Check if layer exists
        if name in self.EELayers.keys():
            if not replace:
                msg = "Image with name '{}' exists already, please choose " \
                      "another name"
                print(msg.format(name))
                return
            else:
                # Get URL, attribution & vis params
                params = get_image_tile(image, visParams, show, opacity)
                # Remove Layer
                self.removeLayer(name)
        else:
            # Get URL, attribution & vis params
            params = get_image_tile(image, visParams, show, opacity)
        layer = ipyleaflet.TileLayer(url=params['url'],
                                     attribution=params['attribution'],
                                     name=name)
        EELayer = {'type': 'Image',
                   'object': image,
                   'visParams': params['visParams'],
                   'layer': layer}
        # self._add_EELayer(name, EELayer)
        # return name
        return EELayer

    def addMarker(self, marker, visParams=None, name=None, show=True,
                  opacity=None, replace=True,
                  inspect={'data':None, 'reducer':None, 'scale':None}):
        """Dispatch to addGeometry/addFeature based on the marker's type.

        NOTE(review): mutable default for `inspect` (never mutated here,
        but fragile). Also, the FeatureCollection branch computes
        `geometry` but then passes `marker` — confirm which was intended.
        """
        if isinstance(marker, ee.Geometry):
            self.addGeometry(marker, visParams, name, show, opacity, replace,
                             inspect)
        elif isinstance(marker, ee.Feature):
            self.addFeature(marker, visParams, name, show, opacity, replace,
                            inspect)
        elif isinstance(marker, ee.FeatureCollection):
            geometry = marker.geometry()
            self.addGeometry(marker, visParams, name, show, opacity, replace,
                             inspect)

    def addFeature(self, feature, visParams=None, name=None, show=True,
                   opacity=None, replace=True,
                   inspect={'data':None, 'reducer':None, 'scale':None}):
        """Add an ee.Feature as a GeoJSON layer and register it.

        :return: the layer name used, or None if it exists and
            `replace` is False
        """
        thename = name if name else 'Feature {}'.format(self.added_geometries)
        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("Layer with name '{}' exists already, please choose another name".format(thename))
                return
            else:
                self.removeLayer(thename)
        params = get_geojson_tile(feature, thename, inspect)
        layer = ipyleaflet.GeoJSON(data=params['geojson'],
                                   name=thename,
                                   popup=HTML(params['pop']))
        self._add_EELayer(thename, {'type': 'Feature',
                                    'object': feature,
                                    'visParams': None,
                                    'layer': layer})
        return thename

    def addGeometry(self, geometry, visParams=None, name=None, show=True,
                    opacity=None, replace=True,
                    inspect={'data':None, 'reducer':None, 'scale':None}):
        """Add an ee.Geometry as a GeoJSON layer and register it.

        :return: the layer name used, or None if it exists and
            `replace` is False
        """
        thename = name if name else 'Geometry {}'.format(self.added_geometries)
        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("Layer with name '{}' exists already, please choose another name".format(thename))
                return
            else:
                self.removeLayer(thename)
        params = get_geojson_tile(geometry, thename, inspect)
        layer = ipyleaflet.GeoJSON(data=params['geojson'],
                                   name=thename,
                                   popup=HTML(params['pop']))
        self._add_EELayer(thename, {'type': 'Geometry',
                                    'object': geometry,
                                    'visParams':None,
                                    'layer': layer})
        return thename

    def addFeatureLayer(self, feature, visParams=None, name=None, show=True,
                        opacity=None, replace=True):
        """Paint a Feature/FeatureCollection to an image tile and register it.

        Recognized visParams keys: 'fill_color', 'outline_color' (or
        'border_color'), 'outline'.
        :return: the layer name used, or None on bad type / name clash
        """
        visParams = visParams if visParams else {}
        if isinstance(feature, ee.Feature):
            ty = 'Feature'
        elif isinstance(feature, ee.FeatureCollection):
            ty = 'FeatureCollection'
        else:
            print('The object is not a Feature or FeatureCollection')
            return
        fill_color = visParams.get('fill_color', None)
        if 'outline_color' in visParams:
            out_color = visParams['outline_color']
        elif 'border_color' in visParams:
            out_color = visParams['border_color']
        else:
            out_color = 'black'
        outline = visParams.get('outline', 2)
        # rasterize the vector data so it is served as image tiles
        proxy_layer = maptool.paint(feature, out_color, fill_color, outline)
        thename = name if name else '{} {}'.format(ty, self.added_geometries)
        img_params = {'bands':['vis-red', 'vis-green', 'vis-blue'],
                      'min': 0, 'max':255}
        # Check if layer exists
        if thename in self.EELayers.keys():
            if not replace:
                print("{} with name '{}' exists already, please choose another name".format(ty, thename))
                return
            else:
                # Get URL, attribution & vis params
                params = get_image_tile(proxy_layer, img_params, show, opacity)
                # Remove Layer
                self.removeLayer(thename)
        else:
            # Get URL, attribution & vis params
            params = get_image_tile(proxy_layer, img_params, show, opacity)
        layer = ipyleaflet.TileLayer(url=params['url'],
                                     attribution=params['attribution'],
                                     name=thename)
        self._add_EELayer(thename, {'type': ty,
                                    'object': feature,
                                    'visParams': visParams,
                                    'layer': layer})
        return thename

    def addMosaic(self, collection, visParams=None, name=None, show=False,
                  opacity=None, replace=True):
        """Mosaic an ImageCollection (sorted by time) into one layer.

        :return: the EELayer dict (type 'ImageCollection'); not registered
        """
        proxy = ee.ImageCollection(collection).sort('system:time_start')
        mosaic = ee.Image(proxy.mosaic())
        EELayer = self.addImage(mosaic, visParams, name, show, opacity, replace)
        # modify EELayer: keep the original collection as the inspected object
        EELayer['type'] = 'ImageCollection'
        EELayer['object'] = ee.ImageCollection(collection)
        return EELayer

    def addImageCollection(self, collection, visParams=None, nametags=['id'],
                           show=False, opacity=None):
        """Add every image of a collection as its own layer.

        :param nametags: parts composing each layer name; 'id',
            'system_date', or any image property name
        NOTE(review): mutable default for `nametags` (read-only here).
        """
        size = collection.size().getInfo()
        collist = collection.toList(size)
        separation = ' '
        for inx in range(size):
            img = ee.Image(collist.get(inx))
            name = ''
            properties = img.propertyNames().getInfo()
            for nametag in nametags:
                if nametag == 'id':
                    newname = img.id().getInfo()
                elif nametag == 'system_date':
                    newname = ee.Date(img.date()).format('YYYY-MM-dd').getInfo()
                elif nametag in properties:
                    newname = "{}:{}{}".format(nametag, img.get(nametag).getInfo(), separation)
                else:
                    newname = img.id().getInfo()
                name += newname
            self.addLayer(img, visParams, str(name), show, opacity)

    def addLayer(self, eeObject, visParams=None, name=None, show=True,
                 opacity=None, replace=True, **kwargs):
        """Add any supported EE object (Image, Geometry, Feature,
        FeatureCollection, ImageCollection) to the map."""
        visParams = visParams if visParams else {}
        # CASE: ee.Image
        if isinstance(eeObject, ee.Image):
            image_name = name if name else 'Image {}'.format(self.added_images)
            EELayer = self.addImage(eeObject, visParams=visParams,
                                    name=image_name, show=show,
                                    opacity=opacity, replace=replace)
            self._add_EELayer(image_name, EELayer)
            added_layer = EELayer
        # CASE: ee.Geometry
        elif isinstance(eeObject, ee.Geometry):
            geom = eeObject if isinstance(eeObject, ee.Geometry) else eeObject.geometry()
            kw = {'visParams':visParams, 'name':name, 'show':show, 'opacity':opacity}
            if kwargs.get('inspect'): kw.setdefault('inspect', kwargs.get('inspect'))
            added_layer = self.addGeometry(geom, replace=replace, **kw)
        # CASE: ee.Feature & ee.FeatureCollection
        elif isinstance(eeObject, ee.Feature) or isinstance(eeObject, ee.FeatureCollection):
            feat = eeObject
            kw = {'visParams':visParams, 'name':name, 'show':show, 'opacity':opacity}
            added_layer = self.addFeatureLayer(feat, replace=replace, **kw)
        # CASE: ee.ImageCollection
        elif isinstance(eeObject, ee.ImageCollection):
            '''
            proxy = eeObject.sort('system:time_start')
            mosaic = ee.Image(proxy.mosaic())
            added_layer = self.addImage(mosaic, visParams=visParams, name=thename,
                                        show=show, opacity=opacity, replace=replace)
            '''
            thename = name if name else 'ImageCollection {}'.format(self.added_images)
            EELayer = self.addMosaic(eeObject, visParams, thename, show,
                                     opacity, replace)
            self._add_EELayer(thename, EELayer)
            added_layer = EELayer
        else:
            added_layer = None
            print("`addLayer` doesn't support adding {} objects to the map".format(type(eeObject)))

    def removeLayer(self, name):
        """Remove a registered layer by name (prints a message if absent)."""
        if name in self.EELayers.keys():
            self._remove_EELayer(name)
        else:
            print('Layer {} is not present in the map'.format(name))
            return

    def getLayer(self, name):
        """Return the EELayer dict for `name`, or None with a message."""
        if name in self.EELayers:
            layer = self.EELayers[name]
            return layer
        else:
            print('Layer {} is not present in the map'.format(name))
            return

    def getObject(self, name):
        """Return the EE object stored for the named layer."""
        obj = self.getLayer(name)['object']
        return obj

    def getVisParams(self, name):
        """Return the visualization parameters stored for the named layer."""
        vis = self.getLayer(name)['visParams']
        return vis

    def centerObject(self, eeObject, zoom=None, method=1):
        """Center the map on an EE object; compute zoom unless given."""
        bounds = get_bounds(eeObject)
        if bounds:
            try:
                inverse = inverse_coordinates(bounds)
                centroid = ee.Geometry.Polygon(inverse)\
                    .centroid().getInfo()['coordinates']
            except:
                # fall back to the origin if the centroid can't be computed
                centroid = [0, 0]
            self.center = inverse_coordinates(centroid)
            if zoom:
                self.zoom = zoom
            else:
                self.zoom = get_zoom(bounds, method)

    def getCenter(self):
        """Return the map center as an ee.Geometry.Point."""
        center = self.center
        coords = inverse_coordinates(center)
        return ee.Geometry.Point(coords)

    def getBounds(self, asGeoJSON=True):
        """Return the visible bounds, as an ee.Geometry.Rectangle or a
        raw coordinate list."""
        bounds = inverse_coordinates(self.bounds)
        if asGeoJSON:
            return ee.Geometry.Rectangle(bounds)
        else:
            return bounds

    def _update_tab_children(self):
        """Sync the Tab widget's children and titles from the dict."""
        self.tab_widget.children = tuple(self.tab_children_dict.values())
        for i, name in enumerate(self.tab_children_dict.keys()):
            self.tab_widget.set_title(i, name)

    def addTab(self, name, handler=None, widget=None):
        """Add a named tab with an optional map-interaction handler.

        The handler is wrapped so it receives the tab widget, inverted
        coordinates, and the map itself as keyword arguments.
        """
        wid = widget if widget else HTML('')
        # tab_children = list(self.tab_widget.children)
        tab_children = self.tab_children_dict.values()
        # Get a list of tab's titles
        titles = self.tab_children_dict.keys()
        if name not in titles:
            ntabs = len(tab_children)
            self.tab_children_dict[name] = wid
            if handler:
                def proxy_handler(f):
                    def wrap(**kwargs):
                        # inject tab widget, lat/lon coords and map into
                        # the user handler's kwargs
                        kwargs['widget'] = self.tab_children_dict[name]
                        coords = kwargs['coordinates']
                        kwargs['coordinates'] = inverse_coordinates(coords)
                        kwargs['map'] = self
                        return f(**kwargs)
                    return wrap
                self.handlers[name] = proxy_handler(handler)
            else:
                self.handlers[name] = handler
            self._update_tab_children()
        else:
            print('Tab {} already exists, please choose another name'.format(name))

    def handle_change_tab(self, change):
        """When the selected tab changes, detach all interaction handlers
        and attach only the new tab's handler (if any)."""
        if change['name'] == 'selected_index':
            old = change['old']
            new = change['new']
            old_name = self.tab_widget.get_title(old)
            new_name = self.tab_widget.get_title(new)
            # remove all handlers (second arg True means "remove")
            for handl in self.handlers.values():
                self.on_interaction(handl, True)
            if new_name in self.handlers.keys():
                handler = self.handlers[new_name]
                if handler:
                    self.on_interaction(handler)

    def handle_inspector(self, **change):
        """On map click, query every selected layer at the clicked point
        and fill the Inspector accordion with the results."""
        coords = change['coordinates']
        event = change['type']
        if event == 'click':
            point = ee.Geometry.Point(coords)
            thewidget = change['widget'].main
            first = 'Point {} at {} zoom'.format(coords, self.zoom)
            namelist = [first]
            wids4acc = [HTML('')]
            selected_layers = dict(zip(self.inspector_wid.selector.label,
                                       self.inspector_wid.selector.value))
            length = len(selected_layers.keys())
            i = 1
            for name, obj in selected_layers.items():
                # show progress while the (slow) getInfo calls run
                thewidget.children = [HTML('wait a second please..')]
                thewidget.set_title(0, 'Loading {} of {}...'.format(i, length))
                i += 1
                if obj['type'] == 'Image':
                    try:
                        image = obj['object']
                        values = tools.image.get_value(image, point, scale=1,
                                                       side='client')
                        values = tools.dictionary.sort(values)
                        # Create the content
                        img_html = ''
                        for band, value in values.items():
                            img_html += '<b>{}</b>: {}</br>'.format(band,
                                                                    value)
                        wid = HTML(img_html)
                        # append widget to list of widgets
                        wids4acc.append(wid)
                        namelist.append(name)
                    except Exception as e:
                        # wid = HTML(str(e).replace('<','{').replace('>','}'))
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))
                # ImageCollection
                if obj['type'] == 'ImageCollection':
                    # Get the values from all images
                    try:
                        collection = obj['object']
                        values = tools.image.get_values(collection, point, scale=1,
                                                        properties=['system:time_start'],
                                                        side='client')
                        # header: union of band names across all images
                        allbands = [val.keys() for bands, val in values.items()]
                        bands = []
                        for bandlist in allbands:
                            for band in bandlist:
                                if band not in bands:
                                    bands.append(band)
                        header = ['image']+bands
                        # rows
                        rows = []
                        for imgid, val in values.items():
                            row = ['']*len(header)
                            row[0] = str(imgid)
                            for bandname, bandvalue in val.items():
                                pos = header.index(bandname) if bandname in header else None
                                if pos:
                                    row[pos] = str(bandvalue)
                            rows.append(row)
                        # Create the content
                        html = maptool.create_html_table(header, rows)
                        wid = HTML(html)
                        # append widget to list of widgets
                        wids4acc.append(wid)
                        namelist.append(name)
                    except Exception as e:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))
                # Features
                if obj['type'] == 'Feature':
                    try:
                        feat = obj['object']
                        feat_geom = feat.geometry()
                        if feat_geom.contains(point).getInfo():
                            info = feature_properties_output(feat)
                            wid = HTML(info)
                            # append widget to list of widgets
                            wids4acc.append(wid)
                            namelist.append(name)
                    except Exception as e:
                        # wid = HTML(str(e).replace('<','{').replace('>','}'))
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        trace = traceback.format_exception(exc_type, exc_value,
                                                           exc_traceback)
                        wid = ErrorAccordion(e, trace)
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))
                # FeatureCollections
                if obj['type'] == 'FeatureCollection':
                    try:
                        fc = obj['object']
                        filtered = fc.filterBounds(point)
                        if filtered.size().getInfo() > 0:
                            feat = ee.Feature(filtered.first())
                            info = feature_properties_output(feat)
                            wid = HTML(info)
                            # append widget to list of widgets
                            wids4acc.append(wid)
                            namelist.append(name)
                    except Exception as e:
                        wid = HTML(str(e).replace('<','{').replace('>','}'))
                        wids4acc.append(wid)
                        namelist.append('ERROR at layer {}'.format(name))
            # Set children and children's name of inspector widget
            thewidget.children = wids4acc
            for i, n in enumerate(namelist):
                thewidget.set_title(i, n)

    def handle_object_inspector(self, **change):
        """On map click, show each layer's `getInfo()` in the tab widget."""
        event = change['type']
        thewidget = change['widget']
        if event == 'click':
            thewidget.children = [HTML('wait a second please..')]
            thewidget.set_title(0, 'Loading...')
            widgets = []
            i = 0
            for name, obj in self.EELayers.items():
                the_object = obj['object']
                try:
                    properties = the_object.getInfo()
                    wid = ipytools.create_accordion(properties)
                    wid.selected_index = None
                except Exception as e:
                    wid = HTML(str(e))
                widgets.append(wid)
                thewidget.set_title(i, name)
                i += 1
            thewidget.children = widgets

    def handle_draw(self, dc_widget, action, geo_json):
        """DrawControl callback: add drawn geometries, remove deleted ones.

        NOTE(review): the 'deleted' branch compares the geometry against
        the whole EELayer dict (`val`), not `val['object']`, so it never
        matches — confirm intended behavior.
        """
        ty = geo_json['geometry']['type']
        coords = geo_json['geometry']['coordinates']
        geom = self.draw_types[ty](coords)
        if action == 'created':
            self.addGeometry(geom)
        elif action == 'deleted':
            for key, val in self.EELayers.items():
                if geom == val:
                    self.removeLayer(key)
class CustomInspector(HBox):
    """Horizontal box pairing a layer multi-selector with an Accordion
    that holds the per-layer inspection results."""
    def __init__(self, **kwargs):
        super(CustomInspector, self).__init__(
            description='Select one or more layers', **kwargs)
        self.selector = SelectMultiple()
        self.main = Accordion()
        self.children = [self.selector, self.main]
class ErrorAccordion(Accordion):
    """Two-pane accordion presenting an exception and its traceback.

    Angle brackets are swapped for braces so the text renders literally
    inside the HTML widgets instead of being parsed as tags.
    """
    def __init__(self, error, traceback, **kwargs):
        super(ErrorAccordion, self).__init__(**kwargs)

        def _escape(text):
            # neutralize angle brackets for HTML display
            return '{}'.format(text).replace('<', '{').replace('>', '}')

        self.error = _escape(error)
        # skip the first traceback line, join the rest with HTML breaks
        self.traceback = ''.join(
            _escape(trace) + '</br>' for trace in traceback[1:])
        self.errorWid = HTML(self.error)
        self.traceWid = HTML(self.traceback)
        self.children = (self.errorWid, self.traceWid)
        self.set_title(0, 'ERROR')
        self.set_title(1, 'TRACEBACK')
class LayersWidget(ipytools.RealBox):
    """Layers panel: a layer selector plus action buttons (center, remove,
    visualization editor, object inspector, reorder)."""
    def __init__(self, map=None, **kwargs):
        """
        :param map: the parent Map whose EELayers this widget controls
        """
        super(LayersWidget, self).__init__(**kwargs)
        self.map = map
        self.selector = Select()
        self.EELayer = None
        self.center = Button(description='Center')
        self.center.on_click(self.on_click_center)
        self.remove = Button(description='Remove')
        self.remove.on_click(self.on_click_remove)
        self.show_prop = Button(description='Show Object')
        self.show_prop.on_click(self.on_click_show_object)
        self.vis = Button(description='Visualization')
        self.vis.on_click(self.on_click_vis)
        self.move_up = Button(description='Move up')
        self.move_up.on_click(self.on_up)
        self.move_down = Button(description='Move down')
        self.move_down.on_click(self.on_down)
        self.group1 = VBox([self.center, self.remove,
                            self.vis, self.show_prop])
        self.group2 = VBox([self.move_up, self.move_down])
        self.items = [[self.selector, self.group1, self.group2]]
        self.selector.observe(self.handle_selection, names='value')

    def on_up(self, button=None):
        """Move the selected layer one position up in the stack."""
        if self.EELayer:
            self.map.move(self.layer.name, 'up')

    def on_down(self, button=None):
        """Move the selected layer one position down in the stack."""
        if self.EELayer:
            self.map.move(self.layer.name, 'down')

    def handle_selection(self, change):
        """Cache the newly selected EELayer's pieces on the instance."""
        new = change['new']
        self.EELayer = new
        self.items = [[self.selector, self.group1, self.group2]]
        if new:
            self.layer = new['layer']
            self.obj = new['object']
            self.ty = new['type']
            # BUGFIX: store under `visParams` — the previous code assigned
            # to `self.vis`, clobbering the Visualization Button created
            # in __init__.
            self.visParams = new['visParams']

    def on_click_show_object(self, button=None):
        """Asynchronously fetch and display the selected layer's object."""
        if self.EELayer:
            loading = HTML('Loading <b>{}</b>...'.format(self.layer.name))
            widget = VBox([loading])
            # getInfo() is slow: run it in a thread so the UI stays live
            thread = threading.Thread(target=ipytools.create_async_output,
                                      args=(self.obj, widget))
            self.items = [[self.selector, self.group1],
                          [widget]]
            thread.start()

    def on_click_center(self, button=None):
        """Center the parent map on the selected layer's EE object."""
        if self.EELayer:
            self.map.centerObject(self.obj)

    def on_click_remove(self, button=None):
        """Remove the selected layer from the parent map."""
        if self.EELayer:
            self.map.removeLayer(self.layer.name)

    def on_click_vis(self, button=None):
        """Open an inline visualization editor for the selected layer.

        Builds band/min/max widgets for the selected Image layer and, on
        'Apply', re-adds the layer with the chosen visualization
        parameters.
        """
        if self.EELayer:
            selector = self.selector
            group1 = self.group1
            map = self.map
            layer_name = self.layer.name
            image = self.obj
            try:
                info = self.obj.getInfo()
            except Exception as e:
                self.items = [[self.selector, self.group1],
                              [HTML(str(e))]]
                return
            if self.ty == 'Image':
                # BUGFIX: `bands` was never assigned in this version,
                # causing an immediate NameError below.
                bands = info['bands']
                imbands = [band['id'] for band in bands]
                bands_type = [band['data_type']['precision'] for band in bands]
                bands_min = []
                bands_max = []
                # float bands don't have a specific range: reduce the
                # region to obtain the real one
                if 'float' in bands_type:
                    try:
                        minmax = image.reduceRegion(ee.Reducer.minMax())
                        for band in bands:
                            bandname = band['id']
                            try:
                                tmin = minmax.get('{}_min'.format(bandname)).getInfo()
                                tmax = minmax.get('{}_max'.format(bandname)).getInfo()
                            except:
                                tmin = 0
                                tmax = 1
                            bands_min.append(tmin)
                            bands_max.append(tmax)
                    except:
                        # reduceRegion failed: fall back to declared ranges
                        for band in bands:
                            dt = band['data_type']
                            try:
                                tmin = dt['min']
                                tmax = dt['max']
                            except:
                                tmin = 0
                                tmax = 1
                            bands_min.append(tmin)
                            bands_max.append(tmax)
                else:
                    # integer bands: data_type declares the range
                    for band in bands:
                        dt = band['data_type']
                        try:
                            tmin = dt['min']
                            tmax = dt['max']
                        except:
                            tmin = 0
                            tmax = 1
                        bands_min.append(tmin)
                        bands_max.append(tmax)
                # dict of {band: min} and {band: max}
                min_dict = dict(zip(imbands, bands_min))
                max_dict = dict(zip(imbands, bands_max))
                # Layer data currently on the map
                layer_data = self.map.EELayers[layer_name]
                visParams = layer_data['visParams']
                visBands = visParams['bands'].split(',')
                # normalize vis min to a list of floats
                visMin = visParams['min']
                if isinstance(visMin, str):
                    visMin = [float(vis) for vis in visMin.split(',')]
                else:
                    visMin = [visMin]
                # normalize vis max to a list of floats
                visMax = visParams['max']
                if isinstance(visMax, str):
                    visMax = [float(vis) for vis in visMax.split(',')]
                else:
                    visMax = [visMax]

                def handle_dropdown(band_slider):
                    """Rescale `band_slider` when its band selection changes."""
                    def wrap(change):
                        new = change['new']
                        band_slider.min = min_dict[new]
                        band_slider.max = max_dict[new]
                    return wrap

                def slider_1band(as_float=False, name='band'):
                    """Create the dropdown + range-slider widget for one band."""
                    vismin = visMin[0]
                    vismax = visMax[0]
                    band = visBands[0]
                    drop = Dropdown(description=name, options=imbands, value=band)
                    if as_float:
                        slider = ipytools.FloatBandWidget(min=min_dict[drop.value],
                                                          max=max_dict[drop.value])
                    else:
                        slider = FloatRangeSlider(min=min_dict[drop.value],
                                                  max=max_dict[drop.value],
                                                  value=[vismin, vismax],
                                                  step=0.01)
                    drop.observe(handle_dropdown(slider), names=['value'])
                    band_slider = HBox([drop, slider])
                    return band_slider

                def slider_3bands(as_float=False):
                    """Create dropdown + slider widgets for an RGB combo.

                    NOTE(review): currently unused (the 3-band path uses
                    three slider_1band widgets); kept for parity.
                    """
                    if len(visMin) == 1:
                        visminR = visminG = visminB = visMin[0]
                    else:
                        visminR = visMin[0]
                        visminG = visMin[1]
                        visminB = visMin[2]
                    if len(visMax) == 1:
                        vismaxR = vismaxG = vismaxB = visMax[0]
                    else:
                        vismaxR = visMax[0]
                        vismaxG = visMax[1]
                        vismaxB = visMax[2]
                    if len(visBands) == 1:
                        visbandR = visbandG = visbandB = visBands[0]
                    else:
                        visbandR = visBands[0]
                        visbandG = visBands[1]
                        visbandB = visBands[2]
                    drop = Dropdown(description='red', options=imbands, value=visbandR)
                    drop2 = Dropdown(description='green', options=imbands, value=visbandG)
                    drop3 = Dropdown(description='blue', options=imbands, value=visbandB)
                    slider = FloatRangeSlider(min=min_dict[drop.value],
                                              max=max_dict[drop.value],
                                              value=[visminR, vismaxR],
                                              step=0.01)
                    slider2 = FloatRangeSlider(min=min_dict[drop2.value],
                                               max=max_dict[drop2.value],
                                               value=[visminG, vismaxG],
                                               step=0.01)
                    slider3 = FloatRangeSlider(min=min_dict[drop3.value],
                                               max=max_dict[drop3.value],
                                               value=[visminB, vismaxB],
                                               step=0.01)
                    drop.observe(handle_dropdown(slider), names=['value'])
                    drop2.observe(handle_dropdown(slider2), names=['value'])
                    drop3.observe(handle_dropdown(slider3), names=['value'])
                    band_slider = HBox([drop, slider])
                    band_slider2 = HBox([drop2, slider2])
                    band_slider3 = HBox([drop3, slider3])
                    return VBox([band_slider, band_slider2, band_slider3],
                                layout=Layout(width='700px'))

                # widget to choose between 1-band and 3-band editing
                bands = RadioButtons(options=['1 band', '3 bands'],
                                     layout=Layout(width='80px'))
                # widget for band, min and max selection
                selection = slider_1band()
                apply = Button(description='Apply', layout=Layout(width='100px'))
                new_row = [bands, selection, apply]

                def update_row_items(new_row):
                    """Replace the widget rows with the given editor row."""
                    self.items = [[selector, group1],
                                  new_row]

                def handle_radio_button(change):
                    """Switch between the 1-band and 3-band editor layouts."""
                    new = change['new']
                    if new == '1 band':
                        selection = slider_1band()
                        update_row_items([bands, selection, apply])
                    else:
                        red = slider_1band(name='red')
                        green = slider_1band(name='green')
                        blue = slider_1band(name='blue')
                        selection = VBox([red, green, blue])
                        update_row_items([bands, selection, apply])

                def handle_apply(button):
                    """Read the widget values and re-add the layer with them."""
                    radio = self.items[1][0].value  # radio button
                    vbox = self.items[1][1]
                    if radio == '1 band':  # 1 band
                        # BUGFIX: slider_1band() returns the HBox itself, so
                        # its children are (dropdown, slider) directly; the
                        # previous `vbox.children[0].children` indexed into
                        # the Dropdown and raised AttributeError.
                        hbox_band = vbox.children
                        band = hbox_band[0].value
                        vmin = hbox_band[1].value[0]
                        vmax = hbox_band[1].value[1]
                        map.addLayer(image, {'bands': [band], 'min': vmin, 'max': vmax},
                                     layer_name)
                    else:  # 3 bands: VBox of three (dropdown, slider) HBoxes
                        hbox_bandR = vbox.children[0].children
                        hbox_bandG = vbox.children[1].children
                        hbox_bandB = vbox.children[2].children
                        bandR = hbox_bandR[0].value
                        bandG = hbox_bandG[0].value
                        bandB = hbox_bandB[0].value
                        minR = hbox_bandR[1].value[0]
                        minG = hbox_bandG[1].value[0]
                        minB = hbox_bandB[1].value[0]
                        maxR = hbox_bandR[1].value[1]
                        maxG = hbox_bandG[1].value[1]
                        maxB = hbox_bandB[1].value[1]
                        map.addLayer(image, {'bands': [bandR, bandG, bandB],
                                             'min': [float(minR), float(minG), float(minB)],
                                             'max': [float(maxR), float(maxG), float(maxB)]},
                                     layer_name)

                bands.observe(handle_radio_button, names='value')
                update_row_items(new_row)
                apply.on_click(handle_apply)
f7321a82ba7c73b42aae51ba6c7f94a79eb940ad | 7,521 | py | Python | src/scanoss/cyclonedx.py | tardyp/scanoss.py | 88ad27e36dd00f420fed08a240f5bbd62169778c | [
"MIT"
] | 8 | 2021-08-19T12:35:58.000Z | 2022-03-23T02:44:36.000Z | src/scanoss/cyclonedx.py | tardyp/scanoss.py | 88ad27e36dd00f420fed08a240f5bbd62169778c | [
"MIT"
] | 4 | 2021-10-31T10:21:11.000Z | 2022-03-24T15:24:54.000Z | src/scanoss/cyclonedx.py | tardyp/scanoss.py | 88ad27e36dd00f420fed08a240f5bbd62169778c | [
"MIT"
] | 1 | 2021-08-19T12:36:02.000Z | 2021-08-19T12:36:02.000Z | """
SPDX-License-Identifier: MIT
Copyright (c) 2021, SCANOSS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os.path
import sys
import hashlib
import time
class CycloneDx:
"""
CycloneDX management class
Handle all interaction with CycloneDX formatting
"""
def __init__(self, debug: bool = False, output_file: str = None):
"""
Initialise the CycloneDX class
"""
self.output_file = output_file
self.debug = debug
@staticmethod
def print_stderr(*args, **kwargs):
"""
Print the given message to STDERR
"""
print(*args, file=sys.stderr, **kwargs)
def print_msg(self, *args, **kwargs):
"""
Print message if quite mode is not enabled
"""
if not self.quiet:
self.print_stderr(*args, **kwargs)
def print_debug(self, *args, **kwargs):
"""
Print debug message if enabled
"""
if self.debug:
self.print_stderr(*args, **kwargs)
def parse(self, data: json):
"""
Parse the given input (raw/plain) JSON string and return CycloneDX summary
:param data: json - JSON object
:return: CycloneDX dictionary
"""
if not data:
self.print_stderr('ERROR: No JSON data provided to parse.')
return None
self.print_debug(f'Processing raw results into CycloneDX format...')
cdx = {}
for f in data:
file_details = data.get(f)
# print(f'File: {f}: {file_details}')
for d in file_details:
id_details = d.get("id")
if not id_details or id_details == 'none':
# print(f'No ID for {f}')
continue
purl = None
purls = d.get('purl')
if not purls:
self.print_stderr(f'Purl block missing for {f}: {file_details}')
continue
for p in purls:
self.print_debug(f'Purl: {p}')
purl = p
break
if not purl:
self.print_stderr(f'Warning: No PURL found for {f}: {file_details}')
continue
if cdx.get(purl):
self.print_debug(f'Component {purl} already stored: {cdx.get(purl)}')
continue
fd = {}
# print(f'Vendor: {d.get("vendor")}, Comp: {d.get("component")}, Ver: {d.get("version")},'
# f' Latest: {d.get("latest")} ID: {d.get("id")}')
for field in ['id', 'vendor', 'component', 'version', 'latest']:
fd[field] = d.get(field)
licenses = d.get('licenses')
fdl = []
for lic in licenses:
# print(f'License: {lic.get("name")}')
fdl.append({'id':lic.get("name")})
fd['licenses'] = fdl
cdx[p] = fd
# print(f'License summary: {cdx}')
return cdx
def produce_from_file(self, json_file: str, output_file: str = None) -> bool:
"""
Parse plain/raw input JSON file and produce CycloneDX output
:param json_file:
:param output_file:
:return: True if successful, False otherwise
"""
if not json_file:
self.print_stderr('ERROR: No JSON file provided to parse.')
return False
if not os.path.isfile(json_file):
self.print_stderr(f'ERROR: JSON file does not exist or is not a file: {json_file}')
return False
success = True
with open(json_file, 'r') as f:
success = self.produce_from_str(f.read(), output_file)
return success
def produce_from_json(self, data: json, output_file: str = None) -> bool:
"""
Produce the CycloneDX output from the input JSON object
:param data: JSON object
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
cdx = self.parse(data)
if not cdx:
self.print_stderr('ERROR: No CycloneDX data returned for the JSON string provided.')
return False
md5hex = hashlib.md5(f'{time.time()}'.encode('utf-8')).hexdigest()
data = {}
data['bomFormat'] = 'CycloneDX'
data['specVersion'] = '1.2'
data['serialNumber'] = f'scanoss:SCANOSS-PY - SCANOSS CLI-{md5hex}'
data['version'] = '1'
data['components'] = []
for purl in cdx:
comp = cdx.get(purl)
lic = []
licenses = comp.get('licenses')
if licenses:
for l in licenses:
lic.append({'license': { 'id': l.get('id')}})
m_type = 'Snippet' if comp.get('id') == 'snippet' else 'Library'
data['components'].append({
'type': m_type,
'name': comp.get('component'),
'publisher': comp.get('vendor'),
'version': comp.get('version'),
'purl': purl,
'licenses': lic
# 'licenses': [{
# 'license': {
# 'id': comp.get('license')
# }
# }]
})
# End for loop
file = sys.stdout
if not output_file and self.output_file:
output_file = self.output_file
if output_file:
file = open(output_file, 'w')
print(json.dumps(data, indent=2), file=file)
if output_file:
file.close()
return True
def produce_from_str(self, json_str: str, output_file: str = None) -> bool:
"""
Produce CycloneDX output from input JSON string
:param json_str: input JSON string
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
if not json_str:
self.print_stderr('ERROR: No JSON string provided to parse.')
return False
data = None
try:
data = json.loads(json_str)
except Exception as e:
self.print_stderr(f'ERROR: Problem parsing input JSON: {e}')
return False
else:
return self.produce_from_json(data, output_file)
return False
#
# End of CycloneDX Class
# | 36.867647 | 106 | 0.550725 | import json
import os.path
import sys
import hashlib
import time
class CycloneDx:
def __init__(self, debug: bool = False, output_file: str = None):
self.output_file = output_file
self.debug = debug
@staticmethod
def print_stderr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def print_msg(self, *args, **kwargs):
if not self.quiet:
self.print_stderr(*args, **kwargs)
def print_debug(self, *args, **kwargs):
if self.debug:
self.print_stderr(*args, **kwargs)
def parse(self, data: json):
if not data:
self.print_stderr('ERROR: No JSON data provided to parse.')
return None
self.print_debug(f'Processing raw results into CycloneDX format...')
cdx = {}
for f in data:
file_details = data.get(f)
for d in file_details:
id_details = d.get("id")
if not id_details or id_details == 'none':
continue
purl = None
purls = d.get('purl')
if not purls:
self.print_stderr(f'Purl block missing for {f}: {file_details}')
continue
for p in purls:
self.print_debug(f'Purl: {p}')
purl = p
break
if not purl:
self.print_stderr(f'Warning: No PURL found for {f}: {file_details}')
continue
if cdx.get(purl):
self.print_debug(f'Component {purl} already stored: {cdx.get(purl)}')
continue
fd = {}
for field in ['id', 'vendor', 'component', 'version', 'latest']:
fd[field] = d.get(field)
licenses = d.get('licenses')
fdl = []
for lic in licenses:
fdl.append({'id':lic.get("name")})
fd['licenses'] = fdl
cdx[p] = fd
return cdx
def produce_from_file(self, json_file: str, output_file: str = None) -> bool:
if not json_file:
self.print_stderr('ERROR: No JSON file provided to parse.')
return False
if not os.path.isfile(json_file):
self.print_stderr(f'ERROR: JSON file does not exist or is not a file: {json_file}')
return False
success = True
with open(json_file, 'r') as f:
success = self.produce_from_str(f.read(), output_file)
return success
def produce_from_json(self, data: json, output_file: str = None) -> bool:
cdx = self.parse(data)
if not cdx:
self.print_stderr('ERROR: No CycloneDX data returned for the JSON string provided.')
return False
md5hex = hashlib.md5(f'{time.time()}'.encode('utf-8')).hexdigest()
data = {}
data['bomFormat'] = 'CycloneDX'
data['specVersion'] = '1.2'
data['serialNumber'] = f'scanoss:SCANOSS-PY - SCANOSS CLI-{md5hex}'
data['version'] = '1'
data['components'] = []
for purl in cdx:
comp = cdx.get(purl)
lic = []
licenses = comp.get('licenses')
if licenses:
for l in licenses:
lic.append({'license': { 'id': l.get('id')}})
m_type = 'Snippet' if comp.get('id') == 'snippet' else 'Library'
data['components'].append({
'type': m_type,
'name': comp.get('component'),
'publisher': comp.get('vendor'),
'version': comp.get('version'),
'purl': purl,
'licenses': lic
})
file = sys.stdout
if not output_file and self.output_file:
output_file = self.output_file
if output_file:
file = open(output_file, 'w')
print(json.dumps(data, indent=2), file=file)
if output_file:
file.close()
return True
def produce_from_str(self, json_str: str, output_file: str = None) -> bool:
if not json_str:
self.print_stderr('ERROR: No JSON string provided to parse.')
return False
data = None
try:
data = json.loads(json_str)
except Exception as e:
self.print_stderr(f'ERROR: Problem parsing input JSON: {e}')
return False
else:
return self.produce_from_json(data, output_file)
return False
| true | true |
f7321afb7e4193fbb1ac06c566dac390abb97b12 | 8,689 | py | Python | keras/applications/vgg16.py | asanoboy/keras | e467ee5a1a00afdfa1cb7f5508fdbfd2c5eab1e5 | [
"MIT"
] | 7 | 2017-06-02T19:07:36.000Z | 2021-07-23T21:01:44.000Z | keras/applications/vgg16.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | keras/applications/vgg16.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4 | 2017-05-27T02:37:54.000Z | 2017-08-05T16:01:31.000Z | # -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import warnings
from ..models import Model
from ..layers import Flatten
from ..layers import Dense
from ..layers import Input
from ..layers import Conv2D
from ..layers import MaxPooling2D
from ..layers import GlobalAveragePooling2D
from ..layers import GlobalMaxPooling2D
from ..engine.topology import get_source_inputs
from ..utils import layer_utils
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions
from .imagenet_utils import preprocess_input
from .imagenet_utils import _obtain_input_shape
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format='channels_last'` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
elif weights is not None:
model.load_weights(weights)
return model
| 43.663317 | 145 | 0.605478 |
from __future__ import print_function
from __future__ import absolute_import
import os
import warnings
from ..models import Model
from ..layers import Flatten
from ..layers import Dense
from ..layers import Input
from ..layers import Conv2D
from ..layers import MaxPooling2D
from ..layers import GlobalAveragePooling2D
from ..layers import GlobalMaxPooling2D
from ..engine.topology import get_source_inputs
from ..utils import layer_utils
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions
from .imagenet_utils import preprocess_input
from .imagenet_utils import _obtain_input_shape
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='vgg16')
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
elif weights is not None:
model.load_weights(weights)
return model
| true | true |
f7321b2a7c56ae6bf342d7170057105d25b67432 | 166 | py | Python | demo/demo1.py | sharangdhar/Testit | e0be60933144beb20a728df807e7c60f77917a2c | [
"MIT"
] | null | null | null | demo/demo1.py | sharangdhar/Testit | e0be60933144beb20a728df807e7c60f77917a2c | [
"MIT"
] | null | null | null | demo/demo1.py | sharangdhar/Testit | e0be60933144beb20a728df807e7c60f77917a2c | [
"MIT"
] | null | null | null | # true if n is prime
def isPrime(n):
if n <= 1 or int(n) != n:
return False
for x in range(2, int(n*.5)+1):
if n%x == 0:
return False
return True
| 18.444444 | 33 | 0.548193 |
def isPrime(n):
if n <= 1 or int(n) != n:
return False
for x in range(2, int(n*.5)+1):
if n%x == 0:
return False
return True
| true | true |
f7321b5ba3eb4d439f4753d430002c7a1fb1a908 | 216 | py | Python | Codechef/helping_chef.py | Ritz2626/Hacktoberfest-2 | a2c48a23c62532227b0b4cd88783dcddaca98519 | [
"MIT"
] | null | null | null | Codechef/helping_chef.py | Ritz2626/Hacktoberfest-2 | a2c48a23c62532227b0b4cd88783dcddaca98519 | [
"MIT"
] | null | null | null | Codechef/helping_chef.py | Ritz2626/Hacktoberfest-2 | a2c48a23c62532227b0b4cd88783dcddaca98519 | [
"MIT"
] | null | null | null | try:
t=int(input(''))
while t>0:
n=int(input(''))
if n<10:
print('What an obedient servant you are!')
else:
print('-1')
t=t-1
except Exception as e:
pass
| 18 | 51 | 0.462963 | try:
t=int(input(''))
while t>0:
n=int(input(''))
if n<10:
print('What an obedient servant you are!')
else:
print('-1')
t=t-1
except Exception as e:
pass
| true | true |
f7321c7afea8a5dc0c9d73994a9d89d4dc165398 | 2,158 | py | Python | phonemeconversion.py | AustinCasteel/pocketsphinx_kws | ae0067e9a728e7e48a5153b9272cb8c52bcb20e6 | [
"MIT"
] | 194 | 2018-07-28T14:54:35.000Z | 2022-03-18T12:40:10.000Z | plugins/stt/pocketsphinx-stt/phonemeconversion.py | HoltTechnologyCorporation/Naomi | 16d5f6ba03ea96c3fa13ed4e2c1f082041d9de31 | [
"MIT"
] | 239 | 2018-07-13T16:15:25.000Z | 2022-03-31T17:55:01.000Z | plugins/stt/pocketsphinx-stt/phonemeconversion.py | Longshotpro2/Naomi | 9330c63fe24606dc45194d297c665f37a4ec10f7 | [
"MIT"
] | 64 | 2018-07-26T02:18:33.000Z | 2022-01-07T06:53:01.000Z | # -*- coding: utf-8 -*-
import logging
XSAMPA_TO_ARPABET_MAPPING = {
# stop
'p': 'P',
'b': 'B',
't': 'T',
'd': 'D',
'k': 'K',
'g': 'G',
'?': 'Q',
# 2 consonants
'pf': 'PF',
'ts': 'TS',
'tS': 'CH',
'dZ': 'JH',
# fricative
'f': 'F',
'v': 'V',
'T': 'TH',
'D': 'DH',
's': 'S',
'z': 'Z',
'S': 'SH',
'Z': 'ZH',
'C': 'CC',
'j': 'Y',
'x': 'X',
'R': 'RR',
'h': 'HH',
'H': 'HHH',
# nasal
'm': 'M',
'n': 'N',
'N': 'NG',
# liquid
'l': 'L',
'r': 'R',
# glide
'w': 'W',
# front vowels
'i': 'IY',
'i:': 'IIH',
'I': 'IH',
'y': 'UE',
'y:': 'YYH',
'Y': 'YY',
'e': 'EE',
'e:': 'EEH',
'2': 'OH',
'2:': 'OHH',
'9': 'OE',
'E': 'EH',
'E:': 'EHH',
'{': 'AE',
'{:': 'AEH',
'a': 'AH',
'a:': 'AAH',
'3': 'ER',
'3:': 'ERH',
# central vowels
'V': 'VV',
'@': 'AX',
'6': 'EX',
# back vowels
'u': 'UH',
'u:': 'UUH',
'U': 'UU',
'o': 'AO',
'o:': 'OOH',
'O': 'OO',
'O:': 'OOOH',
'A': 'AA',
'A:': 'AAAH',
'Q': 'QQ',
# diphtongs vowels
'aI': 'AY',
'OI': 'OI',
'aU': 'AW',
'OY': 'OY',
# Fuzzy stuff
'c': 'K',
'q': 'K'
}
MAX_PHONE_LENGTH = max([len(x) for x in XSAMPA_TO_ARPABET_MAPPING.keys()])
def xsampa_to_arpabet(xsampa_string, sep=' '):
logger = logging.getLogger(__name__)
s = xsampa_string.replace('-', '').replace('\'', '').replace(' ', '')
result = []
i = 0
while i < len(s):
num_remaining_chars = len(s) - i
phone_length = (MAX_PHONE_LENGTH
if MAX_PHONE_LENGTH > num_remaining_chars
else num_remaining_chars)
for j in range(phone_length, 0, -1):
phone = s[i:i + j]
if phone in XSAMPA_TO_ARPABET_MAPPING:
result.append(XSAMPA_TO_ARPABET_MAPPING[phone])
i += j
break
else:
logger.warning("Phone not found: '%s'", s[i])
i += 1
return sep.join(result)
| 17.544715 | 74 | 0.376738 |
import logging
XSAMPA_TO_ARPABET_MAPPING = {
'p': 'P',
'b': 'B',
't': 'T',
'd': 'D',
'k': 'K',
'g': 'G',
'?': 'Q',
'pf': 'PF',
'ts': 'TS',
'tS': 'CH',
'dZ': 'JH',
'f': 'F',
'v': 'V',
'T': 'TH',
'D': 'DH',
's': 'S',
'z': 'Z',
'S': 'SH',
'Z': 'ZH',
'C': 'CC',
'j': 'Y',
'x': 'X',
'R': 'RR',
'h': 'HH',
'H': 'HHH',
'm': 'M',
'n': 'N',
'N': 'NG',
'l': 'L',
'r': 'R',
'w': 'W',
'i': 'IY',
'i:': 'IIH',
'I': 'IH',
'y': 'UE',
'y:': 'YYH',
'Y': 'YY',
'e': 'EE',
'e:': 'EEH',
'2': 'OH',
'2:': 'OHH',
'9': 'OE',
'E': 'EH',
'E:': 'EHH',
'{': 'AE',
'{:': 'AEH',
'a': 'AH',
'a:': 'AAH',
'3': 'ER',
'3:': 'ERH',
'V': 'VV',
'@': 'AX',
'6': 'EX',
'u': 'UH',
'u:': 'UUH',
'U': 'UU',
'o': 'AO',
'o:': 'OOH',
'O': 'OO',
'O:': 'OOOH',
'A': 'AA',
'A:': 'AAAH',
'Q': 'QQ',
'aI': 'AY',
'OI': 'OI',
'aU': 'AW',
'OY': 'OY',
'c': 'K',
'q': 'K'
}
MAX_PHONE_LENGTH = max([len(x) for x in XSAMPA_TO_ARPABET_MAPPING.keys()])
def xsampa_to_arpabet(xsampa_string, sep=' '):
logger = logging.getLogger(__name__)
s = xsampa_string.replace('-', '').replace('\'', '').replace(' ', '')
result = []
i = 0
while i < len(s):
num_remaining_chars = len(s) - i
phone_length = (MAX_PHONE_LENGTH
if MAX_PHONE_LENGTH > num_remaining_chars
else num_remaining_chars)
for j in range(phone_length, 0, -1):
phone = s[i:i + j]
if phone in XSAMPA_TO_ARPABET_MAPPING:
result.append(XSAMPA_TO_ARPABET_MAPPING[phone])
i += j
break
else:
logger.warning("Phone not found: '%s'", s[i])
i += 1
return sep.join(result)
| true | true |
f7321d390042f6c5d74c536e7d5991129b32374f | 19,784 | py | Python | kplr/mast.py | danielrios12/kplr---acesso-a-dados-do-kepler | 4c6a823ad6a88ccd2d5cf8d9eed912a1e57489a2 | [
"MIT"
] | 35 | 2015-01-21T22:38:12.000Z | 2020-08-05T21:15:19.000Z | kplr/mast.py | danielrios12/kplr---acesso-a-dados-do-kepler | 4c6a823ad6a88ccd2d5cf8d9eed912a1e57489a2 | [
"MIT"
] | 12 | 2015-03-17T18:54:15.000Z | 2021-08-06T18:19:13.000Z | kplr/mast.py | danielrios12/kplr---acesso-a-dados-do-kepler | 4c6a823ad6a88ccd2d5cf8d9eed912a1e57489a2 | [
"MIT"
] | 17 | 2015-02-11T19:49:00.000Z | 2019-10-15T18:06:28.000Z | # -*- coding: utf-8 -*-
"""
Adapters for the field names/types returned by the MAST API.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["koi_adapter", "planet_adapter", "star_adapter", "dataset_adapter",
"epic_adapter"]
import logging
import six
# Python 2/3 compatibility: Python 3 removed the builtin ``unicode`` type.
# Referencing the bare name raises NameError only on Python 3, in which
# case we alias it to ``str`` (which is the unicode text type there).
try:
    unicode
except NameError:
    unicode = str
class Adapter(object):
    """
    An :class:`Adapter` is a callable that maps a dictionary to another
    dictionary with different keys and specified data types. Missing/invalid
    values will be mapped to ``None``.

    :param parameters:
        A dictionary of mappers. The keys should be the keys that will be in
        the input dictionary and the values should be 2-tuples with the output
        key and the callable type converter.

    """

    def __init__(self, parameters):
        # Copy the mapping so that adding the general-purpose parameters
        # below doesn't mutate the dictionary passed in by the caller.
        self._parameters = dict(parameters)

        # Add some general purpose parameters.
        self._parameters["Ang Sep (')"] = ("angular_separation", float)

    def __call__(self, row):
        """
        Convert a single result ``row`` (a dict keyed by MAST long names)
        into a dict keyed by the short names, with converted value types.

        Keys missing from ``row`` — and values the converter rejects — are
        mapped to ``None``. Keys in ``row`` that are not covered by the
        parameter mapping are logged as warnings and dropped.
        """
        row = dict(row)
        final = {}
        for longname, (shortname, conv) in self._parameters.items():
            value = row.pop(longname, None)
            if value is None:
                # Map missing fields straight to None. Without this guard,
                # str-like converters would turn None into the string "None".
                final[shortname] = None
                continue
            try:
                final[shortname] = conv(value)
            except (ValueError, TypeError):
                final[shortname] = None
        for k in row:
            # logging.warn is deprecated; use warning() with lazy %-args.
            logging.warning("Unrecognized parameter: '%s'", k)
        return final
koi_adapter = Adapter({
"Kepler ID": ("kepid", int),
"KOI Name": ("kepoi_name", six.text_type),
"KOI Number": ("kepoi", six.text_type),
"Kepler Disposition": ("koi_pdisposition", six.text_type),
"NExScI Disposition": ("koi_disposition", six.text_type),
"RA (J2000)": ("degree_ra", float),
"Dec (J2000)": ("degree_dec", float),
"Time of Transit Epoch": ("koi_time0bk", float),
"Time err1": ("koi_time0bk_err1", float),
"Time_err2": ("koi_time0bk_err2", float),
"Period": ("koi_period", float),
"Period err1": ("koi_period_err1", float),
"Period err2": ("koi_period_err2", float),
"Transit Depth": ("koi_depth", float),
"Depth err1": ("koi_depth_err1", float),
"Depth err2": ("koi_depth_err2", float),
"Duration": ("koi_duration", float),
"Duration err1": ("koi_duration_err1", float),
"Duration err2": ("koi_duration_err2", float),
"Ingress Duration": ("koi_ingress", float),
"Ingress err1": ("koi_ingress_err1", float),
"Ingress err2": ("koi_ingress_err2", float),
"Impact Parameter": ("koi_impact", float),
"Impact Parameter err1": ("koi_impact_err1", float),
"Impact Parameter err2": ("koi_impact_err2", float),
"Inclination": ("koi_incl", float),
"Inclination err1": ("koi_incl_err1", float),
"Inclination err2": ("koi_incl_err2", float),
"Semi-major Axis": ("koi_sma", float),
"Semi-major Axus err1": ("koi_sma_err1", float),
"Semi-major Axis err2": ("koi_sma_err2", float),
"Eccentricity": ("koi_eccen", float),
"Eccentricity err1": ("koi_eccen_err1", float),
"Eccentricity err2": ("koi_eccen_err2", float),
"Long of Periastron": ("koi_longp", float),
"Long err1": ("koi_longp_err1", float),
"Long err2": ("koi_longp_err2", float),
"r/R": ("koi_ror", float),
"r/R err1": ("koi_ror_err1", float),
"r/R err2": ("koi_ror_err2", float),
"a/R": ("koi_dor", float),
"a/R err1": ("koi_dor_err1", float),
"a/R err2": ("koi_dor_err2", float),
"Planet Radius": ("koi_prad", float),
"Planet Radius err1": ("koi_prad_err1", float),
"Planet Radius err2": ("koi_prad_err2", float),
"Teq": ("koi_teq", int),
"Teq err1": ("koi_teq_err1", int),
"Teq err2": ("koi_teq_err2", int),
"Teff": ("koi_steff", int),
"Teff err1": ("koi_steff_err1", int),
"Teff err2": ("koi_steff_err2", int),
"log(g)": ("koi_slogg", float),
"log(g) err1": ("koi_slogg_err1", float),
"log(g) err2": ("koi_slogg_err2", float),
"Metallicity": ("koi_smet", float),
"Metallicity err1": ("koi_smet_err1", float),
"Metallicity err2": ("koi_smet_err2", float),
"Stellar Radius": ("koi_srad", float),
"Stellar Radius err1": ("koi_srad_err1", float),
"Stellar Radius err2": ("koi_srad_err2", float),
"Stellar Mass": ("koi_smass", float),
"Stellar Mass err2": ("koi_smass_err2", float),
"Stellar Mass err1": ("koi_smass_err1", float),
"Age": ("koi_sage", float),
"Age err1": ("koi_sage_err1", float),
"Age err2": ("koi_sage_err2", float),
"Provenance": ("koi_sparprov", six.text_type),
"Quarters": ("koi_quarters", six.text_type),
"Limb Darkening Model": ("koi_limbdark_mod", six.text_type),
"Limb Darkening Coeff1": ("koi_ldm_coeff1", float),
"Limb Darkening Coeff2": ("koi_ldm_coeff2", float),
"Limb Darkening Coeff3": ("koi_ldm_coeff3", float),
"Limb Darkening Coeff4": ("koi_ldm_coeff4", float),
"Transit Number": ("koi_num_transits", int),
"Max single event sigma": ("koi_max_sngle_ev", float),
"Max Multievent sigma": ("koi_max_mult_ev", float),
"KOI count": ("koi_count", int),
"Binary Discrimination": ("koi_bin_oedp_sig", float),
"False Positive Bkgnd ID": ("koi_fp_bkgid", six.text_type),
"J-band diff": ("koi_fp_djmag", six.text_type),
"Comments": ("koi_comment", six.text_type),
"Transit Model": ("koi_trans_mod", six.text_type),
"Transit Model SNR": ("koi_model_snr", float),
"Transit Model DOF": ("koi_model_dof", float),
"Transit Model chisq": ("koi_model_chisq", float),
"FWM motion signif.": ("koi_fwm_stat_sig", float),
"gmag": ("koi_gmag", float),
"gmag err": ("koi_gmag_err", float),
"rmag": ("koi_rmag", float),
"rmag err": ("koi_rmag_err", float),
"imag": ("koi_imag", float),
"imag err": ("koi_imag_err", float),
"zmag": ("koi_zmag", float),
"zmag err": ("koi_zmag_err", float),
"Jmag": ("koi_jmag", float),
"Jmag err": ("koi_jmag_err", float),
"Hmag": ("koi_hmag", float),
"Hmag err": ("koi_hmag_err", float),
"Kmag": ("koi_kmag", float),
"Kmag err": ("koi_kmag_err", float),
"kepmag": ("koi_kepmag", float),
"kepmag err": ("koi_kepmag_err", float),
"Delivery Name": ("koi_delivname", six.text_type),
"FWM SRA": ("koi_fwm_sra", float),
"FWM SRA err": ("koi_fwm_sra_err", float),
"FWM SDec": ("koi_fwm_sdec", float),
"FWM SDec err": ("koi_fwm_sdec_err", float),
"FWM SRAO": ("koi_fwm_srao", float),
"FWM SRAO err": ("koi_fwm_srao_err", float),
"FWM SDeco": ("koi_fwm_sdeco", float),
"FWM SDeco err": ("koi_fwm_sdeco_err", float),
"FWM PRAO": ("koi_fwm_prao", float),
"FWM PRAO err": ("koi_fwm_prao_err", float),
"FWM PDeco": ("koi_fwm_pdeco", float),
"FWM PDeco err": ("koi_fwm_pdeco_err", float),
"Dicco MRA": ("koi_dicco_mra", float),
"Dicco MRA err": ("koi_dicco_mra_err", float),
"Dicco MDec": ("koi_dicco_mdec", float),
"Dicco MDec err": ("koi_dicco_mdec_err", float),
"Dicco MSky": ("koi_dicco_msky", float),
"Dicco MSky err": ("koi_dicco_msky_err", float),
"Dicco FRA": ("koi_dicco_fra", float),
"Dicco FRA err": ("koi_dicco_fra_err", float),
"Dicco FDec": ("koi_dicco_fdec", float),
"Dicco FDec err": ("koi_dicco_fdec_err", float),
"Dicco FSky": ("koi_dicco_fsky", float),
"Dicco FSky err": ("koi_dicco_fsky_err", float),
"Dikco MRA": ("koi_dikco_mra", float),
"Dikco MRA err": ("koi_dikco_mra_err", float),
"Dikco MDec": ("koi_dikco_mdec", float),
"Dikco MDec err": ("koi_dikco_mdec_err", float),
"Dikco MSky": ("koi_dikco_msky", float),
"Dikco MSky err": ("koi_dikco_msky_err", float),
"Dikco FRA": ("koi_dikco_fra", float),
"Dikco FRA err": ("koi_dikco_fra_err", float),
"Dikco FDec": ("koi_dikco_fdec", float),
"Dikco FDec err": ("koi_dikco_fdec_err", float),
"Dikco FSky": ("koi_dikco_fsky", float),
"Dikco FSky err": ("koi_dikco_fsky_err", float),
"Last Update": ("rowupdate", six.text_type),
})
planet_adapter = Adapter({
"Planet Name": ("kepler_name", six.text_type),
"Kepler ID": ("kepid", int),
"KOI Name": ("kepoi_name", six.text_type),
"Alt Name": ("alt_name", six.text_type),
"KOI Number": ("koi_number", six.text_type), # Just `koi` in API.
"RA (J2000)": ("degree_ra", float),
"RA Error": ("ra_err", float),
"Dec (J2000)": ("degree_dec", float),
"Dec Error": ("dec_err", float),
"2mass Name": ("tm_designation", six.text_type),
"Planet temp": ("koi_teq", int),
"Planet Radius": ("koi_prad", float),
"Transit duration": ("koi_duration", float),
"Period": ("koi_period", float),
"Period err1": ("koi_period_err1", float),
"Ingress Duration": ("koi_ingress", float),
"Impact Parameter": ("koi_impact", float),
"Inclination": ("koi_incl", float),
"Provenance": ("koi_sparprov", six.text_type),
"a/R": ("koi_dor", float),
"Transit Number": ("koi_num_transits", int),
"Transit Model": ("koi_trans_mod", six.text_type),
"Time of transit": ("koi_time0bk", float),
"Time of transit err1": ("koi_time0bk_err1", float),
"Transit Depth": ("koi_depth", float),
"Semi-major Axis": ("koi_sma", float),
"r/R": ("koi_ror", float),
"r/R err1": ("koi_ror_err1", float),
"Age": ("koi_sage", float),
"Metallicity": ("koi_smet", float),
"Stellar Mass": ("koi_smass", float),
"Stellar Radius": ("koi_srad", float),
"Stellar Teff": ("koi_steff", int),
"Logg": ("koi_slogg", float),
"KEP Mag": ("koi_kepmag", float),
"g Mag": ("koi_gmag", float),
"r Mag": ("koi_rmag", float),
"i Mag": ("koi_imag", float),
"z Mag": ("koi_zmag", float),
"J Mag": ("koi_jmag", float),
"H Mag": ("koi_hmag", float),
"K Mag": ("koi_kmag", float),
"KOI List": ("koi_list_flag", six.text_type),
"Last Update": ("koi_vet_date", six.text_type),
})
star_adapter = Adapter({
"Kepler ID": ("kic_kepler_id", int),
"RA (J2000)": ("kic_degree_ra", float),
"Dec (J2000)": ("kic_dec", float),
"RA PM (arcsec/yr)": ("kic_pmra", float),
"Dec PM (arcsec/yr)": ("kic_pmdec", float),
"u Mag": ("kic_umag", float),
"g Mag": ("kic_gmag", float),
"r Mag": ("kic_rmag", float),
"i Mag": ("kic_imag", float),
"z Mag": ("kic_zmag", float),
"Gred Mag": ("kic_gredmag", float),
"D51 Mag": ("kic_d51mag", float),
"J Mag": ("kic_jmag", float),
"H Mag": ("kic_hmag", float),
"K Mag": ("kic_kmag", float),
"Kepler Mag": ("kic_kepmag", float),
"2MASS ID": ("kic_2mass_id", six.text_type),
"2MASS Designation": ("kic_tmid", int),
"SCP ID": ("kic_scpid", int),
"Alt ID": ("kic_altid", int),
"Alt ID Source": ("kic_altsource", int),
"Star/Gal ID": ("kic_galaxy", int),
"Isolated/Blend ID": ("kic_blend", int),
"Var. ID": ("kic_variable", int),
"Teff (deg K)": ("kic_teff", int),
"Log G (cm/s/s)": ("kic_logg", float),
"Metallicity (solar=0.0)": ("kic_feh", float),
"E(B-V)": ("kic_ebminusv", float),
"A_V": ("kic_av", float),
"Radius (solar=1.0)": ("kic_radius", float),
"Kepmag Source": ("kic_cq", six.text_type),
"Photometry Qual": ("kic_pq", int),
"Astrophysics Qual": ("kic_aq", int),
"Catalog key": ("kic_catkey", int),
"Scp Key": ("kic_scpkey", int),
"Parallax (arcsec)": ("kic_parallax", float),
"Gal Lon (deg)": ("kic_glon", float),
"Gal Lat (deg)": ("kic_glat", float),
"Total PM (arcsec/yr)": ("kic_pmtotal", float),
"g-r color": ("kic_grcolor", float),
"J-K color": ("kic_jkcolor", float),
"g-K color": ("kic_gkcolor", float),
"RA hours (J2000)": ("kic_ra", float),
"Flag": ("flag", int),
})
dataset_adapter = Adapter({
"Kepler ID": ("ktc_kepler_id", int),
"Investigation ID": ("ktc_investigation_id", six.text_type),
"Pep ID": ("sci_pep_id", int),
"Dataset Name": ("sci_data_set_name", six.text_type),
"Quarter": ("sci_data_quarter", int),
"Data Release": ("sci_data_rel", int),
"RA (J2000)": ("sci_ra", float),
"Dec (J2000)": ("sci_dec", float),
"Target Type": ("ktc_target_type", six.text_type),
"Archive Class": ("sci_archive_class", six.text_type),
"Ref": ("refnum", int),
"Actual Start Time": ("sci_start_time", six.text_type),
"Actual End Time": ("sci_end_time", six.text_type),
"Release Date": ("sci_release_date", six.text_type),
"RA PM": ("kic_pmra", float),
"Dec PM": ("kic_pmdec", float),
"U Mag": ("kic_umag", float),
"G Mag": ("kic_gmag", float),
"R Mag": ("kic_rmag", float),
"I Mag": ("kic_imag", float),
"Z Mag": ("kic_zmag", float),
"GRed Mag": ("kic_gredmag", float),
"D51 Mag": ("kic_d51mag", float),
"J Mag": ("twoMass_jmag", float),
"H Mag": ("twoMass_hmag", float),
"K Mag": ("twoMass_kmag", float),
"KEP Mag": ("kic_kepmag", float),
"2MASS ID": ("twoMass_2mass_id", six.text_type),
"2MASS Designation": ("twoMass_tmid", int),
"2MASS conflict flag": ("twoMass_conflictFlag", six.text_type),
"SCP ID": ("kic_scpid", int),
"Alt ID": ("kic_altid", int),
"Alt ID Source": ("kic_altsource", int),
"Star/Gal ID": ("kic_galaxy", int),
"Isolated/Blend ID": ("kic_blend", int),
"Var. ID": ("kic_variable", int),
"Teff": ("kic_teff", int),
"Log G": ("kic_logg", float),
"Metallicity": ("kic_feh", float),
"E(B-V)": ("kic_ebminusv", float),
"A_V": ("kic_av", float),
"Radius": ("kic_radius", float),
"Kepmag Source": ("kic_cq", six.text_type),
"Photometry Qual": ("kic_pq", int),
"Astrophysics Qual": ("kic_aq", int),
"Catalog key": ("kic_catkey", int),
"Scp Key": ("kic_scpkey", int),
"Parallax": ("kic_parallax", float),
"Gal Lon": ("kic_glon", float),
"Gal Lat": ("kic_glat", float),
"Total PM": ("kic_pmtotal", float),
"G-R color": ("kic_grcolor", float),
"J-K color": ("twoMass_jkcolor", float),
"G-K color": ("twoMass_gkcolor", float),
"Processing Date": ("sci_generation_date", six.text_type),
"crowding": ("sci_crowdsap", float),
"contamination": ("sci_contamination", float),
"flux fraction": ("sci_flfrcsap", float),
"cdpp3": ("sci_Cdpp3_0", float),
"cdpp6": ("sci_Cdpp6_0", float),
"cdpp12": ("sci_Cdpp12_0", float),
"Module": ("sci_module", int),
"Output": ("sci_output", int),
"Channel": ("sci_channel", int),
"Skygroup_ID": ("sci_skygroup_id", int),
"Condition flag": ("condition_flag", six.text_type),
})
epic_adapter = Adapter({
"EPIC": ("id", int),
"RA": ("k2_ra", float),
"Dec": ("k2_dec", float),
"KepMag": ("kp", float),
"HIP": ("hip", int),
"TYC": ("tyc", six.text_type),
"UCAC": ("ucac", six.text_type),
"2MASS": ("twomass", six.text_type),
"SDSS": ("sdss", six.text_type),
"Object type": ("objtype", six.text_type),
"Kepflag": ("kepflag", six.text_type),
"pmra": ("pmra", float),
"e_pmra": ("e_pmra", float),
"pmdec": ("pmdec", float),
"e_pmdec": ("e_pmdec", float),
"plx": ("plx", float),
"e_plx": ("e_plx", float),
"Bmag": ("bmag", float),
"e_Bmag": ("e_bmag", float),
"Vmag": ("vmag", float),
"e_Vmag": ("e_vmag", float),
"umag": ("umag", float),
"e_umag": ("e_umag", float),
"gmag": ("gmag", float),
"e_gmag": ("e_gmag", float),
"rmag": ("rmag", float),
"e_rmag": ("e_rmag", float),
"imag": ("imag", float),
"e_imag": ("e_imag", float),
"zmag": ("zmag", float),
"e_zmag": ("e_zmag", float),
"Jmag": ("jmag", float),
"e_Jmag": ("e_jmag", float),
"Hmag": ("hmag", float),
"e_Hmag": ("e_hmag", float),
"Kmag": ("kmag", float),
"e_Kmag": ("e_kmag", float),
"w1mag": ("w1mag", float),
"e_w1mag": ("e_w1mag", float),
"w2mag": ("w2mag", float),
"e_w2mag": ("e_w2mag", float),
"w3mag": ("w3mag", float),
"e_w3mag": ("e_w3mag", float),
"w4mag": ("w4mag", float),
"e_w4mag": ("e_w4mag", float),
"Teff": ("teff", float),
"e_teff": ("e_teff", float),
"logg": ("logg", float),
"e_logg": ("e_logg", float),
"[Fe/H]": ("feh", float),
"e_[Fe/H]": ("e_feh", float),
"Radius": ("rad", float),
"e_rad": ("e_rad", float),
"mass": ("mass", float),
"e_mass": ("e_mass", float),
"rho": ("rho", float),
"e_rho": ("e_rho", float),
"lum": ("lum", float),
"e_lum": ("e_lum", float),
"Distance": ("d", float),
"e_d": ("e_d", float),
"E(B-V)": ("ebv", float),
"2MASS Flag": ("mflg", six.text_type),
"Nearest Neighbor": ("prox", float),
"Nomad ID": ("nomad", six.text_type),
})
k2_dataset_adapter = Adapter({
"K2 ID": ("ktc_k2_id", int),
"Dataset Name": ("sci_data_set_name", six.text_type),
"Campaign": ("sci_campaign", int),
"Object type": ("objtype", six.text_type),
"Data Release": ("sci_data_rel", int),
"RA (J2000)": ("sci_ra", float),
"Dec (J2000)": ("sci_dec", float),
"Target Type": ("ktc_target_type", six.text_type),
"Archive Class": ("sci_archive_class", six.text_type),
"Ref": ("refnum", int),
"Actual Start Time": ("sci_start_time", six.text_type),
"Actual End Time": ("sci_end_time", six.text_type),
"Investigation ID": ("ktc_investigation_id", six.text_type),
"RA PM": ("pmRA", float),
"RA PM Err": ("e_pmRA", float),
"Dec PM": ("pmDEC", float),
"Dec PM Err": ("e_pmDEC", float),
"Plx": ("plx", float),
"Plx Err": ("e_plx", float),
"U Mag": ("umag", float),
"U Mag Err": ("e_umag", float),
"B Mag": ("bmag", float),
"B Mag Err": ("e_bmag", float),
"V Mag": ("vmag", float),
"V Mag Err": ("e_vmag", float),
"G Mag": ("gmag", float),
"G Mag Err": ("e_gmag", float),
"R Mag": ("rmag", float),
"R Mag Err": ("e_rmag", float),
"I Mag": ("imag", float),
"I Mag Err": ("e_imag", float),
"Z Mag": ("zmag", float),
"Z Mag Err": ("e_zmag", float),
"J Mag": ("jmag", float),
"J Mag Err": ("e_jmag", float),
"H Mag": ("hmag", float),
"H Mag Err": ("e_hmag", float),
"K Mag": ("kmag", float),
"K Mag Err": ("e_kmag", float),
"KEP Mag": ("kp", float),
"Kep Flag": ("kepflag", six.text_type),
"Hip ID": ("hip", int),
"Tyc ID": ("tyc", six.text_type),
"SDSS ID": ("sdss", six.text_type),
"UCAC ID": ("ucac", six.text_type),
"2MASS ID": ("twoMass", six.text_type),
"2MASS Flag": ("mflg", six.text_type),
"Processing Date": ("sci_generation_date", six.text_type),
"crowding": ("sci_crowdsap", float),
"contamination": ("sci_contamination", float),
"flux fraction": ("sci_flfrcsap", float),
"cdpp3": ("sci_Cdpp3_0", float),
"cdpp6": ("sci_Cdpp6_0", float),
"cdpp12": ("sci_Cdpp12_0", float),
"Module": ("sci_module", int),
"Output": ("sci_output", int),
"Channel": ("sci_channel", int),
"Nearest Neighbor": ("prox", float),
"Nomad ID": ("nomad", six.text_type),
})
# Field mapping for Kepler target (pixel-level) queries.  Formatted and typed
# consistently with the other adapters in this module: a space after each key
# and ``six.text_type`` (not ``str``) for text fields, so that Python 2
# callers get unicode just like the other adapters.
target_adapter = Adapter({
    "masterRA": ("masterRA", float),
    "masterDec": ("masterDec", float),
    "Kepler_ID": ("kic_kepler_id", int),
    "2MASS_ID": ("twomass_2mass_id", six.text_type),
    "U_UBV": ("U_UBV", float),
    "gr": ("gr", float),
    "Parallax (arcsec)": ("kic_parallax", float),
    "Channel_0": ("Channel_0", int),
    "Channel_1": ("Channel_1", int),
    "Channel_2": ("Channel_2", int),
    "Channel_3": ("Channel_3", int),
    "Module_0": ("Module_0", int),
    "Module_1": ("Module_1", int),
    "Module_2": ("Module_2", int),
    "Module_3": ("Module_3", int),
    "Row_0": ("Row_0", int),
    "Row_1": ("Row_1", int),
    "Row_2": ("Row_2", int),
    "Row_3": ("Row_3", int),
    "Column_0": ("Column_0", int),
    "Column_1": ("Column_1", int),
    "Column_2": ("Column_2", int),
    "Column_3": ("Column_3", int),
})
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["koi_adapter", "planet_adapter", "star_adapter", "dataset_adapter",
"epic_adapter"]
import logging
import six
try:
unicode
except NameError:
unicode = str
class Adapter(object):
def __init__(self, parameters):
self._parameters = parameters
self._parameters["Ang Sep (')"] = ("angular_separation", float)
def __call__(self, row):
row = dict(row)
final = {}
for longname, (shortname, conv) in self._parameters.items():
try:
final[shortname] = conv(row.pop(longname, None))
except (ValueError, TypeError):
final[shortname] = None
for k in row:
logging.warn("Unrecognized parameter: '{0}'".format(k))
return final
koi_adapter = Adapter({
"Kepler ID": ("kepid", int),
"KOI Name": ("kepoi_name", six.text_type),
"KOI Number": ("kepoi", six.text_type),
"Kepler Disposition": ("koi_pdisposition", six.text_type),
"NExScI Disposition": ("koi_disposition", six.text_type),
"RA (J2000)": ("degree_ra", float),
"Dec (J2000)": ("degree_dec", float),
"Time of Transit Epoch": ("koi_time0bk", float),
"Time err1": ("koi_time0bk_err1", float),
"Time_err2": ("koi_time0bk_err2", float),
"Period": ("koi_period", float),
"Period err1": ("koi_period_err1", float),
"Period err2": ("koi_period_err2", float),
"Transit Depth": ("koi_depth", float),
"Depth err1": ("koi_depth_err1", float),
"Depth err2": ("koi_depth_err2", float),
"Duration": ("koi_duration", float),
"Duration err1": ("koi_duration_err1", float),
"Duration err2": ("koi_duration_err2", float),
"Ingress Duration": ("koi_ingress", float),
"Ingress err1": ("koi_ingress_err1", float),
"Ingress err2": ("koi_ingress_err2", float),
"Impact Parameter": ("koi_impact", float),
"Impact Parameter err1": ("koi_impact_err1", float),
"Impact Parameter err2": ("koi_impact_err2", float),
"Inclination": ("koi_incl", float),
"Inclination err1": ("koi_incl_err1", float),
"Inclination err2": ("koi_incl_err2", float),
"Semi-major Axis": ("koi_sma", float),
"Semi-major Axus err1": ("koi_sma_err1", float),
"Semi-major Axis err2": ("koi_sma_err2", float),
"Eccentricity": ("koi_eccen", float),
"Eccentricity err1": ("koi_eccen_err1", float),
"Eccentricity err2": ("koi_eccen_err2", float),
"Long of Periastron": ("koi_longp", float),
"Long err1": ("koi_longp_err1", float),
"Long err2": ("koi_longp_err2", float),
"r/R": ("koi_ror", float),
"r/R err1": ("koi_ror_err1", float),
"r/R err2": ("koi_ror_err2", float),
"a/R": ("koi_dor", float),
"a/R err1": ("koi_dor_err1", float),
"a/R err2": ("koi_dor_err2", float),
"Planet Radius": ("koi_prad", float),
"Planet Radius err1": ("koi_prad_err1", float),
"Planet Radius err2": ("koi_prad_err2", float),
"Teq": ("koi_teq", int),
"Teq err1": ("koi_teq_err1", int),
"Teq err2": ("koi_teq_err2", int),
"Teff": ("koi_steff", int),
"Teff err1": ("koi_steff_err1", int),
"Teff err2": ("koi_steff_err2", int),
"log(g)": ("koi_slogg", float),
"log(g) err1": ("koi_slogg_err1", float),
"log(g) err2": ("koi_slogg_err2", float),
"Metallicity": ("koi_smet", float),
"Metallicity err1": ("koi_smet_err1", float),
"Metallicity err2": ("koi_smet_err2", float),
"Stellar Radius": ("koi_srad", float),
"Stellar Radius err1": ("koi_srad_err1", float),
"Stellar Radius err2": ("koi_srad_err2", float),
"Stellar Mass": ("koi_smass", float),
"Stellar Mass err2": ("koi_smass_err2", float),
"Stellar Mass err1": ("koi_smass_err1", float),
"Age": ("koi_sage", float),
"Age err1": ("koi_sage_err1", float),
"Age err2": ("koi_sage_err2", float),
"Provenance": ("koi_sparprov", six.text_type),
"Quarters": ("koi_quarters", six.text_type),
"Limb Darkening Model": ("koi_limbdark_mod", six.text_type),
"Limb Darkening Coeff1": ("koi_ldm_coeff1", float),
"Limb Darkening Coeff2": ("koi_ldm_coeff2", float),
"Limb Darkening Coeff3": ("koi_ldm_coeff3", float),
"Limb Darkening Coeff4": ("koi_ldm_coeff4", float),
"Transit Number": ("koi_num_transits", int),
"Max single event sigma": ("koi_max_sngle_ev", float),
"Max Multievent sigma": ("koi_max_mult_ev", float),
"KOI count": ("koi_count", int),
"Binary Discrimination": ("koi_bin_oedp_sig", float),
"False Positive Bkgnd ID": ("koi_fp_bkgid", six.text_type),
"J-band diff": ("koi_fp_djmag", six.text_type),
"Comments": ("koi_comment", six.text_type),
"Transit Model": ("koi_trans_mod", six.text_type),
"Transit Model SNR": ("koi_model_snr", float),
"Transit Model DOF": ("koi_model_dof", float),
"Transit Model chisq": ("koi_model_chisq", float),
"FWM motion signif.": ("koi_fwm_stat_sig", float),
"gmag": ("koi_gmag", float),
"gmag err": ("koi_gmag_err", float),
"rmag": ("koi_rmag", float),
"rmag err": ("koi_rmag_err", float),
"imag": ("koi_imag", float),
"imag err": ("koi_imag_err", float),
"zmag": ("koi_zmag", float),
"zmag err": ("koi_zmag_err", float),
"Jmag": ("koi_jmag", float),
"Jmag err": ("koi_jmag_err", float),
"Hmag": ("koi_hmag", float),
"Hmag err": ("koi_hmag_err", float),
"Kmag": ("koi_kmag", float),
"Kmag err": ("koi_kmag_err", float),
"kepmag": ("koi_kepmag", float),
"kepmag err": ("koi_kepmag_err", float),
"Delivery Name": ("koi_delivname", six.text_type),
"FWM SRA": ("koi_fwm_sra", float),
"FWM SRA err": ("koi_fwm_sra_err", float),
"FWM SDec": ("koi_fwm_sdec", float),
"FWM SDec err": ("koi_fwm_sdec_err", float),
"FWM SRAO": ("koi_fwm_srao", float),
"FWM SRAO err": ("koi_fwm_srao_err", float),
"FWM SDeco": ("koi_fwm_sdeco", float),
"FWM SDeco err": ("koi_fwm_sdeco_err", float),
"FWM PRAO": ("koi_fwm_prao", float),
"FWM PRAO err": ("koi_fwm_prao_err", float),
"FWM PDeco": ("koi_fwm_pdeco", float),
"FWM PDeco err": ("koi_fwm_pdeco_err", float),
"Dicco MRA": ("koi_dicco_mra", float),
"Dicco MRA err": ("koi_dicco_mra_err", float),
"Dicco MDec": ("koi_dicco_mdec", float),
"Dicco MDec err": ("koi_dicco_mdec_err", float),
"Dicco MSky": ("koi_dicco_msky", float),
"Dicco MSky err": ("koi_dicco_msky_err", float),
"Dicco FRA": ("koi_dicco_fra", float),
"Dicco FRA err": ("koi_dicco_fra_err", float),
"Dicco FDec": ("koi_dicco_fdec", float),
"Dicco FDec err": ("koi_dicco_fdec_err", float),
"Dicco FSky": ("koi_dicco_fsky", float),
"Dicco FSky err": ("koi_dicco_fsky_err", float),
"Dikco MRA": ("koi_dikco_mra", float),
"Dikco MRA err": ("koi_dikco_mra_err", float),
"Dikco MDec": ("koi_dikco_mdec", float),
"Dikco MDec err": ("koi_dikco_mdec_err", float),
"Dikco MSky": ("koi_dikco_msky", float),
"Dikco MSky err": ("koi_dikco_msky_err", float),
"Dikco FRA": ("koi_dikco_fra", float),
"Dikco FRA err": ("koi_dikco_fra_err", float),
"Dikco FDec": ("koi_dikco_fdec", float),
"Dikco FDec err": ("koi_dikco_fdec_err", float),
"Dikco FSky": ("koi_dikco_fsky", float),
"Dikco FSky err": ("koi_dikco_fsky_err", float),
"Last Update": ("rowupdate", six.text_type),
})
planet_adapter = Adapter({
"Planet Name": ("kepler_name", six.text_type),
"Kepler ID": ("kepid", int),
"KOI Name": ("kepoi_name", six.text_type),
"Alt Name": ("alt_name", six.text_type),
"KOI Number": ("koi_number", six.text_type), # Just `koi` in API.
"RA (J2000)": ("degree_ra", float),
"RA Error": ("ra_err", float),
"Dec (J2000)": ("degree_dec", float),
"Dec Error": ("dec_err", float),
"2mass Name": ("tm_designation", six.text_type),
"Planet temp": ("koi_teq", int),
"Planet Radius": ("koi_prad", float),
"Transit duration": ("koi_duration", float),
"Period": ("koi_period", float),
"Period err1": ("koi_period_err1", float),
"Ingress Duration": ("koi_ingress", float),
"Impact Parameter": ("koi_impact", float),
"Inclination": ("koi_incl", float),
"Provenance": ("koi_sparprov", six.text_type),
"a/R": ("koi_dor", float),
"Transit Number": ("koi_num_transits", int),
"Transit Model": ("koi_trans_mod", six.text_type),
"Time of transit": ("koi_time0bk", float),
"Time of transit err1": ("koi_time0bk_err1", float),
"Transit Depth": ("koi_depth", float),
"Semi-major Axis": ("koi_sma", float),
"r/R": ("koi_ror", float),
"r/R err1": ("koi_ror_err1", float),
"Age": ("koi_sage", float),
"Metallicity": ("koi_smet", float),
"Stellar Mass": ("koi_smass", float),
"Stellar Radius": ("koi_srad", float),
"Stellar Teff": ("koi_steff", int),
"Logg": ("koi_slogg", float),
"KEP Mag": ("koi_kepmag", float),
"g Mag": ("koi_gmag", float),
"r Mag": ("koi_rmag", float),
"i Mag": ("koi_imag", float),
"z Mag": ("koi_zmag", float),
"J Mag": ("koi_jmag", float),
"H Mag": ("koi_hmag", float),
"K Mag": ("koi_kmag", float),
"KOI List": ("koi_list_flag", six.text_type),
"Last Update": ("koi_vet_date", six.text_type),
})
star_adapter = Adapter({
"Kepler ID": ("kic_kepler_id", int),
"RA (J2000)": ("kic_degree_ra", float),
"Dec (J2000)": ("kic_dec", float),
"RA PM (arcsec/yr)": ("kic_pmra", float),
"Dec PM (arcsec/yr)": ("kic_pmdec", float),
"u Mag": ("kic_umag", float),
"g Mag": ("kic_gmag", float),
"r Mag": ("kic_rmag", float),
"i Mag": ("kic_imag", float),
"z Mag": ("kic_zmag", float),
"Gred Mag": ("kic_gredmag", float),
"D51 Mag": ("kic_d51mag", float),
"J Mag": ("kic_jmag", float),
"H Mag": ("kic_hmag", float),
"K Mag": ("kic_kmag", float),
"Kepler Mag": ("kic_kepmag", float),
"2MASS ID": ("kic_2mass_id", six.text_type),
"2MASS Designation": ("kic_tmid", int),
"SCP ID": ("kic_scpid", int),
"Alt ID": ("kic_altid", int),
"Alt ID Source": ("kic_altsource", int),
"Star/Gal ID": ("kic_galaxy", int),
"Isolated/Blend ID": ("kic_blend", int),
"Var. ID": ("kic_variable", int),
"Teff (deg K)": ("kic_teff", int),
"Log G (cm/s/s)": ("kic_logg", float),
"Metallicity (solar=0.0)": ("kic_feh", float),
"E(B-V)": ("kic_ebminusv", float),
"A_V": ("kic_av", float),
"Radius (solar=1.0)": ("kic_radius", float),
"Kepmag Source": ("kic_cq", six.text_type),
"Photometry Qual": ("kic_pq", int),
"Astrophysics Qual": ("kic_aq", int),
"Catalog key": ("kic_catkey", int),
"Scp Key": ("kic_scpkey", int),
"Parallax (arcsec)": ("kic_parallax", float),
"Gal Lon (deg)": ("kic_glon", float),
"Gal Lat (deg)": ("kic_glat", float),
"Total PM (arcsec/yr)": ("kic_pmtotal", float),
"g-r color": ("kic_grcolor", float),
"J-K color": ("kic_jkcolor", float),
"g-K color": ("kic_gkcolor", float),
"RA hours (J2000)": ("kic_ra", float),
"Flag": ("flag", int),
})
dataset_adapter = Adapter({
"Kepler ID": ("ktc_kepler_id", int),
"Investigation ID": ("ktc_investigation_id", six.text_type),
"Pep ID": ("sci_pep_id", int),
"Dataset Name": ("sci_data_set_name", six.text_type),
"Quarter": ("sci_data_quarter", int),
"Data Release": ("sci_data_rel", int),
"RA (J2000)": ("sci_ra", float),
"Dec (J2000)": ("sci_dec", float),
"Target Type": ("ktc_target_type", six.text_type),
"Archive Class": ("sci_archive_class", six.text_type),
"Ref": ("refnum", int),
"Actual Start Time": ("sci_start_time", six.text_type),
"Actual End Time": ("sci_end_time", six.text_type),
"Release Date": ("sci_release_date", six.text_type),
"RA PM": ("kic_pmra", float),
"Dec PM": ("kic_pmdec", float),
"U Mag": ("kic_umag", float),
"G Mag": ("kic_gmag", float),
"R Mag": ("kic_rmag", float),
"I Mag": ("kic_imag", float),
"Z Mag": ("kic_zmag", float),
"GRed Mag": ("kic_gredmag", float),
"D51 Mag": ("kic_d51mag", float),
"J Mag": ("twoMass_jmag", float),
"H Mag": ("twoMass_hmag", float),
"K Mag": ("twoMass_kmag", float),
"KEP Mag": ("kic_kepmag", float),
"2MASS ID": ("twoMass_2mass_id", six.text_type),
"2MASS Designation": ("twoMass_tmid", int),
"2MASS conflict flag": ("twoMass_conflictFlag", six.text_type),
"SCP ID": ("kic_scpid", int),
"Alt ID": ("kic_altid", int),
"Alt ID Source": ("kic_altsource", int),
"Star/Gal ID": ("kic_galaxy", int),
"Isolated/Blend ID": ("kic_blend", int),
"Var. ID": ("kic_variable", int),
"Teff": ("kic_teff", int),
"Log G": ("kic_logg", float),
"Metallicity": ("kic_feh", float),
"E(B-V)": ("kic_ebminusv", float),
"A_V": ("kic_av", float),
"Radius": ("kic_radius", float),
"Kepmag Source": ("kic_cq", six.text_type),
"Photometry Qual": ("kic_pq", int),
"Astrophysics Qual": ("kic_aq", int),
"Catalog key": ("kic_catkey", int),
"Scp Key": ("kic_scpkey", int),
"Parallax": ("kic_parallax", float),
"Gal Lon": ("kic_glon", float),
"Gal Lat": ("kic_glat", float),
"Total PM": ("kic_pmtotal", float),
"G-R color": ("kic_grcolor", float),
"J-K color": ("twoMass_jkcolor", float),
"G-K color": ("twoMass_gkcolor", float),
"Processing Date": ("sci_generation_date", six.text_type),
"crowding": ("sci_crowdsap", float),
"contamination": ("sci_contamination", float),
"flux fraction": ("sci_flfrcsap", float),
"cdpp3": ("sci_Cdpp3_0", float),
"cdpp6": ("sci_Cdpp6_0", float),
"cdpp12": ("sci_Cdpp12_0", float),
"Module": ("sci_module", int),
"Output": ("sci_output", int),
"Channel": ("sci_channel", int),
"Skygroup_ID": ("sci_skygroup_id", int),
"Condition flag": ("condition_flag", six.text_type),
})
epic_adapter = Adapter({
"EPIC": ("id", int),
"RA": ("k2_ra", float),
"Dec": ("k2_dec", float),
"KepMag": ("kp", float),
"HIP": ("hip", int),
"TYC": ("tyc", six.text_type),
"UCAC": ("ucac", six.text_type),
"2MASS": ("twomass", six.text_type),
"SDSS": ("sdss", six.text_type),
"Object type": ("objtype", six.text_type),
"Kepflag": ("kepflag", six.text_type),
"pmra": ("pmra", float),
"e_pmra": ("e_pmra", float),
"pmdec": ("pmdec", float),
"e_pmdec": ("e_pmdec", float),
"plx": ("plx", float),
"e_plx": ("e_plx", float),
"Bmag": ("bmag", float),
"e_Bmag": ("e_bmag", float),
"Vmag": ("vmag", float),
"e_Vmag": ("e_vmag", float),
"umag": ("umag", float),
"e_umag": ("e_umag", float),
"gmag": ("gmag", float),
"e_gmag": ("e_gmag", float),
"rmag": ("rmag", float),
"e_rmag": ("e_rmag", float),
"imag": ("imag", float),
"e_imag": ("e_imag", float),
"zmag": ("zmag", float),
"e_zmag": ("e_zmag", float),
"Jmag": ("jmag", float),
"e_Jmag": ("e_jmag", float),
"Hmag": ("hmag", float),
"e_Hmag": ("e_hmag", float),
"Kmag": ("kmag", float),
"e_Kmag": ("e_kmag", float),
"w1mag": ("w1mag", float),
"e_w1mag": ("e_w1mag", float),
"w2mag": ("w2mag", float),
"e_w2mag": ("e_w2mag", float),
"w3mag": ("w3mag", float),
"e_w3mag": ("e_w3mag", float),
"w4mag": ("w4mag", float),
"e_w4mag": ("e_w4mag", float),
"Teff": ("teff", float),
"e_teff": ("e_teff", float),
"logg": ("logg", float),
"e_logg": ("e_logg", float),
"[Fe/H]": ("feh", float),
"e_[Fe/H]": ("e_feh", float),
"Radius": ("rad", float),
"e_rad": ("e_rad", float),
"mass": ("mass", float),
"e_mass": ("e_mass", float),
"rho": ("rho", float),
"e_rho": ("e_rho", float),
"lum": ("lum", float),
"e_lum": ("e_lum", float),
"Distance": ("d", float),
"e_d": ("e_d", float),
"E(B-V)": ("ebv", float),
"2MASS Flag": ("mflg", six.text_type),
"Nearest Neighbor": ("prox", float),
"Nomad ID": ("nomad", six.text_type),
})
k2_dataset_adapter = Adapter({
"K2 ID": ("ktc_k2_id", int),
"Dataset Name": ("sci_data_set_name", six.text_type),
"Campaign": ("sci_campaign", int),
"Object type": ("objtype", six.text_type),
"Data Release": ("sci_data_rel", int),
"RA (J2000)": ("sci_ra", float),
"Dec (J2000)": ("sci_dec", float),
"Target Type": ("ktc_target_type", six.text_type),
"Archive Class": ("sci_archive_class", six.text_type),
"Ref": ("refnum", int),
"Actual Start Time": ("sci_start_time", six.text_type),
"Actual End Time": ("sci_end_time", six.text_type),
"Investigation ID": ("ktc_investigation_id", six.text_type),
"RA PM": ("pmRA", float),
"RA PM Err": ("e_pmRA", float),
"Dec PM": ("pmDEC", float),
"Dec PM Err": ("e_pmDEC", float),
"Plx": ("plx", float),
"Plx Err": ("e_plx", float),
"U Mag": ("umag", float),
"U Mag Err": ("e_umag", float),
"B Mag": ("bmag", float),
"B Mag Err": ("e_bmag", float),
"V Mag": ("vmag", float),
"V Mag Err": ("e_vmag", float),
"G Mag": ("gmag", float),
"G Mag Err": ("e_gmag", float),
"R Mag": ("rmag", float),
"R Mag Err": ("e_rmag", float),
"I Mag": ("imag", float),
"I Mag Err": ("e_imag", float),
"Z Mag": ("zmag", float),
"Z Mag Err": ("e_zmag", float),
"J Mag": ("jmag", float),
"J Mag Err": ("e_jmag", float),
"H Mag": ("hmag", float),
"H Mag Err": ("e_hmag", float),
"K Mag": ("kmag", float),
"K Mag Err": ("e_kmag", float),
"KEP Mag": ("kp", float),
"Kep Flag": ("kepflag", six.text_type),
"Hip ID": ("hip", int),
"Tyc ID": ("tyc", six.text_type),
"SDSS ID": ("sdss", six.text_type),
"UCAC ID": ("ucac", six.text_type),
"2MASS ID": ("twoMass", six.text_type),
"2MASS Flag": ("mflg", six.text_type),
"Processing Date": ("sci_generation_date", six.text_type),
"crowding": ("sci_crowdsap", float),
"contamination": ("sci_contamination", float),
"flux fraction": ("sci_flfrcsap", float),
"cdpp3": ("sci_Cdpp3_0", float),
"cdpp6": ("sci_Cdpp6_0", float),
"cdpp12": ("sci_Cdpp12_0", float),
"Module": ("sci_module", int),
"Output": ("sci_output", int),
"Channel": ("sci_channel", int),
"Nearest Neighbor": ("prox", float),
"Nomad ID": ("nomad", six.text_type),
})
target_adapter = Adapter({
"masterRA": ("masterRA", float),
"masterDec": ("masterDec", float),
"Kepler_ID":("kic_kepler_id", int),
"2MASS_ID":("twomass_2mass_id", str),
"U_UBV":("U_UBV", float),
"gr":("gr", float),
"Parallax (arcsec)":("kic_parallax", float),
"Channel_0": ("Channel_0", int),
"Channel_1": ("Channel_1", int),
"Channel_2": ("Channel_2", int),
"Channel_3": ("Channel_3", int),
"Module_0": ("Module_0", int),
"Module_1": ("Module_1", int),
"Module_2": ("Module_2", int),
"Module_3": ("Module_3", int),
"Row_0": ("Row_0", int),
"Row_1": ("Row_1", int),
"Row_2": ("Row_2", int),
"Row_3": ("Row_3", int),
"Column_0": ("Column_0", int),
"Column_1": ("Column_1", int),
"Column_2": ("Column_2", int),
"Column_3": ("Column_3", int),
}) | true | true |
f7321eca94435f51e5be0a02668db295e22b7a07 | 5,713 | py | Python | mavsim_python/chap3/mav_dynamics.py | sethmnielsen/mavsim_template_files | 453ec4f7d38fc2d1162198b554834b5bdb7de96f | [
"MIT"
] | null | null | null | mavsim_python/chap3/mav_dynamics.py | sethmnielsen/mavsim_template_files | 453ec4f7d38fc2d1162198b554834b5bdb7de96f | [
"MIT"
] | null | null | null | mavsim_python/chap3/mav_dynamics.py | sethmnielsen/mavsim_template_files | 453ec4f7d38fc2d1162198b554834b5bdb7de96f | [
"MIT"
] | null | null | null | """
mav_dynamics
- this file implements the dynamic equations of motion for MAV
- use unit quaternion for the attitude state
part of mavsimPy
- Beard & McLain, PUP, 2012
- Update history:
12/17/2018 - RWB
1/14/2019 - RWB
"""
import sys
sys.path.append('..')
import numpy as np
# load message types
from message_types.msg_state import msg_state
import parameters.aerosonde_parameters as MAV
from tools.rotations import Quaternion2Euler
from IPython.core.debugger import Pdb
class mav_dynamics:
    """Rigid-body MAV dynamics with a unit-quaternion attitude state.

    The 13x1 state [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r] is
    propagated with fixed-step RK4 in :meth:`update_state`.
    """
    def __init__(self, Ts):
        # Ts: integration time step [s] used by RK4 in update_state().
        self.ts_simulation = Ts
        # set initial states based on parameter file
        self.reset_state()
        self.msg_true_state = msg_state()
    ###################################
    # public functions
    def reset_state(self):
        """Reset the internal state vector to the parameter-file values."""
        # _state is the 13x1 internal state of the aircraft that is being propagated:
        #   _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r]
        self._state = np.array([[MAV.pn0],  # (0)
                                [MAV.pe0],  # (1)
                                [MAV.pd0],  # (2)
                                [MAV.u0],  # (3)
                                [MAV.v0],  # (4)
                                [MAV.w0],  # (5)
                                [MAV.e0],  # (6)
                                [MAV.e1],  # (7)
                                [MAV.e2],  # (8)
                                [MAV.e3],  # (9)
                                [MAV.p0],  # (10)
                                [MAV.q0],  # (11)
                                [MAV.r0]])  # (12)
    def update_state(self, forces_moments):
        '''
            Integrate the differential equations defining dynamics.
            Inputs are the forces and moments on the aircraft.
            Ts is the time step between function calls.
        '''
        # Integrate ODE using Runge-Kutta RK4 algorithm
        time_step = self.ts_simulation
        k1 = self._derivatives(self._state, forces_moments)
        k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)
        k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)
        k4 = self._derivatives(self._state + time_step*k3, forces_moments)
        self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)
        # normalize the quaternion
        # (RK4 does not preserve unit norm, so renormalize each step)
        e0 = self._state.item(6)
        e1 = self._state.item(7)
        e2 = self._state.item(8)
        e3 = self._state.item(9)
        normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)
        self._state[6][0] = self._state.item(6)/normE
        self._state[7][0] = self._state.item(7)/normE
        self._state[8][0] = self._state.item(8)/normE
        self._state[9][0] = self._state.item(9)/normE
        # update the message class for the true state
        self._update_msg_true_state()
    ###################################
    # private functions
    def _derivatives(self, state, forces_moments):
        """
        for the dynamics xdot = f(x, u), returns f(x, u)
        """
        # extract the states
        pn = state.item(0)
        pe = state.item(1)
        pd = state.item(2)
        u = state.item(3)
        v = state.item(4)
        w = state.item(5)
        e0 = state.item(6)
        e1 = state.item(7)
        e2 = state.item(8)
        e3 = state.item(9)
        p = state.item(10)
        q = state.item(11)
        r = state.item(12)
        # extract forces/moments
        fx = forces_moments.item(0)
        fy = forces_moments.item(1)
        fz = forces_moments.item(2)
        l = forces_moments.item(3)
        m = forces_moments.item(4)
        n = forces_moments.item(5)
        # position kinematics
        # R_vb: rotation built directly from the quaternion elements,
        # used to express body-frame velocity in the inertial frame.
        R_vb = np.array([[e1**2+e0**2-e2**2-e3**2, 2*(e1*e2-e3*e0), 2*(e1*e3+e2*e0)],
                         [2*(e1*e2+e3*e0), e2**2+e0**2-e1**2-e3**2, 2*(e2*e3-e1*e0)],
                         [2*(e1*e3-e2*e0), 2*(e2*e3+e1*e0), e3**2+e0**2-e1**2-e2**2]])
        pn_dot, pe_dot, pd_dot = R_vb @ np.array([u, v, w])
        # position dynamics
        vec_pos = np.array([r*v - q*w, p*w - r*u, q*u - p*v])
        u_dot, v_dot, w_dot = vec_pos + 1/MAV.mass * np.array([fx, fy, fz])
        # rotational kinematics
        mat_rot = np.array([[0, -p, -q, -r],
                            [p, 0, r, -q],
                            [q, -r, 0, p],
                            [r, q, -p, 0]])
        e0_dot, e1_dot, e2_dot, e3_dot = 0.5*mat_rot @ np.array([e0,e1,e2,e3])
        # rotatonal dynamics
        # gamma terms are precomputed inertia combinations from the
        # parameter file (Beard & McLain notation).
        G = MAV.gamma
        G1 = MAV.gamma1
        G2 = MAV.gamma2
        G3 = MAV.gamma3
        G4 = MAV.gamma4
        G5 = MAV.gamma5
        G6 = MAV.gamma6
        G7 = MAV.gamma7
        G8 = MAV.gamma8
        vec_rot = np.array([G1*p*q - G2*q*r, G5*p*r - G6*(p**2-r**2), G7*p*q - G1*q*r])
        vec_rot2 = np.array([G3*l + G4*n, m/MAV.Jy, G4*l + G8*n])
        p_dot, q_dot, r_dot = vec_rot + vec_rot2
        # collect the derivative of the states
        x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,
                           e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T
        return x_dot
    def _update_msg_true_state(self):
        # update the true state message:
        # converts the quaternion back to Euler angles for the message.
        phi, theta, psi = Quaternion2Euler(self._state[6:10])
        self.msg_true_state.pn = self._state.item(0)
        self.msg_true_state.pe = self._state.item(1)
        self.msg_true_state.h = -self._state.item(2)
        self.msg_true_state.phi = phi
        self.msg_true_state.theta = theta
        self.msg_true_state.psi = psi
        self.msg_true_state.p = self._state.item(10)
        self.msg_true_state.q = self._state.item(11)
        self.msg_true_state.r = self._state.item(12)
| 35.70625 | 87 | 0.514266 | import sys
sys.path.append('..')
import numpy as np
from message_types.msg_state import msg_state
import parameters.aerosonde_parameters as MAV
from tools.rotations import Quaternion2Euler
from IPython.core.debugger import Pdb
class mav_dynamics:
def __init__(self, Ts):
self.ts_simulation = Ts
self.reset_state()
self.msg_true_state = msg_state()
[MAV.r0]])
def update_state(self, forces_moments):
time_step = self.ts_simulation
k1 = self._derivatives(self._state, forces_moments)
k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)
k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)
k4 = self._derivatives(self._state + time_step*k3, forces_moments)
self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)
e0 = self._state.item(6)
e1 = self._state.item(7)
e2 = self._state.item(8)
e3 = self._state.item(9)
normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)
self._state[6][0] = self._state.item(6)/normE
self._state[7][0] = self._state.item(7)/normE
self._state[8][0] = self._state.item(8)/normE
self._state[9][0] = self._state.item(9)/normE
self._update_msg_true_state()
em(4)
n = forces_moments.item(5)
R_vb = np.array([[e1**2+e0**2-e2**2-e3**2, 2*(e1*e2-e3*e0), 2*(e1*e3+e2*e0)],
[2*(e1*e2+e3*e0), e2**2+e0**2-e1**2-e3**2, 2*(e2*e3-e1*e0)],
[2*(e1*e3-e2*e0), 2*(e2*e3+e1*e0), e3**2+e0**2-e1**2-e2**2]])
pn_dot, pe_dot, pd_dot = R_vb @ np.array([u, v, w])
vec_pos = np.array([r*v - q*w, p*w - r*u, q*u - p*v])
u_dot, v_dot, w_dot = vec_pos + 1/MAV.mass * np.array([fx, fy, fz])
mat_rot = np.array([[0, -p, -q, -r],
[p, 0, r, -q],
[q, -r, 0, p],
[r, q, -p, 0]])
e0_dot, e1_dot, e2_dot, e3_dot = 0.5*mat_rot @ np.array([e0,e1,e2,e3])
G = MAV.gamma
G1 = MAV.gamma1
G2 = MAV.gamma2
G3 = MAV.gamma3
G4 = MAV.gamma4
G5 = MAV.gamma5
G6 = MAV.gamma6
G7 = MAV.gamma7
G8 = MAV.gamma8
vec_rot = np.array([G1*p*q - G2*q*r, G5*p*r - G6*(p**2-r**2), G7*p*q - G1*q*r])
vec_rot2 = np.array([G3*l + G4*n, m/MAV.Jy, G4*l + G8*n])
p_dot, q_dot, r_dot = vec_rot + vec_rot2
x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,
e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T
return x_dot
def _update_msg_true_state(self):
phi, theta, psi = Quaternion2Euler(self._state[6:10])
self.msg_true_state.pn = self._state.item(0)
self.msg_true_state.pe = self._state.item(1)
self.msg_true_state.h = -self._state.item(2)
self.msg_true_state.phi = phi
self.msg_true_state.theta = theta
self.msg_true_state.psi = psi
self.msg_true_state.p = self._state.item(10)
self.msg_true_state.q = self._state.item(11)
self.msg_true_state.r = self._state.item(12)
| true | true |
f7321f3d1f5fe29a720bf5116f40781e9a3db6af | 6,868 | py | Python | docs/conf.py | VictorCoCo/flask-ask | 526b3a272fdd6e1438e2191c5ab08ff20853817d | [
"Apache-2.0"
] | null | null | null | docs/conf.py | VictorCoCo/flask-ask | 526b3a272fdd6e1438e2191c5ab08ff20853817d | [
"Apache-2.0"
] | null | null | null | docs/conf.py | VictorCoCo/flask-ask | 526b3a272fdd6e1438e2191c5ab08ff20853817d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Flask documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 6 15:24:58 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("_themes"))
sys.path.append(os.path.abspath("."))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "flaskdocext"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Flask-Ask"
copyright = u"2016, John Wheeler"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "flask"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"github_fork": "johnwheeler/flask-ask"}
html_sidebars = {
"index": ["globaltoc.html", "links.html", "stayinformed.html"],
"**": ["sidebarlogo.html", "globaltoc.html", "links.html", "stayinformed.html"],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar. Do not set, template magic!
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "flask-favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# 'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
# '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
# 'sourcelink.html', 'searchbox.html']
# }
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
# epub_title = ''
# epub_author = ''
# epub_publisher = ''
# epub_copyright = ''
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
| 34.512563 | 84 | 0.71491 |
from __future__ import print_function
import sys, os
sys.path.append(os.path.abspath("_themes"))
sys.path.append(os.path.abspath("."))
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "flaskdocext"]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"Flask-Ask"
copyright = u"2016, John Wheeler"
exclude_patterns = ["_build"]
html_theme = "flask"
html_theme_options = {"github_fork": "johnwheeler/flask-ask"}
html_sidebars = {
"index": ["globaltoc.html", "links.html", "stayinformed.html"],
"**": ["sidebarlogo.html", "globaltoc.html", "links.html", "stayinformed.html"],
}
html_theme_path = ["_themes"]
html_static_path = ["_static"]
html_use_modindex = False
html_show_sphinx = False
| true | true |
f7321f77d6d135374ca6addceb6958ab720ca656 | 1,435 | py | Python | solutions/previous_solution_python/leetcode_210.py | YuhanShi53/Leetcode_solutions | cdcad34656d25d6af09b226e17250c6070305ab0 | [
"MIT"
] | null | null | null | solutions/previous_solution_python/leetcode_210.py | YuhanShi53/Leetcode_solutions | cdcad34656d25d6af09b226e17250c6070305ab0 | [
"MIT"
] | null | null | null | solutions/previous_solution_python/leetcode_210.py | YuhanShi53/Leetcode_solutions | cdcad34656d25d6af09b226e17250c6070305ab0 | [
"MIT"
] | null | null | null | """ Leetcode 210 - Course Schedule II
https://leetcode.com/problems/course-schedule-ii/
1. Topological-Sorting & BFS: Time: O(E+V) Space: O(E+V)
"""
from collections import deque
from typing import List
class Solution1:
    """ 1. Topological Sorting & BFS (Kahn's algorithm) """

    def find_order(self, numCourses: int,
                   prerequisites: List[List[int]]) -> List[int]:
        """Return one valid course ordering, or [] if prerequisites are cyclic.

        Each pair [a, b] means course b must be taken before course a.
        Runs in O(V + E): the original used a plain list as a queue with
        insert(0, x), which is O(n) per operation; a deque makes every
        queue operation O(1). The redundant numCourses == 1 shortcut is
        dropped — the general algorithm handles it.
        """
        # Edge b -> a for every prerequisite pair [a, b].
        dependents = [[] for _ in range(numCourses)]
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            dependents[prereq].append(course)
            indegree[course] += 1
        # Start from every course with no outstanding prerequisites.
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        order = []
        while queue:
            course = queue.popleft()
            order.append(course)
            for nxt in dependents[course]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)
        # A cycle leaves some course with nonzero indegree, so the order
        # is incomplete in that case.
        return order if len(order) == numCourses else []
if __name__ == '__main__':
    # Smoke test: 0 depends on 1 and 2, and 1 depends on 2 -> [2, 1, 0].
    num_courses = 3
    prerequisites = [[0, 1], [0, 2], [1, 2]]
    res = Solution1().find_order(num_courses, prerequisites)
    print(res)
| 25.625 | 64 | 0.531707 |
from typing import List
class Solution1:
def find_order(self, numCourses: int,
prerequisites: List[List[int]]) -> List[int]:
if numCourses == 1:
return [0]
out_dict = {}
in_dict = {}
for x in range(numCourses):
in_dict[x] = 0
for pair in prerequisites:
if out_dict.get(pair[0], 0):
out_dict[pair[0]].append(pair[1])
else:
out_dict[pair[0]] = [pair[1]]
in_dict[pair[1]] += 1
courses_without_in = []
order = []
for item in in_dict.items():
if item[1] == 0:
courses_without_in.append(item[0])
while courses_without_in:
course_no_pre = courses_without_in.pop()
order.append(course_no_pre)
for x in out_dict.get(course_no_pre, []):
in_dict[x] -= 1
if in_dict[x] == 0:
courses_without_in.insert(0, x)
return order[::-1] if len(order) == numCourses else []
if __name__ == '__main__':
num_courses = 3
prerequisites = [[0, 1], [0, 2], [1, 2]]
res = Solution1().find_order(num_courses, prerequisites)
print(res)
| true | true |
f7321f8b7e649e817c57077e164139a3d84e2925 | 1,003 | py | Python | users/models.py | Sundaybrian/hood-watch | 728283260336bf164d66832dd6b8fe4aa3e60a33 | [
"MIT"
] | null | null | null | users/models.py | Sundaybrian/hood-watch | 728283260336bf164d66832dd6b8fe4aa3e60a33 | [
"MIT"
] | 11 | 2020-06-05T22:55:53.000Z | 2022-03-11T23:59:17.000Z | users/models.py | Sundaybrian/hood-watch | 728283260336bf164d66832dd6b8fe4aa3e60a33 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from hood.models import NeighbourHood,Business,Location
from PIL import Image
# Create your models here.
class Profile(models.Model):
    """Per-user profile data attached one-to-one to Django's built-in User."""
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    # Default avatar shown until the user uploads their own picture.
    image=models.ImageField(default='naomi.jpg',upload_to='profile_pics')
    bio=models.TextField(blank=True)
    # DO_NOTHING: the profile row is kept even if its hood/location
    # rows are removed (null=True allows unassigned profiles).
    neighbourhood=models.ForeignKey(NeighbourHood,on_delete=models.DO_NOTHING,null=True)
    location=models.ForeignKey(Location,on_delete=models.DO_NOTHING,null=True)
    def __str__(self):
        return f'{self.user.username} Profile'
    def save(self,*args,**kwargs):
        '''
        Override save() in order to downscale uploaded profile images
        to at most 300x300 pixels after the model row is written.
        '''
        super(Profile,self).save(*args,**kwargs)
        img=Image.open(self.image)
        if img.height>300 or img.width>300:
            output_size=(300,300)
            # thumbnail() resizes in place, preserving aspect ratio.
            img.thumbnail(output_size)
            img.save(self.image.path)
| 31.34375 | 88 | 0.698903 | from django.db import models
from django.contrib.auth.models import User
from hood.models import NeighbourHood,Business,Location
from PIL import Image
class Profile(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
image=models.ImageField(default='naomi.jpg',upload_to='profile_pics')
bio=models.TextField(blank=True)
neighbourhood=models.ForeignKey(NeighbourHood,on_delete=models.DO_NOTHING,null=True)
location=models.ForeignKey(Location,on_delete=models.DO_NOTHING,null=True)
def __str__(self):
return f'{self.user.username} Profile'
def save(self,*args,**kwargs):
super(Profile,self).save(*args,**kwargs)
img=Image.open(self.image)
if img.height>300 or img.width>300:
output_size=(300,300)
img.thumbnail(output_size)
img.save(self.image.path)
| true | true |
f732221b585588f0d4ac5cd124691e264ed6d756 | 179 | py | Python | apps/node/src/app/main/routes/__init__.py | next-fernandocerezal/PyGrid | b82793b0beecd26338c102573a9891c2e86707c8 | [
"Apache-2.0"
] | 1 | 2021-10-05T18:57:02.000Z | 2021-10-05T18:57:02.000Z | apps/node/src/app/main/routes/__init__.py | next-fernandocerezal/PyGrid | b82793b0beecd26338c102573a9891c2e86707c8 | [
"Apache-2.0"
] | null | null | null | apps/node/src/app/main/routes/__init__.py | next-fernandocerezal/PyGrid | b82793b0beecd26338c102573a9891c2e86707c8 | [
"Apache-2.0"
] | null | null | null | from .data_centric.routes import *
from .general import *
from .model_centric.routes import *
from .user_related import *
from .role_related import *
from .group_related import *
| 25.571429 | 35 | 0.787709 | from .data_centric.routes import *
from .general import *
from .model_centric.routes import *
from .user_related import *
from .role_related import *
from .group_related import *
| true | true |
f7322312f3d2f82f8c036e5d17590635e08e0c15 | 1,784 | py | Python | unrecognized.py | timvandermeij/sentiment-analysis | 39a1bdbe56248c9dbbc25107ed339621ef0d65df | [
"MIT"
] | 12 | 2015-06-04T10:33:22.000Z | 2021-07-25T07:49:04.000Z | unrecognized.py | timvandermeij/sentiment-analysis | 39a1bdbe56248c9dbbc25107ed339621ef0d65df | [
"MIT"
] | null | null | null | unrecognized.py | timvandermeij/sentiment-analysis | 39a1bdbe56248c9dbbc25107ed339621ef0d65df | [
"MIT"
] | 3 | 2015-06-17T08:28:04.000Z | 2020-01-16T00:21:30.000Z | import sys
import linecache
from analyze import Analyzer
from classify import Classifier
from utils import Utilities
from sklearn.ensemble import RandomForestRegressor
def main(argv):
    """Compare analyzer scores against classifier predictions on the commit
    comment dataset and print the comments where the classifier is much more
    positive than the analyzer.

    Fix: the dataset file handle was opened and never closed; it is now
    managed with a context manager.
    """
    # Constants for the analyzer and the classifier
    dataset = 'commit_comments-dump.2015-01-29.json'
    group = 'id'
    model_file = 'model.pickle'

    # Create the analyzer
    analyzer = Analyzer(group)

    # Create the classifier
    algorithm_class = RandomForestRegressor
    algorithm_parameters = {
        'n_estimators': 100,
        'n_jobs': 2,
        'min_samples_split': 10
    }
    classifier = Classifier(group, model_file)
    classifier.create_model(train=True, class_name=algorithm_class, parameters=algorithm_parameters)

    # Compare analyzer output with classifier output and identify differences
    unrecognized_negative = {}
    unrecognized_positive = {}
    predictions = classifier.predict()
    line = 0  # Dataset line
    i = 0  # Prediction ID (+1)
    # Context manager ensures the dataset file is closed even on error
    # (the original leaked the handle).
    with open(dataset, 'rb') as file:
        for data in Utilities.read_json(file, 'id', group):
            line = line + 1
            if line % 1000 == 0:
                print(line)
            if not classifier.filter(data):
                continue
            i = i + 1
            message = data['message']
            score = analyzer.analyze(message)[0]
            if score == 0:
                continue
            # Positive diff: classifier rated the comment higher than the analyzer.
            diff = predictions[i-1] - score
            if abs(diff) < 1.0:
                continue
            target = unrecognized_negative if diff < 0 else unrecognized_positive
            target[line] = diff

    # Print the "unrecognized positive" samples, smallest difference first.
    result = sorted(unrecognized_positive.items(), key=lambda x: x[1])
    for item in result:
        print("{}: {}: {}".format(item[0], item[1], linecache.getline(dataset, item[0])[:-1]))
if __name__ == "__main__":
    # Forward command-line arguments (minus the program name) to main().
    main(sys.argv[1:])
| 29.733333 | 100 | 0.640695 | import sys
import linecache
from analyze import Analyzer
from classify import Classifier
from utils import Utilities
from sklearn.ensemble import RandomForestRegressor
def main(argv):
dataset = 'commit_comments-dump.2015-01-29.json'
group = 'id'
model_file = 'model.pickle'
analyzer = Analyzer(group)
algorithm_class = RandomForestRegressor
algorithm_parameters = {
'n_estimators': 100,
'n_jobs': 2,
'min_samples_split': 10
}
classifier = Classifier(group, model_file)
classifier.create_model(train=True, class_name=algorithm_class, parameters=algorithm_parameters)
unrecognized_negative = {}
unrecognized_positive = {}
predictions = classifier.predict()
line = 0
i = 0
file = open(dataset, 'rb')
for data in Utilities.read_json(file, 'id', group):
line = line + 1
if line % 1000 == 0:
print(line)
if not classifier.filter(data):
continue
i = i + 1
message = data['message']
score = analyzer.analyze(message)[0]
if score == 0:
continue
diff = predictions[i-1] - score
if abs(diff) < 1.0:
continue
target = unrecognized_negative if diff < 0 else unrecognized_positive
target[line] = diff
result = sorted(unrecognized_positive.items(), key=lambda x: x[1])
for item in result:
print("{}: {}: {}".format(item[0], item[1], linecache.getline(dataset, item[0])[:-1]))
if __name__ == "__main__":
main(sys.argv[1:])
| true | true |
f732239fbe0f1f3f991aa2396e2c32ab3b933115 | 13,202 | py | Python | pipelines/ASR/opts.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | 2 | 2019-11-29T15:46:14.000Z | 2021-05-28T06:54:41.000Z | pipelines/DAT/opts.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | null | null | null | pipelines/DAT/opts.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | null | null | null | def build_preprocess_parser(parser):
preprocess_opts(parser)
return parser
def build_train_parser(parser):
    """Attach model, general, training and translation option groups to
    *parser* and return it (training also needs decoding options for
    validation-time evaluation)."""
    model_opts(parser)
    general_opts(parser)
    train_opts(parser)
    translate_opts(parser)
    return parser
def build_test_parser(parser):
    """Attach general and translation option groups to *parser* and return it."""
    general_opts(parser)
    translate_opts(parser)
    return parser
def model_opts(parser):
    """Register model architecture options on *parser*: embeddings,
    embedding features, encoder, decoder, attention and the
    encoder-decoder bridge.

    NOTE(review): `parser.add` suggests a configargparse-style parser
    (alias of add_argument) — confirm against the caller.
    """
    # Embedding
    group = parser.add_argument_group('Model - Embeddings')
    group.add('--embedding_size', type=int, default=256,
              help='Token embedding size for target')
    group.add('--share_dec_weights', action='store_true',
              help="Use a shared weight matrix for the input and "
                   "output word embeddings in the decoder.")
    # Embedding features
    group = parser.add_argument_group('Model - Embedding Features')
    group.add('--feat_merge', '-feat_merge', type=str, default='concat',
              choices=['concat', 'sum', 'mlp'],
              help="Merge action for incorporating features embeddings. "
                   "Options [concat|sum|mlp].")
    group.add('--feat_vec_size', '-feat_vec_size', type=int, default=-1,
              help="If specified, feature embedding sizes "
                   "will be set to this. Otherwise, feat_vec_exponent "
                   "will be used.")
    group.add('--feat_vec_exponent', '-feat_vec_exponent',
              type=float, default=0.7,
              help="If -feat_merge_size is not set, feature "
                   "embedding sizes will be set to N^feat_vec_exponent "
                   "where N is the number of values the feature takes.")
    # Encoder
    group = parser.add_argument_group('Model - Encoder')
    group.add('--enc_rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU'],
              help="Type of encoder RNN layer to use.")
    group.add('--enc_layers', type=int, default=3,
              help='Number of layers in the encoder')
    group.add('--enc_rnn_size', type=int, default=512,
              help="Size of encoder rnn hidden states.")
    group.add('--brnn', action='store_true',
              help="Whether to use bidirectional encoder.")
    group.add('--enc_pooling', type=str, default='2',
              help="The amount of pooling of audio encoder, "
                   "either the same amount of pooling across all layers "
                   "indicated by a single number, or different amounts of "
                   "pooling per layer separated by comma.")
    group.add('--enc_dropout', type=float, default=0.0,
              help="Dropout probability for encoder.")
    # Decoder
    group = parser.add_argument_group('Model - Decoder')
    group.add('--dec_rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU'],
              help="Type of decoder RNN layer to use.")
    group.add('--dec_layers', type=int, default=2,
              help='Number of layers in the decoder')
    group.add('--dec_rnn_size', type=int, default=256,
              help="Size of decoder rnn hidden states.")
    group.add('--dec_dropout', type=float, default=0.0,
              help="Dropout probability for decoder.")
    group.add('--init_sched_sampling_rate', type=float, default=0.0,
              help="Initial rate for scheduled sampling")
    # Attention
    group = parser.add_argument_group('Model - Attention')
    group.add('--attention_type', type=str, default='general',
              choices=['dot', 'general', 'mlp'],
              help="The attention type to use: "
                   "dotprod or general (Luong) or MLP (Bahdanau)")
    # Bridge
    group = parser.add_argument_group('Model - Bridge')
    group.add('--bridge_type', type=str, default='zero',
              choices=['copy', 'mlp', 'zero'],
              help="The bridge type to use between encoder and decoder.")
def preprocess_opts(parser):
    """Register data-preparation options on *parser*: corpus paths and
    sharding, vocabulary construction, and audio feature extraction."""
    # Data
    group = parser.add_argument_group('Data')
    group.add('--src_train', required=True, nargs='+',
              help="Path(s) to the training source data")
    group.add('--tgt_train', required=True, nargs='+',
              help="Path(s) to the training target data")
    group.add('--src_valid', required=True, nargs='+',
              help="Path(s) to the validation source data")
    group.add('--tgt_valid', required=True, nargs='+',
              help="Path(s) to the validation target data")
    group.add('--src_test', required=True, nargs='+',
              help="Path(s) to the test source data")
    group.add('--tgt_test', required=True, nargs='+',
              help="Path(s) to the test target data")
    group.add('--src_dir', default="",
              help="Source directory for audio files.")
    group.add('--save_dir', required=True,
              help="Directory for saving the prepared data")
    group.add('--shard_size', type=int, default=6000,
              help="Divide src_corpus and tgt_corpus into "
                   "smaller multiple src_copus and tgt corpus files, then "
                   "build shards, each shard will have "
                   "opt.shard_size samples except last shard. "
                   "shard_size=0 means no segmentation "
                   "shard_size>0 means segment dataset into multiple shards, "
                   "each shard has shard_size samples")
    # Vocab
    group = parser.add_argument_group('Vocab')
    group.add('--vocab', type=str, required=True,
              help="File to be used for building vocabulary.")
    group.add('--max_vocab_size', type=int, default=50000,
              help="Maximum size of the vocabulary")
    # Audio processing
    group = parser.add_argument_group('Audio')
    group.add('--sample_rate', type=int, default=16000,
              help="Sample rate.")
    group.add('--window_size', type=float, default=.02,
              help="Window size for spectrogram in seconds.")
    group.add('--window_stride', type=float, default=.01,
              help="Window stride for spectrogram in seconds.")
    group.add('--window', default='hamming',
              help="Window type for spectrogram generation. "
                   "Passed to librosa as argument.")
    group.add('--feat_type', default='mfcc', choices=['fbank', 'stft', 'mfcc'],
              help="Type of audio features to be extracted")
    group.add('--normalize_audio', action='store_true',
              help="Whether to perform mean-variance normalization on features.")
def general_opts(parser):
    """Register options shared by training and evaluation: data prefix,
    pretrained checkpoint and RNG seed."""
    group = parser.add_argument_group('General')
    group.add('--data', type=str, required=True,
              help='Path prefix to .pt files generated by preprocess.py')
    group.add('--checkpoint', type=str, default='',
              help='Path to checkpoint of pretrained model')
    group.add('--seed', type=int, default=1234,
              help="Random seed used for the experiments reproducibility.")
def train_opts(parser):
    """Register training options on *parser*: parameter initialization,
    optimization (batching, steps, optimizer, learning-rate schedule)
    and logging/checkpointing."""
    # Initialization
    group = parser.add_argument_group('Initialization')
    group.add('--param_init', type=float, default=0.1,
              help="Init parameters with uniform distribution "
                   "with support (-param_init, param_init). "
                   "Use 0 to not use initialization")
    group.add('--param_init_glorot', action='store_true',
              help="Init parameters with xavier_uniform.")
    # Optimization
    group = parser.add_argument_group('Optimization')
    group.add('--train_batch_size', type=int, default=32,
              help='Batch size for training')
    group.add('--bucket_size', type=int, default=256,
              help="Shuffle this many examples to reduce padding.")
    group.add('--bptt', type=int, default=0,
              help="Number of timesteps for truncated BPTT. Set to 0 to disable.")
    group.add('--train_steps', type=int, default=100000,
              help='Number of training steps')
    group.add('--eval_steps', type=int, default=10000,
              help='Perfom validation every X steps')
    group.add('--shard_size', type=int, default=0,
              help="Maximum batches of words in a sequence to run "
                   "the generator on in parallel. Higher is faster, but "
                   "uses more memory. Set to 0 to disable.")
    group.add('--single_pass', action='store_true',
              help="Make a single pass over the training dataset.")
    group.add('--optim', default='sgd',
              choices=['sgd', 'adagrad', 'adadelta', 'adam'],
              help="Optimization method.")
    group.add('--adagrad_accumulator_init', type=float, default=0,
              help="Initializes the accumulator values in adagrad. "
                   "Mirrors the initial_accumulator_value option "
                   "in the tensorflow adagrad (use 0.1 for their default).")
    group.add('--max_grad_norm', type=float, default=10,
              help="If the norm of the gradient vector exceeds this, "
                   "renormalize it to have the norm equal to "
                   "max_grad_norm")
    group.add('--adam_beta1', type=float, default=0.9,
              help="The beta1 parameter used by Adam. "
                   "Almost without exception a value of 0.9 is used in "
                   "the literature, seemingly giving good results, "
                   "so we would discourage changing this value from "
                   "the default without due consideration.")
    group.add('--adam_beta2', type=float, default=0.999,
              help='The beta2 parameter used by Adam. '
                   'Typically a value of 0.999 is recommended, as this is '
                   'the value suggested by the original paper describing '
                   'Adam, and is also the value adopted in other frameworks '
                   'such as Tensorflow and Kerras, i.e. see: '
                   'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
                   'Optimizer or '
                   'https://keras.io/optimizers/ . '
                   'Whereas recently the paper "Attention is All You Need" '
                   'suggested a value of 0.98 for beta2, this parameter may '
                   'not work well for normal models / default '
                   'baselines.')
    group.add('--learning_rate', type=float, default=1.0,
              help="Starting learning rate. "
                   "Recommended settings: sgd = 1, adagrad = 0.1, "
                   "adadelta = 1, adam = 0.001")
    group.add('--learning_rate_decay', type=float, default=0.5,
              help="If update_learning_rate, decay learning rate by "
                   "this much if steps have gone past "
                   "start_decay_steps")
    group.add('--start_decay_steps', type=int, default=50000,
              help="Start decaying every decay_steps after start_decay_steps")
    group.add('--decay_steps', type=int, default=10000,
              help="Decay every decay_steps")
    group.add('--decay_method', type=str, default="none",
              choices=['noam', 'noamwd', 'rsqrt', 'none'],
              help="Use a custom decay rate.")
    group.add('--warmup_steps', type=int, default=4000,
              help="Number of warmup steps for custom decay.")
    # Logging / checkpointing
    group = parser.add_argument_group('Logging')
    group.add('--log_every', type=int, default=50,
              help="Print stats at this interval.")
    group.add("--tensorboard_dir", type=str, default="",
              help="Log directory for Tensorboard. "
                   "This is also the name of the run.")
    group.add("--save_dir", type=str, default="saved",
              help="Directory for saving checkpoints.")
def translate_opts(parser):
group = parser.add_argument_group('Translation')
group.add('--eval_batch_size', type=int, default=32,
help='Batch size for evaluation')
group.add('--eval_split', choices=['train', 'valid', 'test'], default='test',
help='Split to be used for evaluation')
group.add('--n_best', type=int, default=1,
help='Number of hypotheses to return for each sample')
group.add('--min_length', type=int, default=0,
help='Minimum length of generated transcription')
group.add('--max_length', type=int, default=100,
help='Maximum length of generated transcription')
group.add('--ratio', type=float, default=0.,
help='If greater than 0, used for estimating transcription '
'length from length of encoded sequence')
group.add('--beam_size', type=int, default=8,
help='Size of beam during search')
group.add('--block_ngram_repeat', type=int, default=0,
help='Block hypotheses containing as many consecutive '
'repetitions of ngrams/tokens')
group.add('--excluded_toks', type=str, default='',
help='Comma-separated list of tokens not to be '
'blocked during decoding')
group.add('--out', type=str, default='',
help='File for writing generated hypotheses')
group.add('--verbose', action='store_true',
help='Print the best transcription as it is generated')
group.add('--attn_debug', action='store_true',
help='Print the attention heatmap for each sample')
| 50.197719 | 82 | 0.604302 | def build_preprocess_parser(parser):
preprocess_opts(parser)
return parser
def build_train_parser(parser):
model_opts(parser)
general_opts(parser)
train_opts(parser)
translate_opts(parser)
return parser
def build_test_parser(parser):
general_opts(parser)
translate_opts(parser)
return parser
def model_opts(parser):
group = parser.add_argument_group('Model - Embeddings')
group.add('--embedding_size', type=int, default=256,
help='Token embedding size for target')
group.add('--share_dec_weights', action='store_true',
help="Use a shared weight matrix for the input and "
"output word embeddings in the decoder.")
group = parser.add_argument_group('Model - Embedding Features')
group.add('--feat_merge', '-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help="Merge action for incorporating features embeddings. "
"Options [concat|sum|mlp].")
group.add('--feat_vec_size', '-feat_vec_size', type=int, default=-1,
help="If specified, feature embedding sizes "
"will be set to this. Otherwise, feat_vec_exponent "
"will be used.")
group.add('--feat_vec_exponent', '-feat_vec_exponent',
type=float, default=0.7,
help="If -feat_merge_size is not set, feature "
"embedding sizes will be set to N^feat_vec_exponent "
"where N is the number of values the feature takes.")
group = parser.add_argument_group('Model - Encoder')
group.add('--enc_rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU'],
help="Type of encoder RNN layer to use.")
group.add('--enc_layers', type=int, default=3,
help='Number of layers in the encoder')
group.add('--enc_rnn_size', type=int, default=512,
help="Size of encoder rnn hidden states.")
group.add('--brnn', action='store_true',
help="Whether to use bidirectional encoder.")
group.add('--enc_pooling', type=str, default='2',
help="The amount of pooling of audio encoder, "
"either the same amount of pooling across all layers "
"indicated by a single number, or different amounts of "
"pooling per layer separated by comma.")
group.add('--enc_dropout', type=float, default=0.0,
help="Dropout probability for encoder.")
group = parser.add_argument_group('Model - Decoder')
group.add('--dec_rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU'],
help="Type of decoder RNN layer to use.")
group.add('--dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add('--dec_rnn_size', type=int, default=256,
help="Size of decoder rnn hidden states.")
group.add('--dec_dropout', type=float, default=0.0,
help="Dropout probability for decoder.")
group.add('--init_sched_sampling_rate', type=float, default=0.0,
help="Initial rate for scheduled sampling")
group = parser.add_argument_group('Model - Attention')
group.add('--attention_type', type=str, default='general',
choices=['dot', 'general', 'mlp'],
help="The attention type to use: "
"dotprod or general (Luong) or MLP (Bahdanau)")
group = parser.add_argument_group('Model - Bridge')
group.add('--bridge_type', type=str, default='zero',
choices=['copy', 'mlp', 'zero'],
help="The bridge type to use between encoder and decoder.")
def preprocess_opts(parser):
group = parser.add_argument_group('Data')
group.add('--src_train', required=True, nargs='+',
help="Path(s) to the training source data")
group.add('--tgt_train', required=True, nargs='+',
help="Path(s) to the training target data")
group.add('--src_valid', required=True, nargs='+',
help="Path(s) to the validation source data")
group.add('--tgt_valid', required=True, nargs='+',
help="Path(s) to the validation target data")
group.add('--src_test', required=True, nargs='+',
help="Path(s) to the test source data")
group.add('--tgt_test', required=True, nargs='+',
help="Path(s) to the test target data")
group.add('--src_dir', default="",
help="Source directory for audio files.")
group.add('--save_dir', required=True,
help="Directory for saving the prepared data")
group.add('--shard_size', type=int, default=6000,
help="Divide src_corpus and tgt_corpus into "
"smaller multiple src_copus and tgt corpus files, then "
"build shards, each shard will have "
"opt.shard_size samples except last shard. "
"shard_size=0 means no segmentation "
"shard_size>0 means segment dataset into multiple shards, "
"each shard has shard_size samples")
group = parser.add_argument_group('Vocab')
group.add('--vocab', type=str, required=True,
help="File to be used for building vocabulary.")
group.add('--max_vocab_size', type=int, default=50000,
help="Maximum size of the vocabulary")
group = parser.add_argument_group('Audio')
group.add('--sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add('--window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add('--window', default='hamming',
help="Window type for spectrogram generation. "
"Passed to librosa as argument.")
group.add('--feat_type', default='mfcc', choices=['fbank', 'stft', 'mfcc'],
help="Type of audio features to be extracted")
group.add('--normalize_audio', action='store_true',
help="Whether to perform mean-variance normalization on features.")
def general_opts(parser):
group = parser.add_argument_group('General')
group.add('--data', type=str, required=True,
help='Path prefix to .pt files generated by preprocess.py')
group.add('--checkpoint', type=str, default='',
help='Path to checkpoint of pretrained model')
group.add('--seed', type=int, default=1234,
help="Random seed used for the experiments reproducibility.")
def train_opts(parser):
group = parser.add_argument_group('Initialization')
group.add('--param_init', type=float, default=0.1,
help="Init parameters with uniform distribution "
"with support (-param_init, param_init). "
"Use 0 to not use initialization")
group.add('--param_init_glorot', action='store_true',
help="Init parameters with xavier_uniform.")
group = parser.add_argument_group('Optimization')
group.add('--train_batch_size', type=int, default=32,
help='Batch size for training')
group.add('--bucket_size', type=int, default=256,
help="Shuffle this many examples to reduce padding.")
group.add('--bptt', type=int, default=0,
help="Number of timesteps for truncated BPTT. Set to 0 to disable.")
group.add('--train_steps', type=int, default=100000,
help='Number of training steps')
group.add('--eval_steps', type=int, default=10000,
help='Perfom validation every X steps')
group.add('--shard_size', type=int, default=0,
help="Maximum batches of words in a sequence to run "
"the generator on in parallel. Higher is faster, but "
"uses more memory. Set to 0 to disable.")
group.add('--single_pass', action='store_true',
help="Make a single pass over the training dataset.")
group.add('--optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam'],
help="Optimization method.")
group.add('--adagrad_accumulator_init', type=float, default=0,
help="Initializes the accumulator values in adagrad. "
"Mirrors the initial_accumulator_value option "
"in the tensorflow adagrad (use 0.1 for their default).")
group.add('--max_grad_norm', type=float, default=10,
help="If the norm of the gradient vector exceeds this, "
"renormalize it to have the norm equal to "
"max_grad_norm")
group.add('--adam_beta1', type=float, default=0.9,
help="The beta1 parameter used by Adam. "
"Almost without exception a value of 0.9 is used in "
"the literature, seemingly giving good results, "
"so we would discourage changing this value from "
"the default without due consideration.")
group.add('--adam_beta2', type=float, default=0.999,
help='The beta2 parameter used by Adam. '
'Typically a value of 0.999 is recommended, as this is '
'the value suggested by the original paper describing '
'Adam, and is also the value adopted in other frameworks '
'such as Tensorflow and Kerras, i.e. see: '
'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
'Optimizer or '
'https://keras.io/optimizers/ . '
'Whereas recently the paper "Attention is All You Need" '
'suggested a value of 0.98 for beta2, this parameter may '
'not work well for normal models / default '
'baselines.')
group.add('--learning_rate', type=float, default=1.0,
help="Starting learning rate. "
"Recommended settings: sgd = 1, adagrad = 0.1, "
"adadelta = 1, adam = 0.001")
group.add('--learning_rate_decay', type=float, default=0.5,
help="If update_learning_rate, decay learning rate by "
"this much if steps have gone past "
"start_decay_steps")
group.add('--start_decay_steps', type=int, default=50000,
help="Start decaying every decay_steps after start_decay_steps")
group.add('--decay_steps', type=int, default=10000,
help="Decay every decay_steps")
group.add('--decay_method', type=str, default="none",
choices=['noam', 'noamwd', 'rsqrt', 'none'],
help="Use a custom decay rate.")
group.add('--warmup_steps', type=int, default=4000,
help="Number of warmup steps for custom decay.")
group = parser.add_argument_group('Logging')
group.add('--log_every', type=int, default=50,
help="Print stats at this interval.")
group.add("--tensorboard_dir", type=str, default="",
help="Log directory for Tensorboard. "
"This is also the name of the run.")
group.add("--save_dir", type=str, default="saved",
help="Directory for saving checkpoints.")
def translate_opts(parser):
group = parser.add_argument_group('Translation')
group.add('--eval_batch_size', type=int, default=32,
help='Batch size for evaluation')
group.add('--eval_split', choices=['train', 'valid', 'test'], default='test',
help='Split to be used for evaluation')
group.add('--n_best', type=int, default=1,
help='Number of hypotheses to return for each sample')
group.add('--min_length', type=int, default=0,
help='Minimum length of generated transcription')
group.add('--max_length', type=int, default=100,
help='Maximum length of generated transcription')
group.add('--ratio', type=float, default=0.,
help='If greater than 0, used for estimating transcription '
'length from length of encoded sequence')
group.add('--beam_size', type=int, default=8,
help='Size of beam during search')
group.add('--block_ngram_repeat', type=int, default=0,
help='Block hypotheses containing as many consecutive '
'repetitions of ngrams/tokens')
group.add('--excluded_toks', type=str, default='',
help='Comma-separated list of tokens not to be '
'blocked during decoding')
group.add('--out', type=str, default='',
help='File for writing generated hypotheses')
group.add('--verbose', action='store_true',
help='Print the best transcription as it is generated')
group.add('--attn_debug', action='store_true',
help='Print the attention heatmap for each sample')
| true | true |
f73225b22c2dc40bcb99cfec0c85cf7a9faf4caa | 276 | py | Python | src/main.py | Hudson-Newey/Global-Search | 3095b0002e44994142fa4b815cf8e56b05f012e2 | [
"MIT"
] | 1 | 2020-09-25T05:38:11.000Z | 2020-09-25T05:38:11.000Z | src/main.py | Grathium-Industries/Global-Search | 3095b0002e44994142fa4b815cf8e56b05f012e2 | [
"MIT"
] | null | null | null | src/main.py | Grathium-Industries/Global-Search | 3095b0002e44994142fa4b815cf8e56b05f012e2 | [
"MIT"
] | null | null | null | # readfile "rf()" function
def rf(filename):
return open(filename, "r").read()
# import external files
exec(rf("translate.py"))
exec(rf("parse.py"))
exec(rf("fileServer.py"))
# main body
# arg1 defines live server port
# main calling point of program
startServer(8080)
| 18.4 | 37 | 0.702899 |
def rf(filename):
return open(filename, "r").read()
exec(rf("translate.py"))
exec(rf("parse.py"))
exec(rf("fileServer.py"))
startServer(8080)
| true | true |
f73226da285f8746d282b6368e6dadea4572096c | 4,050 | py | Python | tyrant/cogs/fruit_vs_vegetables.py | AadilVarsh/tyrant | f4a5cebf09cd217b89823ca28180cb434c009b12 | [
"MIT"
] | 1 | 2021-10-12T05:10:04.000Z | 2021-10-12T05:10:04.000Z | tyrant/cogs/fruit_vs_vegetables.py | AadilVarsh/tyrant | f4a5cebf09cd217b89823ca28180cb434c009b12 | [
"MIT"
] | null | null | null | tyrant/cogs/fruit_vs_vegetables.py | AadilVarsh/tyrant | f4a5cebf09cd217b89823ca28180cb434c009b12 | [
"MIT"
] | null | null | null | import asyncio
from disnake.ext.commands import Bot, Cog
from tyrant import constants
class FruitVsVegetables(Cog):
"""Assign fruit and vegetable roles."""
def __init__(self, bot: Bot):
"""Initialize this cog with the Bot instance."""
self.bot = bot
self.locks = {}
@Cog.listener()
async def on_raw_reaction_add(self, payload):
"""Distribute fruit or vegetable role, when appropriate."""
if payload.channel_id == constants.Channels.fruit_vs_vegetables:
# Acquire a lock for this user
if payload.user_id not in self.locks:
self.locks[payload.user_id] = asyncio.Lock()
lock = self.locks[payload.user_id]
# If it's already locked, just do nothing. The code
# below will clean up and exit with a clean state.
if lock.locked():
return
async with lock:
# Get the other info we need
channel = await self.bot.fetch_channel(payload.channel_id)
guild = self.bot.get_guild(payload.guild_id)
member = await guild.fetch_member(payload.user_id)
emoji = payload.emoji
# Get the role ID from the emoji
fruit_role_id = constants.EMOJI_TO_ROLE[emoji.name]
team_id = constants.EMOJI_TO_TEAM[emoji.name]
fruit_role = guild.get_role(fruit_role_id)
team_role = guild.get_role(team_id)
# Get rid of old roles, assign the new ones
await member.remove_roles(*[role for role in member.roles if role.id in constants.ALL_FRUIT_AND_VEG_ROLES])
await member.add_roles(fruit_role, team_role)
# Finally, remove all other reactions than this one
fruit_message = await channel.fetch_message(constants.Messages.fruit_role_assignment)
veg_message = await channel.fetch_message(constants.Messages.veg_role_assignment)
reactions = fruit_message.reactions + veg_message.reactions
for reaction in reactions:
# Do not remove the reaction we're currently adding
if reaction.custom_emoji:
if reaction.emoji.name == emoji.name:
continue
else:
if str(emoji) == str(reaction.emoji):
continue
# Otherwise, remove the emoji.
users = await reaction.users().flatten()
if member in users:
await reaction.remove(member)
@Cog.listener()
async def on_raw_reaction_remove(self, payload):
"""Remove fruit and veg roles, when appropriate."""
if payload.channel_id == constants.Channels.fruit_vs_vegetables:
# Acquire a lock for this user
if payload.user_id not in self.locks:
self.locks[payload.user_id] = asyncio.Lock()
lock = self.locks[payload.user_id]
async with lock:
guild = self.bot.get_guild(payload.guild_id)
member = await guild.fetch_member(payload.user_id)
emoji = payload.emoji
# Get the role ID from the emoji
fruit_role_id = constants.EMOJI_TO_ROLE[emoji.name]
team_id = constants.EMOJI_TO_TEAM[emoji.name]
team_role = guild.get_role(team_id)
# Remove all fruit and veg roles from the member
for role in member.roles:
if role.id == fruit_role_id and role.id in constants.ALL_FRUIT_AND_VEG_ROLES:
await member.remove_roles(role, team_role)
def setup(bot: Bot) -> None:
"""
This function is called automatically when this cog is loaded by the bot.
It's only purpose is to load the cog above, and to pass the Bot instance into it.
"""
bot.add_cog(FruitVsVegetables(bot))
| 41.326531 | 123 | 0.589383 | import asyncio
from disnake.ext.commands import Bot, Cog
from tyrant import constants
class FruitVsVegetables(Cog):
def __init__(self, bot: Bot):
self.bot = bot
self.locks = {}
@Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.channel_id == constants.Channels.fruit_vs_vegetables:
if payload.user_id not in self.locks:
self.locks[payload.user_id] = asyncio.Lock()
lock = self.locks[payload.user_id]
# below will clean up and exit with a clean state.
if lock.locked():
return
async with lock:
# Get the other info we need
channel = await self.bot.fetch_channel(payload.channel_id)
guild = self.bot.get_guild(payload.guild_id)
member = await guild.fetch_member(payload.user_id)
emoji = payload.emoji
# Get the role ID from the emoji
fruit_role_id = constants.EMOJI_TO_ROLE[emoji.name]
team_id = constants.EMOJI_TO_TEAM[emoji.name]
fruit_role = guild.get_role(fruit_role_id)
team_role = guild.get_role(team_id)
# Get rid of old roles, assign the new ones
await member.remove_roles(*[role for role in member.roles if role.id in constants.ALL_FRUIT_AND_VEG_ROLES])
await member.add_roles(fruit_role, team_role)
# Finally, remove all other reactions than this one
fruit_message = await channel.fetch_message(constants.Messages.fruit_role_assignment)
veg_message = await channel.fetch_message(constants.Messages.veg_role_assignment)
reactions = fruit_message.reactions + veg_message.reactions
for reaction in reactions:
# Do not remove the reaction we're currently adding
if reaction.custom_emoji:
if reaction.emoji.name == emoji.name:
continue
else:
if str(emoji) == str(reaction.emoji):
continue
users = await reaction.users().flatten()
if member in users:
await reaction.remove(member)
@Cog.listener()
async def on_raw_reaction_remove(self, payload):
if payload.channel_id == constants.Channels.fruit_vs_vegetables:
if payload.user_id not in self.locks:
self.locks[payload.user_id] = asyncio.Lock()
lock = self.locks[payload.user_id]
async with lock:
guild = self.bot.get_guild(payload.guild_id)
member = await guild.fetch_member(payload.user_id)
emoji = payload.emoji
fruit_role_id = constants.EMOJI_TO_ROLE[emoji.name]
team_id = constants.EMOJI_TO_TEAM[emoji.name]
team_role = guild.get_role(team_id)
for role in member.roles:
if role.id == fruit_role_id and role.id in constants.ALL_FRUIT_AND_VEG_ROLES:
await member.remove_roles(role, team_role)
def setup(bot: Bot) -> None:
bot.add_cog(FruitVsVegetables(bot))
| true | true |
f732285ceeeaed769e47aab9658083e763fee6e4 | 2,947 | py | Python | tests/legacy/test_cross_cov.py | EEmGuzman/orphics | f8f25f9db7c9104dba5cbeaac0b4924bf4f6920e | [
"BSD-2-Clause"
] | 10 | 2018-01-12T16:12:11.000Z | 2021-02-11T18:46:47.000Z | tests/legacy/test_cross_cov.py | EEmGuzman/orphics | f8f25f9db7c9104dba5cbeaac0b4924bf4f6920e | [
"BSD-2-Clause"
] | 20 | 2016-11-17T20:20:53.000Z | 2021-02-02T10:08:38.000Z | tests/legacy/test_cross_cov.py | EEmGuzman/orphics | f8f25f9db7c9104dba5cbeaac0b4924bf4f6920e | [
"BSD-2-Clause"
] | 17 | 2017-04-28T23:28:16.000Z | 2021-08-15T20:28:25.000Z | from __future__ import print_function
from orphics import maps,io,cosmology,symcoupling as sc,stats,lensing
from enlib import enmap,bench
import numpy as np
import os,sys
cache = True
hdv = False
deg = 5
px = 1.5
shape,wcs = maps.rect_geometry(width_deg = deg,px_res_arcmin=px)
mc = sc.LensingModeCoupling(shape,wcs)
pols = ['TT',"TE",'EE','EB','TB']
theory = cosmology.default_theory(lpad=20000)
noise_t = 10.0
noise_p = 10.0*np.sqrt(2.)
fwhm = 1.5
kbeam = maps.gauss_beam(fwhm,mc.modlmap)
ells = np.arange(0,3000,1)
lbeam = maps.gauss_beam(fwhm,ells)
ntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./kbeam**2.)
nee = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
nbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
lntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./lbeam**2.)
lnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
lnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
ellmin = 20
ellmax = 3000
xmask = maps.mask_kspace(shape,wcs,lmin=ellmin,lmax=ellmax)
ymask = xmask
Als = {}
for pol in pols:
with bench.show("ALcalc"):
AL = mc.AL(pol,xmask,ymask,ntt,nee,nbb,theory=theory,hdv=hdv,cache=cache)
Als[pol] = AL.copy()
bin_edges = np.arange(10,2000,40)
pl = io.Plotter(yscale='log')
pl.add(ells,theory.gCl('kk',ells),lw=3,color='k')
crosses = [('TT','EE'),('TT','TE'),('EE','TE'),('EB','TB')]
for pol1,pol2 in crosses:
print(pol1,pol2)
with bench.show("ALcalc"):
cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = np.abs(mc.NL(Als[pol1],Als[pol2],cross))
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2,label=pol1 + "x" + pol2)
pl.legend()
pl.done()
zcrosses = [('TT','TB'),('TT','EB'),('EE','EB'),('EE','TB')]
pl = io.Plotter()
for pol1,pol2 in zcrosses:
print(pol1,pol2)
with bench.show("ALcalc"):
cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = mc.NL(Als[pol1],Als[pol2],cross)
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2,label=pol1 + "x" + pol2)
pl.legend()
pl.done()
print("nffts : ",mc.nfft,mc.nifft)
| 33.488636 | 98 | 0.650153 | from __future__ import print_function
from orphics import maps,io,cosmology,symcoupling as sc,stats,lensing
from enlib import enmap,bench
import numpy as np
import os,sys
cache = True
hdv = False
deg = 5
px = 1.5
shape,wcs = maps.rect_geometry(width_deg = deg,px_res_arcmin=px)
mc = sc.LensingModeCoupling(shape,wcs)
pols = ['TT',"TE",'EE','EB','TB']
theory = cosmology.default_theory(lpad=20000)
noise_t = 10.0
noise_p = 10.0*np.sqrt(2.)
fwhm = 1.5
kbeam = maps.gauss_beam(fwhm,mc.modlmap)
ells = np.arange(0,3000,1)
lbeam = maps.gauss_beam(fwhm,ells)
ntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./kbeam**2.)
nee = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
nbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
lntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./lbeam**2.)
lnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
lnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
ellmin = 20
ellmax = 3000
xmask = maps.mask_kspace(shape,wcs,lmin=ellmin,lmax=ellmax)
ymask = xmask
Als = {}
for pol in pols:
with bench.show("ALcalc"):
AL = mc.AL(pol,xmask,ymask,ntt,nee,nbb,theory=theory,hdv=hdv,cache=cache)
Als[pol] = AL.copy()
bin_edges = np.arange(10,2000,40)
pl = io.Plotter(yscale='log')
pl.add(ells,theory.gCl('kk',ells),lw=3,color='k')
crosses = [('TT','EE'),('TT','TE'),('EE','TE'),('EB','TB')]
for pol1,pol2 in crosses:
print(pol1,pol2)
with bench.show("ALcalc"):
cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = np.abs(mc.NL(Als[pol1],Als[pol2],cross))
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2,label=pol1 + "x" + pol2)
pl.legend()
pl.done()
zcrosses = [('TT','TB'),('TT','EB'),('EE','EB'),('EE','TB')]
pl = io.Plotter()
for pol1,pol2 in zcrosses:
print(pol1,pol2)
with bench.show("ALcalc"):
cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = mc.NL(Als[pol1],Als[pol2],cross)
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2,label=pol1 + "x" + pol2)
pl.legend()
pl.done()
print("nffts : ",mc.nfft,mc.nifft)
| true | true |
f73229b10a319926c7005288cb0476184c0feb80 | 952 | py | Python | twitterBot.py | f0xHiero/Learning_Still | 53721c0da1e2d280433e68979dbf5a4d692bd955 | [
"CC0-1.0"
] | null | null | null | twitterBot.py | f0xHiero/Learning_Still | 53721c0da1e2d280433e68979dbf5a4d692bd955 | [
"CC0-1.0"
] | null | null | null | twitterBot.py | f0xHiero/Learning_Still | 53721c0da1e2d280433e68979dbf5a4d692bd955 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python3
import tweepy
import time, datetime
consumer_key = 'REDACTED'
consumer_secret = 'REDACTED'
key = 'REDACTED'
secret = 'REDACTED'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
def twitter_bot(hashtag, delay):
while True:
print(f"\n{datetime.datetime.now()}\n")
for tweet in tweepy.Cursor(api.search, q=hashtag, rpp=200).items(200):
try:
tweet_id = dict(tweet._json)["id"]
tweet_text = dict(tweet._json)["text"]
print("id" + str(tweet_id))
print("text: " + str(tweet_text))
api.retweet(tweet_id)
api.create_favorite(tweet_id)
#store_last_seen(FILE_NAME, tweet_id)
except tweepy.TweepError as error:
print(error.reason)
time.sleep(delay)
twitter_bot("$FTM", 60)
| 21.636364 | 78 | 0.594538 |
import tweepy
import time, datetime
consumer_key = 'REDACTED'
consumer_secret = 'REDACTED'
key = 'REDACTED'
secret = 'REDACTED'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
def twitter_bot(hashtag, delay):
while True:
print(f"\n{datetime.datetime.now()}\n")
for tweet in tweepy.Cursor(api.search, q=hashtag, rpp=200).items(200):
try:
tweet_id = dict(tweet._json)["id"]
tweet_text = dict(tweet._json)["text"]
print("id" + str(tweet_id))
print("text: " + str(tweet_text))
api.retweet(tweet_id)
api.create_favorite(tweet_id)
except tweepy.TweepError as error:
print(error.reason)
time.sleep(delay)
twitter_bot("$FTM", 60)
| true | true |
f73229f61512eeeef654392625e73b17e8a5aff6 | 19,243 | py | Python | tests/extensions/test_version.py | jisantuc/pystac | 12eff70f9423d642c9909c92b4ba228bf97bef0e | [
"Apache-2.0"
] | 130 | 2020-09-08T08:30:23.000Z | 2022-03-29T19:38:26.000Z | tests/extensions/test_version.py | jisantuc/pystac | 12eff70f9423d642c9909c92b4ba228bf97bef0e | [
"Apache-2.0"
] | 536 | 2020-08-20T17:24:49.000Z | 2022-03-31T23:49:37.000Z | tests/extensions/test_version.py | jisantuc/pystac | 12eff70f9423d642c9909c92b4ba228bf97bef0e | [
"Apache-2.0"
] | 42 | 2020-08-20T09:08:11.000Z | 2022-03-08T07:44:12.000Z | """Tests for pystac.extensions.version."""
import datetime
import unittest
from typing import List, Optional
import pystac
from pystac import ExtensionTypeError
from pystac.extensions import version
from pystac.extensions.version import VersionExtension, VersionRelType
from tests.utils import TestCases
URL_TEMPLATE: str = "http://example.com/catalog/%s.json"
def make_item(year: int) -> pystac.Item:
    """Build a minimal test Item for *year* with the Version extension enabled.

    Items differ only in their id, datetime, and self href so that tests can
    link several of them together as versions of one logical asset.
    """
    start_datetime = datetime.datetime(year, 1, 2)
    item = pystac.Item(
        id=f"USGS/GAP/CONUS/{year}",
        geometry=None,
        bbox=None,
        datetime=start_datetime,
        properties={},
    )
    item.set_self_href(URL_TEMPLATE % year)
    VersionExtension.add_to(item)
    return item
class VersionExtensionTest(unittest.TestCase):
    """Tests for applying the Version extension to unsupported objects."""

    def test_should_raise_exception_when_passing_invalid_extension_object(
        self,
    ) -> None:
        # A plain ``object`` is not an Item or Collection, so ext() must fail.
        with self.assertRaisesRegex(
            ExtensionTypeError,
            r"^Version extension does not apply to type 'object'$",
        ):
            VersionExtension.ext(object())
class ItemVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
    def setUp(self) -> None:
        """Create a fresh versioned test item and locate the example fixture."""
        super().setUp()
        self.item = make_item(2011)
        self.example_item_uri = TestCases.get_path("data-files/version/item.json")
def test_rel_types(self) -> None:
self.assertEqual(VersionRelType.LATEST.value, "latest-version")
self.assertEqual(VersionRelType.PREDECESSOR.value, "predecessor-version")
self.assertEqual(VersionRelType.SUCCESSOR.value, "successor-version")
    def test_stac_extensions(self) -> None:
        """make_item enables the extension, so has_extension must report True."""
        self.assertTrue(VersionExtension.has_extension(self.item))
def test_add_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.item).version)
self.assertNotIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_version_in_properties(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.item.properties)
self.assertIn(version.DEPRECATED, self.item.properties)
self.item.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertTrue(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_latest(self) -> None:
year = 2013
latest = make_item(year)
VersionExtension.ext(self.item).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.item).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, predecessor=predecessor)
predecessor_result = VersionExtension.ext(self.item).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_successor(self) -> None:
year = 2012
successor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.item).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
    def test_fail_validate(self) -> None:
        """An item declaring the extension but lacking a version must not validate."""
        with self.assertRaises(pystac.STACValidationError):
            self.item.validate()
def test_all_links(self) -> None:
deprecated = True
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
self.item.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
# Fetch two items from the catalog
item1 = cat.get_item("area-1-1-imagery", recursive=True)
item2 = cat.get_item("area-2-2-imagery", recursive=True)
assert item1 is not None
assert item2 is not None
# Enable the version extension on each, and link them
# as if they are different versions of the same Item
VersionExtension.add_to(item1)
VersionExtension.add_to(item2)
VersionExtension.ext(item1).apply(version="2.0", predecessor=item2)
VersionExtension.ext(item2).apply(version="1.0", successor=item1, latest=item1)
# Make a full copy of the catalog
cat_copy = cat.full_copy()
# Retrieve the copied version of the items
item1_copy = cat_copy.get_item("area-1-1-imagery", recursive=True)
assert item1_copy is not None
item2_copy = cat_copy.get_item("area-2-2-imagery", recursive=True)
assert item2_copy is not None
# Check to see if the version links point to the instances of the
# item objects as they should.
predecessor = item1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = item2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = item2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, item2_copy)
self.assertIs(successor_target, item1_copy)
self.assertIs(latest_target, item1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.item).latest = None
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).latest)
VersionExtension.ext(self.item).predecessor = None
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).predecessor)
VersionExtension.ext(self.item).successor = None
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_item(2013)
predecessor1 = make_item(2010)
successor1 = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).latest = latest2
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).predecessor = predecessor2
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).successor = successor2
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
# Should raise exception if Item does not include extension URI
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(item)
def test_ext_add_to(self) -> None:
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), item.stac_extensions)
_ = VersionExtension.ext(item, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), item.stac_extensions)
def make_collection(year: int) -> pystac.Collection:
asset_id = f"my/collection/of/things/{year}"
start = datetime.datetime(2014, 8, 10)
end = datetime.datetime(year, 1, 3, 4, 5)
bboxes = [[-180.0, -90.0, 180.0, 90.0]]
spatial_extent = pystac.SpatialExtent(bboxes)
intervals: List[List[Optional[datetime.datetime]]] = [[start, end]]
temporal_extent = pystac.TemporalExtent(intervals)
extent = pystac.Extent(spatial_extent, temporal_extent)
collection = pystac.Collection(asset_id, "desc", extent)
collection.set_self_href(URL_TEMPLATE % year)
VersionExtension.add_to(collection)
return collection
class CollectionVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
def setUp(self) -> None:
super().setUp()
self.collection = make_collection(2011)
self.example_collection_uri = TestCases.get_path(
"data-files/version/collection.json"
)
def test_stac_extensions(self) -> None:
self.assertTrue(VersionExtension.has_extension(self.collection))
def test_add_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.collection).version)
self.assertNotIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_version_deprecated(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.collection.extra_fields)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.collection.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertTrue(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_latest(self) -> None:
year = 2013
latest = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.collection).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_collection(year)
VersionExtension.ext(self.collection).apply(
self.version, predecessor=predecessor
)
predecessor_result = VersionExtension.ext(self.collection).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_successor(self) -> None:
year = 2012
successor = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.collection).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_fail_validate(self) -> None:
with self.assertRaises(pystac.STACValidationError):
self.collection.validate()
def test_validate_all(self) -> None:
deprecated = True
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
self.collection.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
# Fetch two collections from the catalog
col1 = cat.get_child("area-1-1", recursive=True)
assert isinstance(col1, pystac.Collection)
col2 = cat.get_child("area-2-2", recursive=True)
assert isinstance(col2, pystac.Collection)
# Enable the version extension on each, and link them
# as if they are different versions of the same Collection
VersionExtension.add_to(col1)
VersionExtension.add_to(col2)
VersionExtension.ext(col1).apply(version="2.0", predecessor=col2)
VersionExtension.ext(col2).apply(version="1.0", successor=col1, latest=col1)
# Make a full copy of the catalog
cat_copy = cat.full_copy()
# Retrieve the copied version of the items
col1_copy = cat_copy.get_child("area-1-1", recursive=True)
assert col1_copy is not None
col2_copy = cat_copy.get_child("area-2-2", recursive=True)
assert col2_copy is not None
# Check to see if the version links point to the instances of the
# col objects as they should.
predecessor = col1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = col2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = col2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, col2_copy)
self.assertIs(successor_target, col1_copy)
self.assertIs(latest_target, col1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.collection).latest = None
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).latest)
VersionExtension.ext(self.collection).predecessor = None
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).predecessor)
VersionExtension.ext(self.collection).successor = None
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_collection(2013)
predecessor1 = make_collection(2010)
successor1 = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).latest = latest2
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).predecessor = predecessor2
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).successor = successor2
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
# Should raise exception if Collection does not include extension URI
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(collection)
def test_ext_add_to(self) -> None:
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
_ = VersionExtension.ext(collection, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
| 40.426471 | 87 | 0.693187 |
import datetime
import unittest
from typing import List, Optional
import pystac
from pystac import ExtensionTypeError
from pystac.extensions import version
from pystac.extensions.version import VersionExtension, VersionRelType
from tests.utils import TestCases
URL_TEMPLATE: str = "http://example.com/catalog/%s.json"
def make_item(year: int) -> pystac.Item:
asset_id = f"USGS/GAP/CONUS/{year}"
start = datetime.datetime(year, 1, 2)
item = pystac.Item(
id=asset_id, geometry=None, bbox=None, datetime=start, properties={}
)
item.set_self_href(URL_TEMPLATE % year)
VersionExtension.add_to(item)
return item
class VersionExtensionTest(unittest.TestCase):
def test_should_raise_exception_when_passing_invalid_extension_object(
self,
) -> None:
self.assertRaisesRegex(
ExtensionTypeError,
r"^Version extension does not apply to type 'object'$",
VersionExtension.ext,
object(),
)
class ItemVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
def setUp(self) -> None:
super().setUp()
self.item = make_item(2011)
self.example_item_uri = TestCases.get_path("data-files/version/item.json")
def test_rel_types(self) -> None:
self.assertEqual(VersionRelType.LATEST.value, "latest-version")
self.assertEqual(VersionRelType.PREDECESSOR.value, "predecessor-version")
self.assertEqual(VersionRelType.SUCCESSOR.value, "successor-version")
def test_stac_extensions(self) -> None:
self.assertTrue(VersionExtension.has_extension(self.item))
def test_add_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.item).version)
self.assertNotIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_version_in_properties(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.item.properties)
self.assertIn(version.DEPRECATED, self.item.properties)
self.item.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertTrue(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_latest(self) -> None:
year = 2013
latest = make_item(year)
VersionExtension.ext(self.item).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.item).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, predecessor=predecessor)
predecessor_result = VersionExtension.ext(self.item).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_successor(self) -> None:
year = 2012
successor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.item).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_fail_validate(self) -> None:
with self.assertRaises(pystac.STACValidationError):
self.item.validate()
def test_all_links(self) -> None:
deprecated = True
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
self.item.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
item1 = cat.get_item("area-1-1-imagery", recursive=True)
item2 = cat.get_item("area-2-2-imagery", recursive=True)
assert item1 is not None
assert item2 is not None
VersionExtension.add_to(item1)
VersionExtension.add_to(item2)
VersionExtension.ext(item1).apply(version="2.0", predecessor=item2)
VersionExtension.ext(item2).apply(version="1.0", successor=item1, latest=item1)
cat_copy = cat.full_copy()
item1_copy = cat_copy.get_item("area-1-1-imagery", recursive=True)
assert item1_copy is not None
item2_copy = cat_copy.get_item("area-2-2-imagery", recursive=True)
assert item2_copy is not None
predecessor = item1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = item2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = item2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, item2_copy)
self.assertIs(successor_target, item1_copy)
self.assertIs(latest_target, item1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.item).latest = None
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).latest)
VersionExtension.ext(self.item).predecessor = None
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).predecessor)
VersionExtension.ext(self.item).successor = None
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_item(2013)
predecessor1 = make_item(2010)
successor1 = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).latest = latest2
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).predecessor = predecessor2
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).successor = successor2
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(item)
def test_ext_add_to(self) -> None:
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), item.stac_extensions)
_ = VersionExtension.ext(item, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), item.stac_extensions)
def make_collection(year: int) -> pystac.Collection:
asset_id = f"my/collection/of/things/{year}"
start = datetime.datetime(2014, 8, 10)
end = datetime.datetime(year, 1, 3, 4, 5)
bboxes = [[-180.0, -90.0, 180.0, 90.0]]
spatial_extent = pystac.SpatialExtent(bboxes)
intervals: List[List[Optional[datetime.datetime]]] = [[start, end]]
temporal_extent = pystac.TemporalExtent(intervals)
extent = pystac.Extent(spatial_extent, temporal_extent)
collection = pystac.Collection(asset_id, "desc", extent)
collection.set_self_href(URL_TEMPLATE % year)
VersionExtension.add_to(collection)
return collection
class CollectionVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
def setUp(self) -> None:
super().setUp()
self.collection = make_collection(2011)
self.example_collection_uri = TestCases.get_path(
"data-files/version/collection.json"
)
def test_stac_extensions(self) -> None:
self.assertTrue(VersionExtension.has_extension(self.collection))
def test_add_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.collection).version)
self.assertNotIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_version_deprecated(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.collection.extra_fields)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.collection.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertTrue(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_latest(self) -> None:
year = 2013
latest = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.collection).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_collection(year)
VersionExtension.ext(self.collection).apply(
self.version, predecessor=predecessor
)
predecessor_result = VersionExtension.ext(self.collection).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_successor(self) -> None:
year = 2012
successor = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.collection).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_fail_validate(self) -> None:
with self.assertRaises(pystac.STACValidationError):
self.collection.validate()
def test_validate_all(self) -> None:
deprecated = True
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
self.collection.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
col1 = cat.get_child("area-1-1", recursive=True)
assert isinstance(col1, pystac.Collection)
col2 = cat.get_child("area-2-2", recursive=True)
assert isinstance(col2, pystac.Collection)
VersionExtension.add_to(col1)
VersionExtension.add_to(col2)
VersionExtension.ext(col1).apply(version="2.0", predecessor=col2)
VersionExtension.ext(col2).apply(version="1.0", successor=col1, latest=col1)
cat_copy = cat.full_copy()
col1_copy = cat_copy.get_child("area-1-1", recursive=True)
assert col1_copy is not None
col2_copy = cat_copy.get_child("area-2-2", recursive=True)
assert col2_copy is not None
predecessor = col1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = col2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = col2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, col2_copy)
self.assertIs(successor_target, col1_copy)
self.assertIs(latest_target, col1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.collection).latest = None
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).latest)
VersionExtension.ext(self.collection).predecessor = None
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).predecessor)
VersionExtension.ext(self.collection).successor = None
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_collection(2013)
predecessor1 = make_collection(2010)
successor1 = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).latest = latest2
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).predecessor = predecessor2
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).successor = successor2
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(collection)
def test_ext_add_to(self) -> None:
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
_ = VersionExtension.ext(collection, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
| true | true |
f7322b5afc332aa2306c32d240c945d3ffe9d0d3 | 2,660 | py | Python | prepare_data/preparing_faces_parallel.py | yuval-alaluf/stylegan3-editing | ab01a5d90b8ba67e0da0e1388f0931482601006c | [
"MIT"
] | 347 | 2022-01-31T18:36:35.000Z | 2022-03-31T08:08:39.000Z | prepare_data/preparing_faces_parallel.py | yuval-alaluf/stylegan3-editing | ab01a5d90b8ba67e0da0e1388f0931482601006c | [
"MIT"
] | 11 | 2022-02-13T20:21:53.000Z | 2022-03-29T12:20:57.000Z | prepare_data/preparing_faces_parallel.py | yuval-alaluf/stylegan3-editing | ab01a5d90b8ba67e0da0e1388f0931482601006c | [
"MIT"
] | 24 | 2022-02-02T23:18:15.000Z | 2022-03-23T02:16:26.000Z | import math
import multiprocessing as mp
import sys
import time
from functools import partial
from pathlib import Path
import pyrallis
import dlib
from dataclasses import dataclass
sys.path.append(".")
sys.path.append("..")
from configs.paths_config import model_paths
from utils.alignment_utils import align_face, crop_face
SHAPE_PREDICTOR_PATH = model_paths["shape_predictor"]
@dataclass
class Options:
# Number of threads to run in parallel
num_threads: int = 1
# Path to raw data
root_path: str = ""
# Should be 'align' / 'crop'
mode: str = "align"
# In case of cropping, amount of random shifting to perform
random_shift: float = 0.05
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def extract_on_paths(file_paths, args: Options):
predictor = dlib.shape_predictor(str(SHAPE_PREDICTOR_PATH))
detector = dlib.get_frontal_face_detector()
pid = mp.current_process().name
print(f'\t{pid} is starting to extract on #{len(file_paths)} images')
tot_count = len(file_paths)
count = 0
for file_path, res_path in file_paths:
count += 1
if count % 100 == 0:
print(f'{pid} done with {count}/{tot_count}')
try:
if args.mode == "align":
res = align_face(file_path, detector, predictor)
else:
res = crop_face(file_path, detector, predictor, random_shift=args.random_shift)
res = res.convert('RGB')
Path(res_path).parent.mkdir(exist_ok=True, parents=True)
res.save(res_path)
except Exception:
continue
print('\tDone!')
@pyrallis.wrap()
def run(args: Options):
assert args.mode in ["align", "crop"], "Expected extractions mode to be one of 'align' or 'crop'"
root_path = Path(args.root_path)
out_crops_path = root_path.parent / Path(root_path.name + "_" + args.mode + "ed")
if not out_crops_path.exists():
out_crops_path.mkdir(exist_ok=True, parents=True)
file_paths = []
for file in root_path.iterdir():
res_path = out_crops_path / file.name
file_paths.append((str(file), str(res_path)))
file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads))))
print(len(file_chunks))
pool = mp.Pool(args.num_threads)
print(f'Running on {len(file_paths)} paths\nHere we goooo')
tic = time.time()
pool.map(partial(extract_on_paths, args=args), file_chunks)
toc = time.time()
print(f'Mischief managed in {tic - toc}s')
if __name__ == '__main__':
run()
| 28.913043 | 101 | 0.659398 | import math
import multiprocessing as mp
import sys
import time
from functools import partial
from pathlib import Path
import pyrallis
import dlib
from dataclasses import dataclass
sys.path.append(".")
sys.path.append("..")
from configs.paths_config import model_paths
from utils.alignment_utils import align_face, crop_face
SHAPE_PREDICTOR_PATH = model_paths["shape_predictor"]
@dataclass
class Options:
num_threads: int = 1
root_path: str = ""
mode: str = "align"
random_shift: float = 0.05
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def extract_on_paths(file_paths, args: Options):
predictor = dlib.shape_predictor(str(SHAPE_PREDICTOR_PATH))
detector = dlib.get_frontal_face_detector()
pid = mp.current_process().name
print(f'\t{pid} is starting to extract on #{len(file_paths)} images')
tot_count = len(file_paths)
count = 0
for file_path, res_path in file_paths:
count += 1
if count % 100 == 0:
print(f'{pid} done with {count}/{tot_count}')
try:
if args.mode == "align":
res = align_face(file_path, detector, predictor)
else:
res = crop_face(file_path, detector, predictor, random_shift=args.random_shift)
res = res.convert('RGB')
Path(res_path).parent.mkdir(exist_ok=True, parents=True)
res.save(res_path)
except Exception:
continue
print('\tDone!')
@pyrallis.wrap()
def run(args: Options):
assert args.mode in ["align", "crop"], "Expected extractions mode to be one of 'align' or 'crop'"
root_path = Path(args.root_path)
out_crops_path = root_path.parent / Path(root_path.name + "_" + args.mode + "ed")
if not out_crops_path.exists():
out_crops_path.mkdir(exist_ok=True, parents=True)
file_paths = []
for file in root_path.iterdir():
res_path = out_crops_path / file.name
file_paths.append((str(file), str(res_path)))
file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads))))
print(len(file_chunks))
pool = mp.Pool(args.num_threads)
print(f'Running on {len(file_paths)} paths\nHere we goooo')
tic = time.time()
pool.map(partial(extract_on_paths, args=args), file_chunks)
toc = time.time()
print(f'Mischief managed in {tic - toc}s')
if __name__ == '__main__':
run()
| true | true |
f7322d56ef9db59648ac91a2cf2ff4d481a60519 | 13,087 | py | Python | models/data/issue_submission_data.py | 0xNuggan/commons-config-backend | e462e9c1625eef0c44f21685f171782aedc56316 | [
"MIT"
] | 1 | 2021-11-03T18:26:12.000Z | 2021-11-03T18:26:12.000Z | models/data/issue_submission_data.py | 0xNuggan/commons-config-backend | e462e9c1625eef0c44f21685f171782aedc56316 | [
"MIT"
] | 36 | 2021-10-06T17:14:04.000Z | 2021-12-21T12:22:00.000Z | models/data/issue_submission_data.py | 0xNuggan/commons-config-backend | e462e9c1625eef0c44f21685f171782aedc56316 | [
"MIT"
] | 3 | 2021-08-19T22:21:02.000Z | 2021-11-30T15:49:20.000Z | advanced_settings_data = """
### *Advanced Settings
>This will be empty or non-existent if the user did not change any advanced settings from their default. Any settings changed from default will show up here
| Parameter | Value |
|:-----------------------:|:---------------------------:|
| Common Pool Amount | {commons_pool_amount} wxDAI |
| HNY Liquidity | {hny_liquidity} wxDAI |
| Garden Liquidity | {garden_liquidity} TEC |
| Virtual Supply | {virtual_supply} TEC |
| Virtual Balance | {virtual_balance} wxDAI |
| Transferable | {transferability} |
| Token Name | {token_name} |
| Token Symbol | {token_symbol} |
| Proposal Deposit | {proposal_deposit} wxDAI |
| Challenge Deposit | {challenge_deposit} wxDAI |
| Settlement Period | {settlement_period} days |
| Minimum Effective Supply | {minimum_effective_supply}% |
| Hatchers Rage Quit | {hatchers_rage_quit} wxDAI |
| Initial Buy | {initial_buy} wxDAI |
[*Learn more about Advanced Settings on the TEC forum](https://forum.tecommons.org/c/defi-legos-and-how-they-work-together/adv-ccd-params/27)
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
"""
issue_data = """

## What is the overall Commons Configuration strategy?
{overall_strategy}
#### Advanced Settings Modified? {has_advanced_settings}
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
# Summary
### Module 1: Token Freeze & Token Thaw
| Parameter | Value |
| ------------- | --------------------------- |
| Token Freeze | {token_freeze_period} Weeks |
| Token Thaw | {token_thaw_period} Weeks |
| Opening Price | {opening_price} wxDAI |
### Module 2: Augmented Bonding Curve
| Parameter | Value |
| ---------------- | ------------------ |
| Commons Tribute | {commons_tribute}% |
| Entry Tribute | {entry_tribute}% |
| Exit Tribute | {exit_tribute}% |
| *_Reserve Ratio_ | {reserve_ratio}% |
*This is an output. [Learn more about the Reserve Ratio here](https://forum.tecommons.org/t/augmented-bonding-curve-opening-price-reserve-ratio/516).
### Module 3: Tao Voting
| Parameters | Value |
| ----------------------- | ------------------------------------ |
| Support Required | {support_required}% |
| Minimum Quorum | {minimum_quorum}% |
| Vote Duration | {vote_duration_days} days(s) |
| Delegated Voting Period | {delegated_voting_days} day(s) |
| Quiet Ending Period | {quiet_ending_days} day(s) |
| Quiet Ending Extension | {quiet_ending_extension_days} day(s) |
| Execution Delay | {execution_delay_days} day(s) |
### Module 4: Conviction Voting
| Parameter | Value |
| ------------------ | ------------------------------- |
| Conviction Growth | {conviction_growth_days} day(s) |
| Minimum Conviction | {minimum_conviction}% |
| Spending Limit | {relative_spending_limit}% |
# Module 1: Token Freeze and Token Thaw
### Data:

| Duration | % of Tokens Released | Price Floor of Token |
| ------------------------- | --------------------- | ---------------------- |
| 3 months | {tokens_released[0]}% | {price_floor[0]} wxDAI |
| 6 months | {tokens_released[1]}% | {price_floor[1]} wxDAI |
| 9 months | {tokens_released[2]}% | {price_floor[2]} wxDAI |
| 1 year | {tokens_released[3]}% | {price_floor[3]} wxDAI |
| 1.5 years | {tokens_released[4]}% | {price_floor[4]} wxDAI |
| 2 years | {tokens_released[5]}% | {price_floor[5]} wxDAI |
| 3 years | {tokens_released[6]}% | {price_floor[6]} wxDAI |
| 4 years | {tokens_released[7]}% | {price_floor[7]} wxDAI |
| 5 years | {tokens_released[8]}% | {price_floor[8]} wxDAI |
- **Token Freeze**: **{token_freeze_period} weeks**, meaning that 100% of TEC tokens minted for Hatchers will remain locked from being sold or transferred for {token_freeze_period} weeks. They can still be used to vote while frozen.
- **Token Thaw**: **{token_thaw_period} weeks**, meaning the Hatchers frozen tokens will start to become transferable at a steady rate starting at the end of Token Freeze and ending {token_thaw_period} weeks later.
- **Opening Price**: **{opening_price} wxDAI**, meaning for the initial buy, the first TEC minted by the Augmented Bonding Curve will be priced at {opening_price} wxDAI making it the price floor during the Token Freeze.
### Strategy:
{token_lockup_strategy}
# Module 2: Augmented Bonding Curve (ABC)
### Data:

| Step # | Current Price | Amount In | Tribute Collected | Amount Out | New Price | Price Slippage |
| ------------------ | ------------------ | -------------- | ---------------------- | --------------- | -------------- | ------------------- |
{abc_steps}
#### NOTE:
We're very bullish on TEC so we provide the BUY scenario at launch to compare proposals... to explore this proposal's ABC further Click the link below to see their parameters in your dashboard, be warned this will clear any data you have in your dashboard:
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
| Allocation of Funds | wxDAI |
|----------------------------------|--------------------------|
| Common Pool (Before Initial Buy) | {common_pool_before} |
| Reserve (Before Initial Buy) | {reserve_balance_before} |
| Common Pool (After Initial Buy) | {common_pool_after} |
| Reserve (After Initial Buy) | {reserve_balance_after} |
## ABC Configuration Table
| Reserve (wxDai) | Supply (TEC) | Price (wxDai/TEC) |
|:-------------------:|:------------------:|:-----------------:|
| {abc_reserve[0]:,} | {abc_supply[0]:,.0f} | {abc_price[0]:,.2f} |
| {abc_reserve[1]:,} | {abc_supply[1]:,.0f} | {abc_price[1]:,.2f} |
| {abc_reserve[2]:,} | {abc_supply[2]:,.0f} | {abc_price[2]:,.2f} |
| {abc_reserve[3]:,} | {abc_supply[3]:,.0f} | {abc_price[3]:,.2f} |
| {abc_reserve[4]:,} | {abc_supply[4]:,.0f} | {abc_price[4]:,.2f} |
| {abc_reserve[5]:,} | {abc_supply[5]:,.0f} | {abc_price[5]:,.2f} |
| {abc_reserve[6]:,} | {abc_supply[6]:,.0f} | {abc_price[6]:,.2f} |
| {abc_reserve[7]:,} | {abc_supply[7]:,.0f} | {abc_price[7]:,.2f} |
| {abc_reserve[8]:,} | {abc_supply[8]:,.0f} | {abc_price[8]:,.2f} |
| {abc_reserve[9]:,} | {abc_supply[9]:,.0f} | {abc_price[9]:,.2f} |
| {abc_reserve[10]:,} | {abc_supply[10]:,.0f} | {abc_price[10]:,.2f} |
| {abc_reserve[11]:,} | {abc_supply[11]:,.0f} | {abc_price[11]:,.2f} |
| {abc_reserve[12]:,} | {abc_supply[12]:,.0f} | {abc_price[12]:,.2f} |
| {abc_reserve[13]:,} | {abc_supply[13]:,.0f} | {abc_price[13]:,.2f} |
| {abc_reserve[14]:,} | {abc_supply[14]:,.0f} | {abc_price[14]:,.2f} |
| {abc_reserve[15]:,} | {abc_supply[15]:,.0f} | {abc_price[15]:,.2f} |
| {abc_reserve[16]:,} | {abc_supply[16]:,.0f} | {abc_price[16]:,.2f} |
| {abc_reserve[17]:,} | {abc_supply[17]:,.0f} | {abc_price[17]:,.2f} |
| {abc_reserve[18]:,} | {abc_supply[18]:,.0f} | {abc_price[18]:,.2f} |
| {abc_reserve[19]:,} | {abc_supply[19]:,.0f} | {abc_price[19]:,.2f} |
| {abc_reserve[20]:,} | {abc_supply[20]:,.0f} | {abc_price[20]:,.2f} |
| {abc_reserve[21]:,} | {abc_supply[21]:,.0f} | {abc_price[21]:,.2f} |
| {abc_reserve[22]:,} | {abc_supply[22]:,.0f} | {abc_price[22]:,.2f} |
| {abc_reserve[23]:,} | {abc_supply[23]:,.0f} | {abc_price[23]:,.2f} |
| {abc_reserve[24]:,} | {abc_supply[24]:,.0f} | {abc_price[24]:,.2f} |
| {abc_reserve[25]:,} | {abc_supply[25]:,.0f} | {abc_price[25]:,.2f} |
| {abc_reserve[26]:,} | {abc_supply[26]:,.0f} | {abc_price[26]:,.2f} |
- **Commons Tribute**: **{commons_tribute}%**, which means that {commons_tribute}% of the Hatch funds ({common_pool_before} wxDAI) will go to the Common Pool and {commons_tribute_remainder}% ({reserve_balance_before} wxDAI) will go to the ABC's Reserve.
- **Entry Tribute**: **{entry_tribute}%** meaning that from every **BUY** order on the ABC, {entry_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
- **Exit Tribute**: **{exit_tribute}%** meaning that from every **SELL** order on the ABC, {exit_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
### Strategy:
{abc_strategy}
# Module 3: Tao Voting
### Data:

|# of Quiet Ending Extensions | No Extensions | With 1 Extension | With 2 Extensions |
| ------------------------------------------- | ------------------------------------- | ------------------------------------------------- | -------------------------------------------------- |
| **Time to Vote on Proposals** | {vote_duration_days} days | {vote_duration_days_1_extension} days | {vote_duration_days_2_extensions} days |
| **Time to Review a Delegates Vote** | {review_duration_days} days | {review_duration_days_1_extension} days | {review_duration_days_2_extensions} days |
| **Time to Execute a Passing Proposal** | {execute_proposal_duration_days} days | {execute_proposal_duration_days_1_extension} days | {execute_proposal_duration_days_2_extensions} days |
- **Support Required**: **{support_required}%**, which means {support_required}% of all votes must be in favor of a proposal for it to pass.
- **Minimum Quorum**: **{minimum_quorum}%**, meaning that {minimum_quorum}% of all tokens need to have voted on a proposal in order for it to become valid.
- **Vote Duration**: **{vote_duration_days} day(s)**, meaning that eligible voters will have {vote_duration_days} day(s) to vote on a proposal.
- **Delegated Voting Period** is set for **{delegated_voting_days} day(s)**, meaning that Delegates will have {delegated_voting_days} day(s) to use their delegated voting power to vote on a proposal.
- **Quiet Ending Period**: **{quiet_ending_days} day(s)**, this means that {quiet_ending_days} day(s) before the end of the Vote Duration, if the vote outcome changes, the Quiet Ending Extension will be triggered.
- **Quiet Ending Extension**: **{quiet_ending_extension_days} day(s)**, meaning that if the vote outcome changes during the Quiet Ending Period, an additional {quiet_ending_extension_days} day(s) will be added for voting.
- **Execution Delay**: **{execution_delay_days} days(s)**, meaning that there is an {execution_delay_days} day delay after the vote is passed before the proposed action is executed.
### Strategy:
{tao_voting_strategy}
# Module 4: Conviction Voting
### Data:

| Proposal | Requested Amount (wxDAI) | Common Pool (wxDAI) | Effective supply (TEC) | Tokens Needed To Pass (TEC) |
|:---------:|:------------------------:|:-------------------------:|:-----------------------:|:---------------------------:|
| 1 | {requested_amount[0]:,} | {amount_common_pool[0]:,} | {effective_supply[0]:,} | {min_tokens_pass[0]} |
| 2 | {requested_amount[1]:,} | {amount_common_pool[1]:,} | {effective_supply[1]:,} | {min_tokens_pass[1]} |
| 3 | {requested_amount[2]:,} | {amount_common_pool[2]:,} | {effective_supply[2]:,} | {min_tokens_pass[2]} |
| 4 | {requested_amount[3]:,} | {amount_common_pool[3]:,} | {effective_supply[3]:,} | {min_tokens_pass[3]} |
| 5 | {requested_amount[4]:,} | {amount_common_pool[4]:,} | {effective_supply[4]:,} | {min_tokens_pass[4]} |
| 6 | {requested_amount[5]:,} | {amount_common_pool[5]:,} | {effective_supply[5]:,} | {min_tokens_pass[5]} |
- **Conviction Growth**: **{conviction_growth_days} day(s)**, meaning that voting power will increase by 50% every {conviction_growth_days} days that they are staked behind a proposal, so after {double_conviction_growth_days} days, a voters voting power will have reached 75% of it's maximum capacity.
- **Minimum Conviction**: **{minimum_conviction}%**, this means that to pass any funding request it will take at least {minimum_conviction}% of the actively voting TEC tokens.
- The **Spending Limit**: **{relative_spending_limit}%**, which means that no more than {relative_spending_limit}% of the total funds in the Common Pool can be funded by a single proposal.
### Strategy:
{conviction_voting_strategy}
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
{advanced_settings_section}
"""
| 61.731132 | 301 | 0.589058 | advanced_settings_data = """
### *Advanced Settings
>This will be empty or non-existent if the user did not change any advanced settings from their default. Any settings changed from default will show up here
| Parameter | Value |
|:-----------------------:|:---------------------------:|
| Common Pool Amount | {commons_pool_amount} wxDAI |
| HNY Liquidity | {hny_liquidity} wxDAI |
| Garden Liquidity | {garden_liquidity} TEC |
| Virtual Supply | {virtual_supply} TEC |
| Virtual Balance | {virtual_balance} wxDAI |
| Transferable | {transferability} |
| Token Name | {token_name} |
| Token Symbol | {token_symbol} |
| Proposal Deposit | {proposal_deposit} wxDAI |
| Challenge Deposit | {challenge_deposit} wxDAI |
| Settlement Period | {settlement_period} days |
| Minimum Effective Supply | {minimum_effective_supply}% |
| Hatchers Rage Quit | {hatchers_rage_quit} wxDAI |
| Initial Buy | {initial_buy} wxDAI |
[*Learn more about Advanced Settings on the TEC forum](https://forum.tecommons.org/c/defi-legos-and-how-they-work-together/adv-ccd-params/27)
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
"""
issue_data = """

## What is the overall Commons Configuration strategy?
{overall_strategy}
#### Advanced Settings Modified? {has_advanced_settings}
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
# Summary
### Module 1: Token Freeze & Token Thaw
| Parameter | Value |
| ------------- | --------------------------- |
| Token Freeze | {token_freeze_period} Weeks |
| Token Thaw | {token_thaw_period} Weeks |
| Opening Price | {opening_price} wxDAI |
### Module 2: Augmented Bonding Curve
| Parameter | Value |
| ---------------- | ------------------ |
| Commons Tribute | {commons_tribute}% |
| Entry Tribute | {entry_tribute}% |
| Exit Tribute | {exit_tribute}% |
| *_Reserve Ratio_ | {reserve_ratio}% |
*This is an output. [Learn more about the Reserve Ratio here](https://forum.tecommons.org/t/augmented-bonding-curve-opening-price-reserve-ratio/516).
### Module 3: Tao Voting
| Parameters | Value |
| ----------------------- | ------------------------------------ |
| Support Required | {support_required}% |
| Minimum Quorum | {minimum_quorum}% |
| Vote Duration | {vote_duration_days} days(s) |
| Delegated Voting Period | {delegated_voting_days} day(s) |
| Quiet Ending Period | {quiet_ending_days} day(s) |
| Quiet Ending Extension | {quiet_ending_extension_days} day(s) |
| Execution Delay | {execution_delay_days} day(s) |
### Module 4: Conviction Voting
| Parameter | Value |
| ------------------ | ------------------------------- |
| Conviction Growth | {conviction_growth_days} day(s) |
| Minimum Conviction | {minimum_conviction}% |
| Spending Limit | {relative_spending_limit}% |
# Module 1: Token Freeze and Token Thaw
### Data:

| Duration | % of Tokens Released | Price Floor of Token |
| ------------------------- | --------------------- | ---------------------- |
| 3 months | {tokens_released[0]}% | {price_floor[0]} wxDAI |
| 6 months | {tokens_released[1]}% | {price_floor[1]} wxDAI |
| 9 months | {tokens_released[2]}% | {price_floor[2]} wxDAI |
| 1 year | {tokens_released[3]}% | {price_floor[3]} wxDAI |
| 1.5 years | {tokens_released[4]}% | {price_floor[4]} wxDAI |
| 2 years | {tokens_released[5]}% | {price_floor[5]} wxDAI |
| 3 years | {tokens_released[6]}% | {price_floor[6]} wxDAI |
| 4 years | {tokens_released[7]}% | {price_floor[7]} wxDAI |
| 5 years | {tokens_released[8]}% | {price_floor[8]} wxDAI |
- **Token Freeze**: **{token_freeze_period} weeks**, meaning that 100% of TEC tokens minted for Hatchers will remain locked from being sold or transferred for {token_freeze_period} weeks. They can still be used to vote while frozen.
- **Token Thaw**: **{token_thaw_period} weeks**, meaning the Hatchers frozen tokens will start to become transferable at a steady rate starting at the end of Token Freeze and ending {token_thaw_period} weeks later.
- **Opening Price**: **{opening_price} wxDAI**, meaning for the initial buy, the first TEC minted by the Augmented Bonding Curve will be priced at {opening_price} wxDAI making it the price floor during the Token Freeze.
### Strategy:
{token_lockup_strategy}
# Module 2: Augmented Bonding Curve (ABC)
### Data:

| Step # | Current Price | Amount In | Tribute Collected | Amount Out | New Price | Price Slippage |
| ------------------ | ------------------ | -------------- | ---------------------- | --------------- | -------------- | ------------------- |
{abc_steps}
#### NOTE:
We're very bullish on TEC so we provide the BUY scenario at launch to compare proposals... to explore this proposal's ABC further Click the link below to see their parameters in your dashboard, be warned this will clear any data you have in your dashboard:
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
| Allocation of Funds | wxDAI |
|----------------------------------|--------------------------|
| Common Pool (Before Initial Buy) | {common_pool_before} |
| Reserve (Before Initial Buy) | {reserve_balance_before} |
| Common Pool (After Initial Buy) | {common_pool_after} |
| Reserve (After Initial Buy) | {reserve_balance_after} |
## ABC Configuration Table
| Reserve (wxDai) | Supply (TEC) | Price (wxDai/TEC) |
|:-------------------:|:------------------:|:-----------------:|
| {abc_reserve[0]:,} | {abc_supply[0]:,.0f} | {abc_price[0]:,.2f} |
| {abc_reserve[1]:,} | {abc_supply[1]:,.0f} | {abc_price[1]:,.2f} |
| {abc_reserve[2]:,} | {abc_supply[2]:,.0f} | {abc_price[2]:,.2f} |
| {abc_reserve[3]:,} | {abc_supply[3]:,.0f} | {abc_price[3]:,.2f} |
| {abc_reserve[4]:,} | {abc_supply[4]:,.0f} | {abc_price[4]:,.2f} |
| {abc_reserve[5]:,} | {abc_supply[5]:,.0f} | {abc_price[5]:,.2f} |
| {abc_reserve[6]:,} | {abc_supply[6]:,.0f} | {abc_price[6]:,.2f} |
| {abc_reserve[7]:,} | {abc_supply[7]:,.0f} | {abc_price[7]:,.2f} |
| {abc_reserve[8]:,} | {abc_supply[8]:,.0f} | {abc_price[8]:,.2f} |
| {abc_reserve[9]:,} | {abc_supply[9]:,.0f} | {abc_price[9]:,.2f} |
| {abc_reserve[10]:,} | {abc_supply[10]:,.0f} | {abc_price[10]:,.2f} |
| {abc_reserve[11]:,} | {abc_supply[11]:,.0f} | {abc_price[11]:,.2f} |
| {abc_reserve[12]:,} | {abc_supply[12]:,.0f} | {abc_price[12]:,.2f} |
| {abc_reserve[13]:,} | {abc_supply[13]:,.0f} | {abc_price[13]:,.2f} |
| {abc_reserve[14]:,} | {abc_supply[14]:,.0f} | {abc_price[14]:,.2f} |
| {abc_reserve[15]:,} | {abc_supply[15]:,.0f} | {abc_price[15]:,.2f} |
| {abc_reserve[16]:,} | {abc_supply[16]:,.0f} | {abc_price[16]:,.2f} |
| {abc_reserve[17]:,} | {abc_supply[17]:,.0f} | {abc_price[17]:,.2f} |
| {abc_reserve[18]:,} | {abc_supply[18]:,.0f} | {abc_price[18]:,.2f} |
| {abc_reserve[19]:,} | {abc_supply[19]:,.0f} | {abc_price[19]:,.2f} |
| {abc_reserve[20]:,} | {abc_supply[20]:,.0f} | {abc_price[20]:,.2f} |
| {abc_reserve[21]:,} | {abc_supply[21]:,.0f} | {abc_price[21]:,.2f} |
| {abc_reserve[22]:,} | {abc_supply[22]:,.0f} | {abc_price[22]:,.2f} |
| {abc_reserve[23]:,} | {abc_supply[23]:,.0f} | {abc_price[23]:,.2f} |
| {abc_reserve[24]:,} | {abc_supply[24]:,.0f} | {abc_price[24]:,.2f} |
| {abc_reserve[25]:,} | {abc_supply[25]:,.0f} | {abc_price[25]:,.2f} |
| {abc_reserve[26]:,} | {abc_supply[26]:,.0f} | {abc_price[26]:,.2f} |
- **Commons Tribute**: **{commons_tribute}%**, which means that {commons_tribute}% of the Hatch funds ({common_pool_before} wxDAI) will go to the Common Pool and {commons_tribute_remainder}% ({reserve_balance_before} wxDAI) will go to the ABC's Reserve.
- **Entry Tribute**: **{entry_tribute}%** meaning that from every **BUY** order on the ABC, {entry_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
- **Exit Tribute**: **{exit_tribute}%** meaning that from every **SELL** order on the ABC, {exit_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
### Strategy:
{abc_strategy}
# Module 3: Tao Voting
### Data:

|# of Quiet Ending Extensions | No Extensions | With 1 Extension | With 2 Extensions |
| ------------------------------------------- | ------------------------------------- | ------------------------------------------------- | -------------------------------------------------- |
| **Time to Vote on Proposals** | {vote_duration_days} days | {vote_duration_days_1_extension} days | {vote_duration_days_2_extensions} days |
| **Time to Review a Delegates Vote** | {review_duration_days} days | {review_duration_days_1_extension} days | {review_duration_days_2_extensions} days |
| **Time to Execute a Passing Proposal** | {execute_proposal_duration_days} days | {execute_proposal_duration_days_1_extension} days | {execute_proposal_duration_days_2_extensions} days |
- **Support Required**: **{support_required}%**, which means {support_required}% of all votes must be in favor of a proposal for it to pass.
- **Minimum Quorum**: **{minimum_quorum}%**, meaning that {minimum_quorum}% of all tokens need to have voted on a proposal in order for it to become valid.
- **Vote Duration**: **{vote_duration_days} day(s)**, meaning that eligible voters will have {vote_duration_days} day(s) to vote on a proposal.
- **Delegated Voting Period** is set for **{delegated_voting_days} day(s)**, meaning that Delegates will have {delegated_voting_days} day(s) to use their delegated voting power to vote on a proposal.
- **Quiet Ending Period**: **{quiet_ending_days} day(s)**, this means that {quiet_ending_days} day(s) before the end of the Vote Duration, if the vote outcome changes, the Quiet Ending Extension will be triggered.
- **Quiet Ending Extension**: **{quiet_ending_extension_days} day(s)**, meaning that if the vote outcome changes during the Quiet Ending Period, an additional {quiet_ending_extension_days} day(s) will be added for voting.
- **Execution Delay**: **{execution_delay_days} days(s)**, meaning that there is an {execution_delay_days} day delay after the vote is passed before the proposed action is executed.
### Strategy:
{tao_voting_strategy}
# Module 4: Conviction Voting
### Data:

| Proposal | Requested Amount (wxDAI) | Common Pool (wxDAI) | Effective supply (TEC) | Tokens Needed To Pass (TEC) |
|:---------:|:------------------------:|:-------------------------:|:-----------------------:|:---------------------------:|
| 1 | {requested_amount[0]:,} | {amount_common_pool[0]:,} | {effective_supply[0]:,} | {min_tokens_pass[0]} |
| 2 | {requested_amount[1]:,} | {amount_common_pool[1]:,} | {effective_supply[1]:,} | {min_tokens_pass[1]} |
| 3 | {requested_amount[2]:,} | {amount_common_pool[2]:,} | {effective_supply[2]:,} | {min_tokens_pass[2]} |
| 4 | {requested_amount[3]:,} | {amount_common_pool[3]:,} | {effective_supply[3]:,} | {min_tokens_pass[3]} |
| 5 | {requested_amount[4]:,} | {amount_common_pool[4]:,} | {effective_supply[4]:,} | {min_tokens_pass[4]} |
| 6 | {requested_amount[5]:,} | {amount_common_pool[5]:,} | {effective_supply[5]:,} | {min_tokens_pass[5]} |
- **Conviction Growth**: **{conviction_growth_days} day(s)**, meaning that voting power will increase by 50% every {conviction_growth_days} days that they are staked behind a proposal, so after {double_conviction_growth_days} days, a voters voting power will have reached 75% of it's maximum capacity.
- **Minimum Conviction**: **{minimum_conviction}%**, this means that to pass any funding request it will take at least {minimum_conviction}% of the actively voting TEC tokens.
- The **Spending Limit**: **{relative_spending_limit}%**, which means that no more than {relative_spending_limit}% of the total funds in the Common Pool can be funded by a single proposal.
### Strategy:
{conviction_voting_strategy}
### [FORK THIS PROPOSAL](http://config.tecommons.org/config/import/{issue_number}) (link)
{advanced_settings_section}
"""
| true | true |
f7322d6acf0bb63777abeccc59db6c90f96b1fe9 | 2,443 | py | Python | Python/phonenumbers/data/region_GR.py | skykisl/uberbruns2 | 26933efce04dba700d93cc75c7b74e069fb02d26 | [
"Unlicense"
] | 5 | 2015-04-27T20:10:56.000Z | 2018-06-14T18:19:09.000Z | python/phonenumbers/data/region_GR.py | vemel/python-phonenumbers | 595c322bf12106a3b95e3f202e948a7c6b6c15b8 | [
"Apache-2.0"
] | 2 | 2017-06-08T16:11:13.000Z | 2018-05-07T11:50:13.000Z | python/phonenumbers/data/region_GR.py | vemel/python-phonenumbers | 595c322bf12106a3b95e3f202e948a7c6b6c15b8 | [
"Apache-2.0"
] | 6 | 2015-02-19T11:11:04.000Z | 2022-03-15T19:38:31.000Z | """Auto-generated file, do not edit by hand. GR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GR = PhoneMetadata(id='GR', country_code=30, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[26-9]\\d{9}', possible_number_pattern='\\d{10}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1\\d{2}|2(?:3[1-8]|4[1-7]|5[1-4]|6[1-8]|7[1-5]|[289][1-9])|3(?:1\\d|2[1-57]|3[1-4]|[45][1-3]|7[1-7]|8[1-6]|9[1-79])|4(?:1\\d|2[1-8]|3[1-4]|4[13-5]|6[1-578]|9[1-5])|5(?:1\\d|2[1-3]|4[124]|5[1-6]|[39][1-4])|6(?:1\\d|3[124]|4[1-7]|5[13-9]|[269][1-6]|7[14]|8[1-5])|7(?:1\\d|2[1-5]|3[1-6]|4[1-7]|5[1-57]|6[134]|9[15-7])|8(?:1\\d|2[1-5]|[34][1-4]|9[1-7]))\\d{6}', possible_number_pattern='\\d{10}', example_number='2123456789'),
mobile=PhoneNumberDesc(national_number_pattern='69\\d{8}', possible_number_pattern='\\d{10}', example_number='6912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', possible_number_pattern='\\d{10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='90[19]\\d{7}', possible_number_pattern='\\d{10}', example_number='9091234567'),
shared_cost=PhoneNumberDesc(national_number_pattern='8(?:0[16]|12|25)\\d{7}', possible_number_pattern='\\d{10}', example_number='8011234567'),
personal_number=PhoneNumberDesc(national_number_pattern='70\\d{8}', possible_number_pattern='\\d{10}', example_number='7012345678'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:00|12|66|99)', possible_number_pattern='\\d{3}', example_number='112'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='([27]\\d)(\\d{4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['21|7']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['2[2-9]1|[689]']),
NumberFormat(pattern='(2\\d{3})(\\d{6})', format=u'\\1 \\2', leading_digits_pattern=['2[2-9][02-9]'])])
| 116.333333 | 482 | 0.693819 | from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GR = PhoneMetadata(id='GR', country_code=30, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[26-9]\\d{9}', possible_number_pattern='\\d{10}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1\\d{2}|2(?:3[1-8]|4[1-7]|5[1-4]|6[1-8]|7[1-5]|[289][1-9])|3(?:1\\d|2[1-57]|3[1-4]|[45][1-3]|7[1-7]|8[1-6]|9[1-79])|4(?:1\\d|2[1-8]|3[1-4]|4[13-5]|6[1-578]|9[1-5])|5(?:1\\d|2[1-3]|4[124]|5[1-6]|[39][1-4])|6(?:1\\d|3[124]|4[1-7]|5[13-9]|[269][1-6]|7[14]|8[1-5])|7(?:1\\d|2[1-5]|3[1-6]|4[1-7]|5[1-57]|6[134]|9[15-7])|8(?:1\\d|2[1-5]|[34][1-4]|9[1-7]))\\d{6}', possible_number_pattern='\\d{10}', example_number='2123456789'),
mobile=PhoneNumberDesc(national_number_pattern='69\\d{8}', possible_number_pattern='\\d{10}', example_number='6912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', possible_number_pattern='\\d{10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='90[19]\\d{7}', possible_number_pattern='\\d{10}', example_number='9091234567'),
shared_cost=PhoneNumberDesc(national_number_pattern='8(?:0[16]|12|25)\\d{7}', possible_number_pattern='\\d{10}', example_number='8011234567'),
personal_number=PhoneNumberDesc(national_number_pattern='70\\d{8}', possible_number_pattern='\\d{10}', example_number='7012345678'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:00|12|66|99)', possible_number_pattern='\\d{3}', example_number='112'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='([27]\\d)(\\d{4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['21|7']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['2[2-9]1|[689]']),
NumberFormat(pattern='(2\\d{3})(\\d{6})', format=u'\\1 \\2', leading_digits_pattern=['2[2-9][02-9]'])])
| true | true |
f7322f621bf5191e1411b871cefba6a1f2f1057f | 9,058 | py | Python | tests/test_0022-number-of-branches.py | nikoladze/uproot4 | 57fafcfd73c40aea21dd19a439c76c79fd370768 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0022-number-of-branches.py | nikoladze/uproot4 | 57fafcfd73c40aea21dd19a439c76c79fd370768 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0022-number-of-branches.py | nikoladze/uproot4 | 57fafcfd73c40aea21dd19a439c76c79fd370768 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
from __future__ import absolute_import
import sys
import json
import numpy
import pytest
import skhep_testdata
import uproot4
import uproot4.interpretation.library
import uproot4.interpretation.jagged
import uproot4.interpretation.numerical
def test_branchname():
with uproot4.open(
skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
)["sample"] as sample:
assert sample.arrays("i4", library="np")["i4"].tolist() == list(range(-15, 15))
arrays = sample.arrays(["i4", "i8"], library="np")
assert set(arrays.keys()) == set(["i4", "i8"])
assert arrays["i4"].tolist() == list(range(-15, 15))
assert arrays["i8"].tolist() == list(range(-15, 15))
arrays = sample.arrays(filter_name="/i[48]/", library="np")
assert set(arrays.keys()) == set(["i4", "i8"])
assert arrays["i4"].tolist() == list(range(-15, 15))
assert arrays["i8"].tolist() == list(range(-15, 15))
arrays = sample.arrays(filter_name=["/i[12]/", "/i[48]/"], library="np")
assert set(arrays.keys()) == set(["i1", "i2", "i4", "i8"])
assert arrays["i1"].tolist() == list(range(-15, 15))
assert arrays["i2"].tolist() == list(range(-15, 15))
assert arrays["i4"].tolist() == list(range(-15, 15))
assert arrays["i8"].tolist() == list(range(-15, 15))
arrays = sample.arrays(filter_name="i*", library="np")
assert set(arrays.keys()) == set(["i1", "i2", "i4", "i8"])
assert arrays["i1"].tolist() == list(range(-15, 15))
assert arrays["i2"].tolist() == list(range(-15, 15))
assert arrays["i4"].tolist() == list(range(-15, 15))
assert arrays["i8"].tolist() == list(range(-15, 15))
arrays = sample.arrays(["i4", "i8"], filter_name="u*", library="np")
assert set(arrays.keys()) == set(["i4", "i8"])
assert arrays["i4"].tolist() == list(range(-15, 15))
assert arrays["i8"].tolist() == list(range(-15, 15))
def test_tuple_branchname():
    """how=tuple returns positional results, even for repeated branch names."""
    expected = list(range(-15, 15))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        for names in (["i4", "i8"], ["i4", "i4"]):
            arrays = sample.arrays(names, library="np", how=tuple)
            assert isinstance(arrays, tuple) and len(arrays) == 2
            assert arrays[0].tolist() == expected
            assert arrays[1].tolist() == expected
def test_interpretation():
    """Branches can be reinterpreted (e.g. signed ints read as unsigned)."""
    signed = list(range(-15, 15))
    as_u1 = list(range(241, 256)) + list(range(0, 15))
    as_u2 = list(range(65521, 65536)) + list(range(0, 15))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        # reinterpret a single branch
        assert sample["i2"].array(">u2", library="np").tolist() == as_u2
        # dict of {branch: interpretation}
        arrays = sample.arrays({"i1": ">u1", "i2": ">u2"}, library="np")
        assert set(arrays.keys()) == {"i1", "i2"}
        assert arrays["i1"].tolist() == as_u1
        assert arrays["i2"].tolist() == as_u2
        # list of (branch, interpretation) pairs with tuple output
        arrays = sample.arrays([("i1", ">u1"), ("i2", ">u2")], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == as_u1
        assert arrays[1].tolist() == as_u2
        # None means "use the branch's default interpretation"
        arrays = sample.arrays({"i1": ">u1", "i2": None}, library="np")
        assert set(arrays.keys()) == {"i1", "i2"}
        assert arrays["i1"].tolist() == as_u1
        assert arrays["i2"].tolist() == signed
        arrays = sample.arrays([("i1", ">u1"), ("i2", None)], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == as_u1
        assert arrays[1].tolist() == signed
        # the same branch may not be requested with conflicting interpretations
        with pytest.raises(ValueError):
            sample.arrays([("i1", ">u1"), ("i1", None)], library="np", how=tuple)
def test_compute():
    """Expressions over branches are evaluated and keyed by the expression text."""
    plus100 = list(range(85, 115))
    plus200 = list(range(185, 215))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert sample.arrays("i4 + 100", library="np")["i4 + 100"].tolist() == plus100
        arrays = sample.arrays(["i4 + 100", "i8 + 100"], library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i8 + 100"}
        assert arrays["i4 + 100"].tolist() == plus100
        assert arrays["i8 + 100"].tolist() == plus100
        arrays = sample.arrays(["i4 + 100", "i4 + 200"], library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i4 + 200"}
        assert arrays["i4 + 100"].tolist() == plus100
        assert arrays["i4 + 200"].tolist() == plus200
        # duplicate expressions collapse to one key in dict output...
        arrays = sample.arrays(["i4 + 100", "i4 + 100"], library="np")
        assert set(arrays.keys()) == {"i4 + 100"}
        assert arrays["i4 + 100"].tolist() == plus100
        # ...but stay positional in tuple output
        arrays = sample.arrays(["i4 + 100", "i4 + 100"], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == plus100
        assert arrays[1].tolist() == plus100
def test_cut():
    """cut expressions filter entries for both plain branches and computed ones."""
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert sample.arrays("i4 + 100", cut="i4 > 0", library="np")[
            "i4 + 100"
        ].tolist() == list(range(101, 115))
        arrays = sample.arrays(["i4 + 100", "i8 + 100"], cut="i4 > 0", library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i8 + 100"}
        for name in ("i4 + 100", "i8 + 100"):
            assert arrays[name].tolist() == list(range(101, 115))
        arrays = sample.arrays(["i4", "i8"], cut="i4 > 0", library="np")
        assert set(arrays.keys()) == {"i4", "i8"}
        for name in ("i4", "i8"):
            assert arrays[name].tolist() == list(range(1, 15))
def test_aliases():
    """Aliases: definition, chaining, cycle rejection, and use inside cuts."""
    plus100 = list(range(85, 115))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        # a single aliased expression
        assert sample.arrays(
            "whatever", aliases={"whatever": "i4 + 100"}, library="np"
        )["whatever"].tolist() == plus100
        # independent aliases, then an alias that refers to another alias
        for alias_defs in (
            {"one": "i4 + 100", "two": "i8 + 100"},
            {"one": "i4 + 100", "two": "one"},
        ):
            arrays = sample.arrays(["one", "two"], aliases=alias_defs, library="np")
            assert set(arrays.keys()) == {"one", "two"}
            assert arrays["one"].tolist() == plus100
            assert arrays["two"].tolist() == plus100
        # mutually recursive aliases must be rejected
        with pytest.raises(ValueError):
            sample.arrays(
                ["one", "two"], aliases={"one": "two", "two": "one"}, library="np"
            )
        # aliases are usable inside cut expressions
        arrays = sample.arrays(
            ["one", "two"],
            cut="one > 100",
            aliases={"one": "i4 + 100", "two": "i8 + 100"},
            library="np",
        )
        assert set(arrays.keys()) == {"one", "two"}
        assert arrays["one"].tolist() == list(range(101, 115))
        assert arrays["two"].tolist() == list(range(101, 115))
        # a cut may reference an alias that is not itself requested
        arrays = sample.arrays(
            ["i4"],
            cut="one > 100",
            aliases={"one": "i4 + 100", "two": "i8 + 100"},
            library="np",
        )
        assert set(arrays.keys()) == {"i4"}
        assert arrays["i4"].tolist() == list(range(1, 15))
def test_jagged():
    """Jagged branch Ai4: entry lengths cycle 0..4, restarting every five entries."""
    # Build the expected ragged structure instead of spelling out all 30 rows:
    # for each base value, rows of length 0..4 with stride-2 contents.
    expected = []
    for start in range(-15, 15, 5):
        for length in range(5):
            expected.append([start + 2 * i for i in range(length)])
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert [x.tolist() for x in sample.arrays("Ai4", library="np")["Ai4"]] == expected
| 39.212121 | 88 | 0.520534 |
from __future__ import absolute_import
import sys
import json
import numpy
import pytest
import skhep_testdata
import uproot4
import uproot4.interpretation.library
import uproot4.interpretation.jagged
import uproot4.interpretation.numerical
def test_branchname():
    """Branches can be selected by name, name list, regex filter, or glob filter."""
    expected = list(range(-15, 15))
    signed_ints = ("i1", "i2", "i4", "i8")
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        # single branch name
        assert sample.arrays("i4", library="np")["i4"].tolist() == expected
        # explicit list of branch names
        arrays = sample.arrays(["i4", "i8"], library="np")
        assert set(arrays.keys()) == {"i4", "i8"}
        for name in ("i4", "i8"):
            assert arrays[name].tolist() == expected
        # single regex filter
        arrays = sample.arrays(filter_name="/i[48]/", library="np")
        assert set(arrays.keys()) == {"i4", "i8"}
        for name in ("i4", "i8"):
            assert arrays[name].tolist() == expected
        # list of regex filters
        arrays = sample.arrays(filter_name=["/i[12]/", "/i[48]/"], library="np")
        assert set(arrays.keys()) == set(signed_ints)
        for name in signed_ints:
            assert arrays[name].tolist() == expected
        # glob filter
        arrays = sample.arrays(filter_name="i*", library="np")
        assert set(arrays.keys()) == set(signed_ints)
        for name in signed_ints:
            assert arrays[name].tolist() == expected
        # explicit names win over a non-matching filter_name
        arrays = sample.arrays(["i4", "i8"], filter_name="u*", library="np")
        assert set(arrays.keys()) == {"i4", "i8"}
        for name in ("i4", "i8"):
            assert arrays[name].tolist() == expected
def test_tuple_branchname():
    """how=tuple returns positional results, even for repeated branch names."""
    expected = list(range(-15, 15))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        for names in (["i4", "i8"], ["i4", "i4"]):
            arrays = sample.arrays(names, library="np", how=tuple)
            assert isinstance(arrays, tuple) and len(arrays) == 2
            assert arrays[0].tolist() == expected
            assert arrays[1].tolist() == expected
def test_interpretation():
    """Branches can be reinterpreted (e.g. signed ints read as unsigned)."""
    signed = list(range(-15, 15))
    as_u1 = list(range(241, 256)) + list(range(0, 15))
    as_u2 = list(range(65521, 65536)) + list(range(0, 15))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        # reinterpret a single branch
        assert sample["i2"].array(">u2", library="np").tolist() == as_u2
        # dict of {branch: interpretation}
        arrays = sample.arrays({"i1": ">u1", "i2": ">u2"}, library="np")
        assert set(arrays.keys()) == {"i1", "i2"}
        assert arrays["i1"].tolist() == as_u1
        assert arrays["i2"].tolist() == as_u2
        # list of (branch, interpretation) pairs with tuple output
        arrays = sample.arrays([("i1", ">u1"), ("i2", ">u2")], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == as_u1
        assert arrays[1].tolist() == as_u2
        # None means "use the branch's default interpretation"
        arrays = sample.arrays({"i1": ">u1", "i2": None}, library="np")
        assert set(arrays.keys()) == {"i1", "i2"}
        assert arrays["i1"].tolist() == as_u1
        assert arrays["i2"].tolist() == signed
        arrays = sample.arrays([("i1", ">u1"), ("i2", None)], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == as_u1
        assert arrays[1].tolist() == signed
        # the same branch may not be requested with conflicting interpretations
        with pytest.raises(ValueError):
            sample.arrays([("i1", ">u1"), ("i1", None)], library="np", how=tuple)
def test_compute():
    """Expressions over branches are evaluated and keyed by the expression text."""
    plus100 = list(range(85, 115))
    plus200 = list(range(185, 215))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert sample.arrays("i4 + 100", library="np")["i4 + 100"].tolist() == plus100
        arrays = sample.arrays(["i4 + 100", "i8 + 100"], library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i8 + 100"}
        assert arrays["i4 + 100"].tolist() == plus100
        assert arrays["i8 + 100"].tolist() == plus100
        arrays = sample.arrays(["i4 + 100", "i4 + 200"], library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i4 + 200"}
        assert arrays["i4 + 100"].tolist() == plus100
        assert arrays["i4 + 200"].tolist() == plus200
        # duplicate expressions collapse to one key in dict output...
        arrays = sample.arrays(["i4 + 100", "i4 + 100"], library="np")
        assert set(arrays.keys()) == {"i4 + 100"}
        assert arrays["i4 + 100"].tolist() == plus100
        # ...but stay positional in tuple output
        arrays = sample.arrays(["i4 + 100", "i4 + 100"], library="np", how=tuple)
        assert isinstance(arrays, tuple) and len(arrays) == 2
        assert arrays[0].tolist() == plus100
        assert arrays[1].tolist() == plus100
def test_cut():
    """cut expressions filter entries for both plain branches and computed ones."""
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert sample.arrays("i4 + 100", cut="i4 > 0", library="np")[
            "i4 + 100"
        ].tolist() == list(range(101, 115))
        arrays = sample.arrays(["i4 + 100", "i8 + 100"], cut="i4 > 0", library="np")
        assert set(arrays.keys()) == {"i4 + 100", "i8 + 100"}
        for name in ("i4 + 100", "i8 + 100"):
            assert arrays[name].tolist() == list(range(101, 115))
        arrays = sample.arrays(["i4", "i8"], cut="i4 > 0", library="np")
        assert set(arrays.keys()) == {"i4", "i8"}
        for name in ("i4", "i8"):
            assert arrays[name].tolist() == list(range(1, 15))
def test_aliases():
    """Aliases: definition, chaining, cycle rejection, and use inside cuts."""
    plus100 = list(range(85, 115))
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        # a single aliased expression
        assert sample.arrays(
            "whatever", aliases={"whatever": "i4 + 100"}, library="np"
        )["whatever"].tolist() == plus100
        # independent aliases, then an alias that refers to another alias
        for alias_defs in (
            {"one": "i4 + 100", "two": "i8 + 100"},
            {"one": "i4 + 100", "two": "one"},
        ):
            arrays = sample.arrays(["one", "two"], aliases=alias_defs, library="np")
            assert set(arrays.keys()) == {"one", "two"}
            assert arrays["one"].tolist() == plus100
            assert arrays["two"].tolist() == plus100
        # mutually recursive aliases must be rejected
        with pytest.raises(ValueError):
            sample.arrays(
                ["one", "two"], aliases={"one": "two", "two": "one"}, library="np"
            )
        # aliases are usable inside cut expressions
        arrays = sample.arrays(
            ["one", "two"],
            cut="one > 100",
            aliases={"one": "i4 + 100", "two": "i8 + 100"},
            library="np",
        )
        assert set(arrays.keys()) == {"one", "two"}
        assert arrays["one"].tolist() == list(range(101, 115))
        assert arrays["two"].tolist() == list(range(101, 115))
        # a cut may reference an alias that is not itself requested
        arrays = sample.arrays(
            ["i4"],
            cut="one > 100",
            aliases={"one": "i4 + 100", "two": "i8 + 100"},
            library="np",
        )
        assert set(arrays.keys()) == {"i4"}
        assert arrays["i4"].tolist() == list(range(1, 15))
def test_jagged():
    """Jagged branch Ai4: entry lengths cycle 0..4, restarting every five entries."""
    # Build the expected ragged structure instead of spelling out all 30 rows:
    # for each base value, rows of length 0..4 with stride-2 contents.
    expected = []
    for start in range(-15, 15, 5):
        for length in range(5):
            expected.append([start + 2 * i for i in range(length)])
    with uproot4.open(
        skhep_testdata.data_path("uproot-sample-6.20.04-uncompressed.root")
    )["sample"] as sample:
        assert [x.tolist() for x in sample.arrays("Ai4", library="np")["Ai4"]] == expected
| true | true |
f7322f73d610fd86e37f5dd2157b9b28bca8a8f7 | 43 | py | Python | msg.py | AlexCustodio1801099/alexcustodio1801099 | 96a4d45585b46087b78498f79d23b98f00b80a0c | [
"Apache-2.0"
] | null | null | null | msg.py | AlexCustodio1801099/alexcustodio1801099 | 96a4d45585b46087b78498f79d23b98f00b80a0c | [
"Apache-2.0"
] | null | null | null | msg.py | AlexCustodio1801099/alexcustodio1801099 | 96a4d45585b46087b78498f79d23b98f00b80a0c | [
"Apache-2.0"
] | null | null | null | def mensagem():
print("olá mundo!!!")
| 10.75 | 25 | 0.55814 | def mensagem():
print("olá mundo!!!")
| true | true |
f7322f82e6e309e3b1e8ce6dc31cde69ea6df68e | 1,018 | py | Python | app/forms.py | ssloat/wheatonultimate-www | 9ea27dc03adfbe63dccb404f621f9e5fa83def6b | [
"MIT"
] | null | null | null | app/forms.py | ssloat/wheatonultimate-www | 9ea27dc03adfbe63dccb404f621f9e5fa83def6b | [
"MIT"
] | null | null | null | app/forms.py | ssloat/wheatonultimate-www | 9ea27dc03adfbe63dccb404f621f9e5fa83def6b | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import RadioField, SelectMultipleField, widgets
class MultiCheckBoxField(SelectMultipleField):
    """Multiple-select form field rendered as a list of checkboxes instead of a <select> box."""
    # ListWidget + CheckboxInput make WTForms render one labelled checkbox per choice.
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()
class GoogleGroupsSubscribe(FlaskForm):
    """Signup form: choose one or more mailing lists to join."""
    # (value, label) choice pairs; the value is presumably the Google Groups
    # list name the subscribe handler acts on -- confirm against the view code.
    group = MultiCheckBoxField(
        'Which emails are you signing up for?',
        choices = [
            ('wheaton-ultimate', 'Social events'),
            ('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
            ('wheaton-soccer', 'Soccer games'),
            ('wheaton-housing', 'Housing roommates/tenants'),
        ],
    )
class GoogleGroupsUnsubscribe(FlaskForm):
    """Opt-out form: choose one or more mailing lists to leave."""
    # Same (value, label) choices as GoogleGroupsSubscribe; keep the two lists in sync.
    group = MultiCheckBoxField(
        'Which emails are you unsubscribing from?',
        choices = [
            ('wheaton-ultimate', 'Social events'),
            ('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
            ('wheaton-soccer', 'Soccer games'),
            ('wheaton-housing', 'Housing roommates/tenants'),
        ],
    )
| 33.933333 | 67 | 0.639489 | from flask_wtf import FlaskForm
from wtforms import RadioField, SelectMultipleField, widgets
class MultiCheckBoxField(SelectMultipleField):
    """Multiple-select form field rendered as a list of checkboxes instead of a <select> box."""
    # ListWidget + CheckboxInput make WTForms render one labelled checkbox per choice.
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()
class GoogleGroupsSubscribe(FlaskForm):
    """Signup form: choose one or more mailing lists to join."""
    # (value, label) choice pairs; the value is presumably the Google Groups
    # list name the subscribe handler acts on -- confirm against the view code.
    group = MultiCheckBoxField(
        'Which emails are you signing up for?',
        choices = [
            ('wheaton-ultimate', 'Social events'),
            ('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
            ('wheaton-soccer', 'Soccer games'),
            ('wheaton-housing', 'Housing roommates/tenants'),
        ],
    )
class GoogleGroupsUnsubscribe(FlaskForm):
    """Opt-out form: choose one or more mailing lists to leave."""
    # Same (value, label) choices as GoogleGroupsSubscribe; keep the two lists in sync.
    group = MultiCheckBoxField(
        'Which emails are you unsubscribing from?',
        choices = [
            ('wheaton-ultimate', 'Social events'),
            ('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
            ('wheaton-soccer', 'Soccer games'),
            ('wheaton-housing', 'Housing roommates/tenants'),
        ],
    )
| true | true |
f7322f8996b9015fbab55e9d33bd023ad461052d | 15,526 | py | Python | spirl/data/block_stacking/src/demo_gen/block_demo_policy.py | kouroshHakha/fist | 328c098789239fd892e17edefd799fc1957ab637 | [
"BSD-3-Clause"
] | 8 | 2021-10-14T03:14:23.000Z | 2022-03-15T21:31:17.000Z | spirl/data/block_stacking/src/demo_gen/block_demo_policy.py | kouroshHakha/fist | 328c098789239fd892e17edefd799fc1957ab637 | [
"BSD-3-Clause"
] | null | null | null | spirl/data/block_stacking/src/demo_gen/block_demo_policy.py | kouroshHakha/fist | 328c098789239fd892e17edefd799fc1957ab637 | [
"BSD-3-Clause"
] | 1 | 2021-09-13T20:42:28.000Z | 2021-09-13T20:42:28.000Z | import numpy as np
from collections import deque
import copy
from spirl.utils.general_utils import AttrDict, split_along_axis
from spirl.data.block_stacking.src.utils.utils import quat2euler
from spirl.data.block_stacking.src.block_stacking_env import BlockStackEnv
class BlockStackDemoPolicy:
    """Scripted open-loop policy that executes a block-stacking HL plan on a given env.

    The HL plan (from ``env_params.get_task()``) is a sequence of
    (bottom_block, top_block) index pairs; for each pair the policy plans a
    pick&place action sequence once and replays it step by step without
    re-checking the world state.  Raw actions are 5D
    (dx, dy, dz, wrist angle, gripper); ``_post_process`` may crop dimensions
    depending on the env configuration.
    """
    GRASP_OFFSET = 0.08 # vertical offset between gripper and block pos for grasping
    PICK_OFFSET = 0.14 # additional vertical offset btw robot and block for picking
    PLACE_OFFSET = 0.17 # additional vertical offset btw robot and block for placing
    ACT_RANGE = [0.05, 0.05, 0.05, np.pi/10, 0.5] # maximum action scale for each action dimension
    GRAVITY_SUPPORT = 0.01 # z dimension action when noop to prevent robot from falling
    GRIPPER_OPEN = 1.
    GRIPPER_CLOSED = 0.
    MULTIPLIER = 20. # scales planned position deltas into effective env actions (see _post_process)
    EPS = 0.01 # reach tolerance; used by the closed-loop subclass
    def __init__(self, env_params):
        """
        :param env_params: env configuration; must provide table_size, block_size,
                           max_tower_height, include_quat, include_vel, allow_rotate,
                           dimension and get_task() returning a list of HL index
                           tuples, e.g. [(1,2), (3,5)] (bottom, top).
        """
        # TODO consider whether to make task/hl_plan a proper class with transition subclass (to make reuse for kitchen easier)
        self.env_params = env_params
        # hover height: table top + tallest possible tower + clearance
        self.lift_height = env_params.table_size[-1] + env_params.block_size * 2 * env_params.max_tower_height + 0.2
        self.block_height = env_params.block_size * 2
        self._hl_plan = None
        self._hl_plan_to_run = deque()
        self._action_plan = None
        self._u_obs = None # this stores env state when planning action sequence
        self._update_robot_state = True
    def reset(self):
        """Samples a fresh HL plan from the env params and clears all cached plans."""
        self._hl_plan = self.env_params.get_task()
        self._action_plan = None
        self._hl_plan_to_run = deque(self._hl_plan)
        self._u_obs = None
    def act(self, obs):
        """Returns the next low-level action for flat observation `obs` (None when done)."""
        if self.execution_finished: # should not call 'act' if execution is already finished
            return None
        self._u_obs = BlockUnflattenWrapper(BlockStackEnv.unflatten_block_obs(copy.deepcopy(obs),
                                                         include_quat=self.env_params.include_quat,
                                                         include_vel=self.env_params.include_vel))
        while True:
            if self._action_plan is None:
                if not self._hl_plan_to_run:
                    # plan exhausted -> emit a zero action (gravity support added in _post_process)
                    self._action_plan = None
                    ac = np.zeros(5,)
                    break
                # generate new action plan
                self._action_plan = self._plan_actions()
            try:
                ac = next(self._action_plan)
                break
            except (StopIteration, IndexError): # generator exhausted
                self._action_plan = None
        ac = self._post_process(ac)
        return ac
    @property
    def execution_finished(self):
        """Checks whether the plan execution has been finished."""
        return self._action_plan is None and not self._hl_plan_to_run
    def _plan_actions(self):
        """Plans LL actions given HL action plan and current env state (generator)."""
        # generate pick-place plan for one stacking subtask
        bottom_block, top_block = self._hl_plan_to_run.popleft()
        raw_plan = self._pick_place(bottom_block, top_block)
        for ac in split_along_axis(raw_plan, axis=0):
            yield ac
    def _pick_place(self, bottom_block, top_block):
        """Plans the full action sequence for pick&place of a single block."""
        action_plan = []
        # pick up block
        pick_target_pos = self._get_pick_target(top_block)
        top_block_quat = self._u_obs.block_quat(top_block)
        action_plan.append(self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN)[0])
        action_plan.append(self._grasp())
        # place block
        place_target_pos = self._get_place_target(bottom_block)
        bottom_block_quat = self._u_obs.block_quat(bottom_block)
        action_plan.append(self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED)[0])
        action_plan.append(self._place())
        return np.concatenate(action_plan)
    def _get_pick_target(self, block):
        """Position above `block` from which it can be grasped (pos + PICK_OFFSET in z)."""
        block_pos = self._u_obs.block_pos(block)
        block_pos[2] += self.PICK_OFFSET
        return block_pos
    def _get_place_target(self, block):
        """Position above `block` at which another block can be released (pos + PLACE_OFFSET in z)."""
        block_pos = self._u_obs.block_pos(block)
        block_pos[2] += self.PLACE_OFFSET
        return block_pos
    def _move_to(self, target_block_pos, target_block_quat, gripper, waypoints=None):
        """
        Plans action sequence for moving robot arm to block.
        :param target_block_pos: 3D target position (already offset above the block).
        :param target_block_quat: target block quaternion; only the first Euler angle
                                  is used (single-axis rotation assumed).
        :param gripper: gripper command held during execution (GRIPPER_OPEN/CLOSED).
        :param waypoints: (optional) list of precomputed waypoints to continue following.
        :return: (action sequence, remaining waypoints without the current robot pose)
        """
        block_angle = quat2euler(*target_block_quat)[0] # assume single-axis rotation
        robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
        if waypoints is None:
            # up -> over -> down trajectory: lift to safe height, translate, descend
            waypoints = [
                [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos],
                [robot_pos[0], robot_pos[1], self.lift_height, robot_angle, gripper],
                [target_block_pos[0], target_block_pos[1], self.lift_height, robot_angle, gripper],
                [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET, block_angle, gripper],
            ]
            # add disturbed subgoals in between waypoints for better state coverage
            subgoals = [
                self._sample_disturbed_subgoal(robot_pos,
                                               [robot_pos[0], robot_pos[1], self.lift_height])
                + [robot_angle, gripper],
                self._sample_disturbed_subgoal([robot_pos[0], robot_pos[1], self.lift_height],
                                               [target_block_pos[0], target_block_pos[1], self.lift_height])
                + [robot_angle, gripper],
                self._sample_disturbed_subgoal([target_block_pos[0], target_block_pos[1], self.lift_height],
                                               [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET])
                + [block_angle, gripper],
            ]
            # assemble final waypoint list
            waypoints = [waypoints[0], subgoals[0], waypoints[1], subgoals[1], waypoints[2], subgoals[2], waypoints[3]]
        else:
            # continue a precomputed waypoint list, prepending the current pose
            waypoints = [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos]] \
                            + waypoints
        if self._update_robot_state:
            # open-loop mode: forward-simulate the gripper to the final waypoint
            self._u_obs.gripper_pos, self._u_obs.gripper_angle, self._u_obs.gripper_finger_pos = \
                np.array(waypoints[-1][:3]), waypoints[-1][3], gripper # update robot state
        return self._waypoints2plan(waypoints, absolute_dims=[-1]), waypoints[1:]
    def _grasp(self):
        """Moves robot GRASP-offset down, closes gripper (plus noop settle steps), moves GRASP-offset up."""
        robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
        waypoints = [
            [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],
            [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_OPEN],
            [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED]]
        waypoints += [waypoints[-1]] * 3 # noop steps so the gripper can fully close
        waypoints += [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED]]
        if self._update_robot_state:
            self._u_obs.gripper_finger_pos = self.GRIPPER_CLOSED # update robot state
        return self._waypoints2plan(waypoints, absolute_dims=[-1])
    def _place(self):
        """Moves robot GRASP-offset down, opens gripper, retreats back up to lift height."""
        robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
        waypoints = [
            [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED],
            [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED],
            [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_OPEN],
            [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],
            [robot_pos[0], robot_pos[1], self.lift_height, robot_angle, self.GRIPPER_OPEN]
        ]
        if self._update_robot_state:
            self._u_obs.gripper_finger_pos = self.GRIPPER_OPEN # update robot state
        return self._waypoints2plan(waypoints, absolute_dims=[-1])
    def _waypoints2plan(self, waypoints, absolute_dims=None):
        """Concatenates interpolated action segments between consecutive waypoints."""
        plan = np.concatenate([self._interpolate(waypoints[i], waypoints[i+1], absolute_dims)
                               for i in range(len(waypoints) - 1)])
        return plan
    def _interpolate(self, start, goal, absolute_dims=None):
        """
        Interpolates between start and goal linearly while taking max_actions into account.
        Since action effect is smaller than actual action scale we need a multiplier to treat the distance farther than the actual one.
        :param absolute_dims: list of dimensions for which action will be set to goal state.
        :return: list/array of per-step action deltas (empty (0, dim) array if already at goal).
        """
        diff = np.array(goal) - np.array(start)
        # step count chosen so no dimension exceeds its per-step ACT_RANGE limit
        n_steps = int(np.max(np.ceil(np.divide(np.abs(diff), np.array(self.ACT_RANGE)))))
        for dim in absolute_dims if absolute_dims is not None else []:
            diff[dim] = goal[dim] * n_steps # hack to make dims action values absolute
        if n_steps > 0:
            actions = [diff / n_steps for _ in range(n_steps)]
            return actions
        else:
            return np.zeros([0, diff.shape[-1]])
    def _post_process(self, ac):
        """Scales, gravity-compensates and crops a raw 5D action in place for the env."""
        # scale action
        ac[:3] *= self.MULTIPLIER # scale lateral actions to make them reach the target states
        # add gravity support for noop
        if np.sum(ac[:-1]) == 0:
            ac[2] += self.GRAVITY_SUPPORT
        # crop action dimensions according to env params
        if not self.env_params.allow_rotate:
            ac = np.concatenate([ac[:3], ac[4:]])
        if self.env_params.dimension == 2:
            ac = ac[1:]
        return ac
    def _sample_disturbed_subgoal(self, start_pos, goal_pos, max_displacement_ratio=0.2):
        """Samples a subgoal with some random offset to the direct connection line."""
        start_pos, goal_pos = np.array(start_pos), np.array(goal_pos)
        diff = goal_pos - start_pos
        # generate unit disturbance direction
        # NOTE(review): the original comment claimed this is orthogonal to diff,
        # but [d0, d2, -d1] is only orthogonal to diff when diff[0] == 0 -- confirm intent.
        noise = np.asarray([diff[0], diff[2], -diff[1]])
        noise /= np.linalg.norm(noise) # normalize it
        # sample random offset along connection line + random length
        length = (np.random.rand() * 2 * max_displacement_ratio - max_displacement_ratio) * np.linalg.norm(diff)
        offset = (np.random.rand() * 0.6 + 0.2) * diff
        # compute subgoal position
        subgoal_pos = start_pos + offset + length * noise
        return [coord for coord in subgoal_pos]
class ClosedLoopBlockStackDemoPolicy(BlockStackDemoPolicy):
    """Closed-loop variant of BlockStackDemoPolicy.

    Instead of committing to one pre-planned action sequence, it re-reads the
    observed block / gripper state after every yielded action and retries each
    phase (reach, grasp, place) until the corresponding success predicate holds.
    """
    PICK_OFFSET = 0.11      # slightly lower pick approach than the open-loop policy

    def __init__(self, env_params):
        super().__init__(env_params)
        self._update_robot_state = False    # gripper state comes from observations, not forward simulation

    def _plan_actions(self):
        """Yields actions for the next stacking subtask, replanning from the latest obs."""
        bottom_block, top_block = self._hl_plan_to_run.popleft()

        # phase 1: approach + grasp, repeated until the block is actually lifted
        waypoints = None
        while not self._lifted(top_block):
            while not self._reached(self._get_pick_target(top_block)):
                pick_target_pos = self._get_pick_target(top_block)
                top_block_quat = self._u_obs.block_quat(top_block)
                actions, waypoints = self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN, waypoints)
                if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:
                    waypoints = waypoints[1:]   # advance once the current waypoint is hit
                if len(actions) > 0:
                    yield actions[0]            # execute only the first action, then replan
                else:
                    break
            for action in split_along_axis(self._grasp(), axis=0):
                yield action

        # phase 2: carry the block above the bottom block until the place pose is reached
        waypoints = None
        while not self._reached(self._get_place_target(bottom_block)):
            place_target_pos = self._get_place_target(bottom_block)
            bottom_block_quat = self._u_obs.block_quat(bottom_block)
            actions, waypoints = self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED, waypoints)
            if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:
                waypoints = waypoints[1:]
            if len(actions) > 0:
                yield actions[0]
            else:
                break

        # phase 3: release, repeated until the tower check passes
        while not self._stacked(top_block, bottom_block):
            for action in split_along_axis(self._place(), axis=0):
                yield action

    def _lifted(self, top_block):
        """True if the gripper hovers laterally centered over `top_block` within the lift window."""
        top_block_pos = self._u_obs.block_pos(top_block)
        gripper_pos = self._u_obs.gripper_pos
        x_dist = np.abs(gripper_pos[0] - top_block_pos[0])
        y_dist = np.abs(gripper_pos[1] - top_block_pos[1])
        z_vec = gripper_pos[-1] - top_block_pos[-1]
        # block must hang between 0.08 and 0.14 below the gripper
        return bool(x_dist < self.env_params.block_size
                    and y_dist < self.env_params.block_size
                    and 0.08 < z_vec < 0.14)

    def _stacked(self, top_block, bottom_block):
        """True once `top_block` rests centered on top of `bottom_block`."""
        top_pos = self._u_obs.block_pos(top_block)
        bottom_pos = self._u_obs.block_pos(bottom_block)
        x_dist = np.abs(top_pos[0] - bottom_pos[0])
        # bug fix: y alignment previously compared the x coordinates twice
        y_dist = np.abs(top_pos[1] - bottom_pos[1])
        x_dist_correct = x_dist < self.env_params.block_size
        y_dist_correct = y_dist < self.env_params.block_size
        # vertical gap must equal one block height (2 * half-extent) up to 5 mm
        z_vec = top_pos[2] - bottom_pos[2]
        z_vec_correct = np.abs(z_vec - 2 * self.env_params.block_size) < 0.005
        return x_dist_correct and y_dist_correct and z_vec_correct

    def _reached(self, pos):
        """True if the gripper is within EPS of `pos` raised by GRASP_OFFSET in z.

        Works on a copy so the caller's target list is not mutated in place
        (the original version modified `pos` as a side effect).
        """
        target_pos = np.array(pos, dtype=np.float64)
        target_pos[2] += self.GRASP_OFFSET
        return np.linalg.norm(target_pos - self._u_obs.gripper_pos) < self.EPS

    def _reached_waypoint(self, waypoint):
        """True if the gripper is within EPS of the (x, y, z) part of `waypoint`."""
        return np.linalg.norm(np.array(waypoint[:3]) - self._u_obs.gripper_pos) < self.EPS
class BlockUnflattenWrapper(AttrDict):
    """Attribute-dict over the unflattened observation with per-block accessors."""

    def block_pos(self, idx):
        """Position of block `idx` as a plain Python list (copy, safe to mutate)."""
        return [coord for coord in self['block_pos'][idx]]

    def block_quat(self, idx):
        """Quaternion of block `idx` as a plain Python list (copy, safe to mutate)."""
        return [coord for coord in self['block_quat'][idx]]

    def set_block_pos(self, idx, val):
        """Overwrites the stored position entry of block `idx`."""
        self['block_pos'][idx] = val

    def set_block_quat(self, idx, val):
        """Overwrites the stored quaternion entry of block `idx`."""
        self['block_quat'][idx] = val
if __name__ == "__main__":
    # Smoke test: build a random (fake) observation, sample a 4-block stacking
    # task, and print the first action the scripted policy produces.
    from spirl.data.block_stacking.src.block_task_generator import SingleTowerBlockTaskGenerator
    obs = AttrDict(
        block_pos=np.random.rand(4*3),
        block_quat=np.random.rand(4*4),
        gripper_pos=np.random.rand(3),
        gripper_angle=np.random.rand(),
        gripper_finger_pos=np.random.rand(),
    )
    task_gen = SingleTowerBlockTaskGenerator({}, 4)
    task = task_gen.sample()
    # NOTE(review): BlockStackDemoPolicy.__init__ expects full env params
    # (table_size, block_size, get_task(), ...) but receives only the sampled
    # task here, and act() expects a flattened obs vector -- confirm this
    # snippet still runs against the current APIs.
    policy = BlockStackDemoPolicy(task)
    print(policy.act(obs))
| 45.002899 | 135 | 0.631972 | import numpy as np
from collections import deque
import copy
from spirl.utils.general_utils import AttrDict, split_along_axis
from spirl.data.block_stacking.src.utils.utils import quat2euler
from spirl.data.block_stacking.src.block_stacking_env import BlockStackEnv
class BlockStackDemoPolicy:
GRASP_OFFSET = 0.08
PICK_OFFSET = 0.14
PLACE_OFFSET = 0.17
ACT_RANGE = [0.05, 0.05, 0.05, np.pi/10, 0.5]
GRAVITY_SUPPORT = 0.01
GRIPPER_OPEN = 1.
GRIPPER_CLOSED = 0.
MULTIPLIER = 20.
EPS = 0.01
def __init__(self, env_params):
self.env_params = env_params
self.lift_height = env_params.table_size[-1] + env_params.block_size * 2 * env_params.max_tower_height + 0.2
self.block_height = env_params.block_size * 2
self._hl_plan = None
self._hl_plan_to_run = deque()
self._action_plan = None
self._u_obs = None
self._update_robot_state = True
def reset(self):
self._hl_plan = self.env_params.get_task()
self._action_plan = None
self._hl_plan_to_run = deque(self._hl_plan)
self._u_obs = None
def act(self, obs):
if self.execution_finished:
return None
self._u_obs = BlockUnflattenWrapper(BlockStackEnv.unflatten_block_obs(copy.deepcopy(obs),
include_quat=self.env_params.include_quat,
include_vel=self.env_params.include_vel))
while True:
if self._action_plan is None:
if not self._hl_plan_to_run:
self._action_plan = None
ac = np.zeros(5,)
break
self._action_plan = self._plan_actions()
try:
ac = next(self._action_plan)
break
except (StopIteration, IndexError):
self._action_plan = None
ac = self._post_process(ac)
return ac
@property
def execution_finished(self):
return self._action_plan is None and not self._hl_plan_to_run
def _plan_actions(self):
bottom_block, top_block = self._hl_plan_to_run.popleft()
raw_plan = self._pick_place(bottom_block, top_block)
for ac in split_along_axis(raw_plan, axis=0):
yield ac
def _pick_place(self, bottom_block, top_block):
action_plan = []
pick_target_pos = self._get_pick_target(top_block)
top_block_quat = self._u_obs.block_quat(top_block)
action_plan.append(self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN)[0])
action_plan.append(self._grasp())
place_target_pos = self._get_place_target(bottom_block)
bottom_block_quat = self._u_obs.block_quat(bottom_block)
action_plan.append(self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED)[0])
action_plan.append(self._place())
return np.concatenate(action_plan)
def _get_pick_target(self, block):
block_pos = self._u_obs.block_pos(block)
block_pos[2] += self.PICK_OFFSET
return block_pos
def _get_place_target(self, block):
block_pos = self._u_obs.block_pos(block)
block_pos[2] += self.PLACE_OFFSET
return block_pos
    def _move_to(self, target_block_pos, target_block_quat, gripper, waypoints=None):
        """Plan a gripper motion to a grasp pose above ``target_block_pos``.

        When ``waypoints`` is None a fresh path is planned: lift to
        ``self.lift_height``, translate above the target, descend to
        ``target_block_pos[2] + GRASP_OFFSET`` — with a randomly disturbed
        subgoal inserted between every pair of waypoints. Otherwise the given
        (remaining) waypoints are reused, prefixed with the current gripper pose.

        Args:
            target_block_pos: xyz target position (grasp offset added here).
            target_block_quat: target block orientation; only the first Euler
                angle is used as the final gripper yaw.
            gripper: gripper command to hold during the motion.
            waypoints: optional remaining waypoints from a previous plan.

        Returns:
            Tuple of (interpolated action plan, remaining waypoints excluding
            the current-pose prefix).
        """
        block_angle = quat2euler(*target_block_quat)[0]
        robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
        if waypoints is None:
            # waypoint format: [x, y, z, yaw, gripper]
            waypoints = [
                [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos],
                [robot_pos[0], robot_pos[1], self.lift_height, robot_angle, gripper],
                [target_block_pos[0], target_block_pos[1], self.lift_height, robot_angle, gripper],
                [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET, block_angle, gripper],
            ]
            # one randomly displaced subgoal per segment, to diversify demos
            subgoals = [
                self._sample_disturbed_subgoal(robot_pos,
                                               [robot_pos[0], robot_pos[1], self.lift_height])
                + [robot_angle, gripper],
                self._sample_disturbed_subgoal([robot_pos[0], robot_pos[1], self.lift_height],
                                               [target_block_pos[0], target_block_pos[1], self.lift_height])
                + [robot_angle, gripper],
                self._sample_disturbed_subgoal([target_block_pos[0], target_block_pos[1], self.lift_height],
                                               [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET])
                + [block_angle, gripper],
            ]
            # interleave: waypoint, subgoal, waypoint, subgoal, ...
            waypoints = [waypoints[0], subgoals[0], waypoints[1], subgoals[1], waypoints[2], subgoals[2], waypoints[3]]
        else:
            # reuse remaining waypoints, starting from the current gripper pose
            waypoints = [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos]] \
                        + waypoints
        if self._update_robot_state:
            # open-loop mode: forward-simulate the gripper to the final waypoint
            self._u_obs.gripper_pos, self._u_obs.gripper_angle, self._u_obs.gripper_finger_pos = \
                np.array(waypoints[-1][:3]), waypoints[-1][3], gripper
        return self._waypoints2plan(waypoints, absolute_dims=[-1]), waypoints[1:]
def _grasp(self):
robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
waypoints = [
[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],
[robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_OPEN],
[robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED]]
waypoints += [waypoints[-1]] * 3
waypoints += [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED]]
if self._update_robot_state:
self._u_obs.gripper_finger_pos = self.GRIPPER_CLOSED
return self._waypoints2plan(waypoints, absolute_dims=[-1])
def _place(self):
robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle
waypoints = [
[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED],
[robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED],
[robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_OPEN],
[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],
[robot_pos[0], robot_pos[1], self.lift_height, robot_angle, self.GRIPPER_OPEN]
]
if self._update_robot_state:
self._u_obs.gripper_finger_pos = self.GRIPPER_OPEN
return self._waypoints2plan(waypoints, absolute_dims=[-1])
def _waypoints2plan(self, waypoints, absolute_dims=None):
plan = np.concatenate([self._interpolate(waypoints[i], waypoints[i+1], absolute_dims)
for i in range(len(waypoints) - 1)])
return plan
def _interpolate(self, start, goal, absolute_dims=None):
diff = np.array(goal) - np.array(start)
n_steps = int(np.max(np.ceil(np.divide(np.abs(diff), np.array(self.ACT_RANGE)))))
for dim in absolute_dims if absolute_dims is not None else []:
diff[dim] = goal[dim] * n_steps
if n_steps > 0:
actions = [diff / n_steps for _ in range(n_steps)]
return actions
else:
return np.zeros([0, diff.shape[-1]])
    def _post_process(self, ac):
        """Scale a raw 5-dim action [dx, dy, dz, dyaw, gripper] into the env's action space.

        NOTE(review): mutates ``ac`` in place before slicing — callers must not
        reuse the passed array.
        """
        ac[:3] *= self.MULTIPLIER
        if np.sum(ac[:-1]) == 0:
            # idle motion: add a small vertical term (presumably to counteract
            # gravity sag while holding position — named GRAVITY_SUPPORT)
            ac[2] += self.GRAVITY_SUPPORT
        if not self.env_params.allow_rotate:
            # drop the rotation dim (index 3)
            ac = np.concatenate([ac[:3], ac[4:]])
        if self.env_params.dimension == 2:
            # 2D env: drop the first positional dim — TODO confirm axis convention
            ac = ac[1:]
        return ac
def _sample_disturbed_subgoal(self, start_pos, goal_pos, max_displacement_ratio=0.2):
start_pos, goal_pos = np.array(start_pos), np.array(goal_pos)
diff = goal_pos - start_pos
noise = np.asarray([diff[0], diff[2], -diff[1]])
noise /= np.linalg.norm(noise) # normalize it
# sample random offset along connection line + random length
length = (np.random.rand() * 2 * max_displacement_ratio - max_displacement_ratio) * np.linalg.norm(diff)
offset = (np.random.rand() * 0.6 + 0.2) * diff
# compute subgoal position
subgoal_pos = start_pos + offset + length * noise
return [coord for coord in subgoal_pos]
class ClosedLoopBlockStackDemoPolicy(BlockStackDemoPolicy):
    """Closed-loop variant of the scripted block-stacking demo policy.

    Instead of rolling out a precomputed open-loop trajectory, every stage of
    the pick-and-place is wrapped in an observation-based success check and
    replanned from the current state, so failed grasps / placements are retried.
    """
    PICK_OFFSET = 0.11      # slightly higher grasp approach than the open-loop parent

    def __init__(self, env_params):
        super().__init__(env_params)
        # gripper state is read from the observation each step instead of being
        # forward-simulated by the planner
        self._update_robot_state = False

    def _plan_actions(self):
        """Yield actions for one stacking subtask, replanning after every step."""
        bottom_block, top_block = self._hl_plan_to_run.popleft()

        # pick phase: approach + grasp, repeated until the block is actually lifted
        waypoints = None
        while not self._lifted(top_block):
            while not self._reached(self._get_pick_target(top_block)):
                pick_target_pos = self._get_pick_target(top_block)
                top_block_quat = self._u_obs.block_quat(top_block)
                actions, waypoints = self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN, waypoints)
                # pop the current waypoint once the gripper is close enough to it
                if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:
                    waypoints = waypoints[1:]
                if len(actions) > 0:
                    yield actions[0]    # execute only the first action, then replan
                else:
                    break
            for action in split_along_axis(self._grasp(), axis=0):
                yield action

        # place phase: carry the block above the bottom block (fresh waypoints)
        waypoints = None
        while not self._reached(self._get_place_target(bottom_block)):
            place_target_pos = self._get_place_target(bottom_block)
            bottom_block_quat = self._u_obs.block_quat(bottom_block)
            actions, waypoints = self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED, waypoints)
            if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:
                waypoints = waypoints[1:]
            if len(actions) > 0:
                yield actions[0]
            else:
                break

        # release phase: open gripper and retreat until the stack check passes
        while not self._stacked(top_block, bottom_block):
            for action in split_along_axis(self._place(), axis=0):
                yield action

    def _lifted(self, top_block):
        """Check whether the gripper currently holds ``top_block`` at grasp height."""
        top_block_pos = self._u_obs.block_pos(top_block)
        gripper_pos = self._u_obs.gripper_pos
        lifted = True
        lifted &= np.abs(gripper_pos[0] - top_block_pos[0]) < self.env_params.block_size
        lifted &= np.abs(gripper_pos[1] - top_block_pos[1]) < self.env_params.block_size
        # gripper must hover 0.08..0.14 above the block center (grasped pose)
        z_vec = gripper_pos[-1] - top_block_pos[-1]
        lifted &= z_vec < 0.14
        lifted &= z_vec > 0.08
        return lifted

    def _stacked(self, top_block, bottom_block):
        """Check whether ``top_block`` rests centered on top of ``bottom_block``."""
        top_pos = self._u_obs.block_pos(top_block)
        bottom_pos = self._u_obs.block_pos(bottom_block)
        x_dist = np.abs(top_pos[0] - bottom_pos[0])
        # bug fix: y distance previously re-compared the x coordinates
        y_dist = np.abs(top_pos[1] - bottom_pos[1])
        x_dist_correct = x_dist < self.env_params.block_size
        y_dist_correct = y_dist < self.env_params.block_size
        # vertical offset must equal one block height (2 * half-size) within 5mm
        z_vec = top_pos[2] - bottom_pos[2]
        z_vec_correct = np.abs(z_vec - 2 * self.env_params.block_size) < 0.005
        return x_dist_correct and y_dist_correct and z_vec_correct

    def _reached(self, pos):
        """Check whether the gripper is at the hover pose GRASP_OFFSET above ``pos``.

        (The final waypoint of ``_move_to`` targets ``pos[2] + GRASP_OFFSET``.)
        """
        # do not mutate the caller's list (the original aliased and mutated it)
        target_pos = np.array(pos, dtype=float)
        target_pos[2] += self.GRASP_OFFSET
        return np.linalg.norm(target_pos - self._u_obs.gripper_pos) < self.EPS

    def _reached_waypoint(self, waypoint):
        """Check whether the gripper is within EPS of the waypoint's xyz position."""
        return np.linalg.norm(np.array(waypoint[:3]) - self._u_obs.gripper_pos) < self.EPS
class BlockUnflattenWrapper(AttrDict):
    """Attribute-dict view over an unflattened block-stacking observation.

    Adds indexed accessors for the per-block position / orientation entries.
    """

    def block_pos(self, idx):
        """Return the position entry of block ``idx`` as a plain list."""
        entry = self['block_pos'][idx]
        return list(entry)

    def block_quat(self, idx):
        """Return the quaternion entry of block ``idx`` as a plain list."""
        entry = self['block_quat'][idx]
        return list(entry)

    def set_block_pos(self, idx, val):
        """Overwrite the stored position entry of block ``idx``."""
        self['block_pos'][idx] = val

    def set_block_quat(self, idx, val):
        """Overwrite the stored quaternion entry of block ``idx``."""
        self['block_quat'][idx] = val
if __name__ == "__main__":
    # smoke test: build a random observation, sample a task and run one policy step
    from spirl.data.block_stacking.src.block_task_generator import SingleTowerBlockTaskGenerator
    obs = AttrDict(
        # NOTE(review): flat vectors — block_pos(idx) then returns a single
        # float wrapped in list(); confirm the expected per-block layout
        block_pos=np.random.rand(4*3),
        block_quat=np.random.rand(4*4),
        gripper_pos=np.random.rand(3),
        gripper_angle=np.random.rand(),
        gripper_finger_pos=np.random.rand(),
    )
    task_gen = SingleTowerBlockTaskGenerator({}, 4)
    task = task_gen.sample()
    # NOTE(review): BlockStackDemoPolicy.__init__ expects env_params but a task
    # is passed here — this demo entry point may be outdated; verify before use
    policy = BlockStackDemoPolicy(task)
    print(policy.act(obs))
    # print(policy._plan_actions(obs))
| true | true |
f7323123b654734d406913c73fe56a176996774e | 1,456 | py | Python | packages/merlin/cli/About.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/merlin/cli/About.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/merlin/cli/About.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
# externals
import merlin
# declaration
class About(merlin.shells.command, family='merlin.cli.about'):
    """
    Display information about this application
    """

    @merlin.export(tip="print the copyright note")
    def copyright(self, plexus, **kwds):
        """
        Print the copyright note of the merlin package
        """
        # show the copyright note
        plexus.info.log(merlin.meta.copyright)
        # all done
        return

    @merlin.export(tip="print out the acknowledgments")
    def credits(self, plexus, **kwds):
        """
        Print out the acknowledgments of the merlin package
        """
        # show the package header (presumably carries the credits — confirm)
        plexus.info.log(merlin.meta.header)
        # all done
        return

    @merlin.export(tip="print out the license and terms of use")
    def license(self, plexus, **kwds):
        """
        Print out the license and terms of use of the merlin package
        """
        # show the license
        plexus.info.log(merlin.meta.license)
        # all done
        return

    @merlin.export(tip="print the version number")
    def version(self, plexus, **kwds):
        """
        Print the version of the merlin package
        """
        # show the package header (presumably includes the version — confirm)
        plexus.info.log(merlin.meta.header)
        # all done
        return
# end of file
| 23.111111 | 68 | 0.592033 |
import merlin
class About(merlin.shells.command, family='merlin.cli.about'):
@merlin.export(tip="print the copyright note")
def copyright(self, plexus, **kwds):
plexus.info.log(merlin.meta.copyright)
return
@merlin.export(tip="print out the acknowledgments")
def credits(self, plexus, **kwds):
plexus.info.log(merlin.meta.header)
return
@merlin.export(tip="print out the license and terms of use")
def license(self, plexus, **kwds):
plexus.info.log(merlin.meta.license)
return
@merlin.export(tip="print the version number")
def version(self, plexus, **kwds):
plexus.info.log(merlin.meta.header)
return
| true | true |
f732324215dffd9d37733babb43056a844a03632 | 140,440 | py | Python | python/paddle/fluid/layers/control_flow.py | grasswolfs/Paddle | 0c2fff447c7d5b0bbad473a1590872c5343e1e56 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/layers/control_flow.py | grasswolfs/Paddle | 0c2fff447c7d5b0bbad473a1590872c5343e1e56 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/layers/control_flow.py | grasswolfs/Paddle | 0c2fff447c7d5b0bbad473a1590872c5343e1e56 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
from .layer_function_generator import autodoc, templatedoc
from .tensor import assign, cast, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from .nn import logical_and, logical_not, logical_or
from .utils import assert_same_structure, map_structure
import numpy
import warnings
import six
from functools import reduce, partial
from ..data_feeder import convert_dtype, check_type_and_dtype
from ... import compat as cpt
from ..backward import _infer_var_data_type_shape_
# Public API of this control-flow module.
__all__ = [
    'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
    'less_equal', 'greater_than', 'greater_equal', 'equal', 'not_equal',
    'array_read', 'array_length', 'cond', 'IfElse', 'DynamicRNN', 'StaticRNN',
    'reorder_lod_tensor_by_rank', 'Print', 'is_empty', 'case', 'switch_case',
    'while_loop'
]
def select_output(input, outputs, mask):
    """
    **select_output**

    Copy ``input`` into exactly one of ``outputs``, chosen by the integer held
    in ``mask``. Useful for building conditional control flow.

    Args:
        input(Variable): The variable to be forwarded.
        outputs(tuple|list): Candidate output variables.
        mask(Variable): A tensor containing 1 integer number selecting which
            entry of ``outputs`` receives the input.

    Returns:
        Variable: The ``outputs`` argument, unchanged.
    """
    helper = LayerHelper('select_output', **locals())
    op_inputs = {'X': input, 'Mask': mask}
    helper.append_op(
        type='select_output', inputs=op_inputs, outputs={'Out': outputs})
    return outputs
def select_input(inputs, mask):
    """
    **select_input**

    This API takes in multiple inputs and uses an integer mask to select one
    input to output. It is useful in control flow.

    Args:
        inputs(tuple|list|Variable): The candidate input variables.
        mask(Variable): A tensor containing 1 integer number selecting which
            input to output

    Returns:
        Variable: The selected input variable
    """
    helper = LayerHelper('select_input', **locals())
    # infer the output's dtype/shape from the first candidate; all candidates
    # are assumed to share them
    if isinstance(inputs, (list, tuple)):
        input_dtype = inputs[0].dtype
        input_shape = inputs[0].shape
    else:
        input_dtype = inputs.dtype
        input_shape = inputs.shape
    out = helper.create_variable(dtype=input_dtype, shape=input_shape)
    helper.append_op(
        type='select_input',
        inputs={'X': inputs,
                'Mask': mask},
        outputs={'Out': out})
    return out
def split_lod_tensor(input, mask, level=0):
    """
    Split ``input`` into a True branch and a False branch according to the
    boolean ``mask``, applied at lod level ``level``. The input must carry the
    complete lod information needed to construct the outputs. Mainly used by
    IfElse to route data into its two blocks.

    Args:
        input(tuple|list|None): The input tensor that contains complete
                                lod information needed to construct the output.
        mask(list): A bool column vector which masks the input.
        level(int): The specific lod level to split.

    Returns:
        tuple(Variable, Variable):
        The true branch of tensor as per the mask applied to input.
        The false branch of tensor as per the mask applied to input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[1])
          x.persistable = True

          y = fluid.layers.data(name='y', shape=[1])
          y.persistable = True

          out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=level)
    """
    helper = LayerHelper('split_lod_tensor', **locals())
    make_out = helper.create_variable_for_type_inference
    out_true = make_out(dtype=input.dtype)
    out_false = make_out(dtype=input.dtype)
    op_inputs = {'X': input, 'Mask': mask}
    helper.append_op(
        type='split_lod_tensor',
        inputs=op_inputs,
        outputs={'OutTrue': out_true,
                 'OutFalse': out_false},
        attrs={'level': level})
    return out_true, out_false
def merge_lod_tensor(in_true, in_false, x, mask, level=0):
    """
    **merge_lod_tensor**

    Merge the True and False branches (as produced by ``split_lod_tensor``)
    back into a single tensor, guided by the boolean ``mask`` at lod level
    ``level``. ``x`` supplies the complete lod information needed to assemble
    the output. Used by IfElse to merge the outputs of its two blocks.

    Args:
        in_true(tuple|list|None): The True branch to be merged.
        in_false(tuple|list|None): The False branch to be merged.
        x(tuple|list|None): The input tensor that contains complete
                            lod information needed to construct the output.
        mask(list): A bool column vector which masks the input.
        level(int): The specific lod level to merge.

    Returns:
        Variable: The merged output tensor.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = layers.data(
                      name='x', shape=[1], dtype='float32', stop_gradient=False)
          y = layers.data(
                name='y', shape=[1], dtype='bool', stop_gradient=False)
          level = 0
          out_true, out_false = layers.split_lod_tensor(
                input=x, mask=y, level=level)
          out = layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
    """
    helper = LayerHelper('merge_lod_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
    op_inputs = {'X': x,
                 'Mask': mask,
                 'InTrue': in_true,
                 'InFalse': in_false}
    helper.append_op(
        type='merge_lod_tensor',
        inputs=op_inputs,
        outputs={'Out': out},
        attrs={'level': level})
    return out
def Print(input,
          first_n=-1,
          message=None,
          summarize=20,
          print_tensor_name=True,
          print_tensor_type=True,
          print_tensor_shape=True,
          print_tensor_lod=True,
          print_phase='both'):
    '''
    **Print operator**

    This creates a print op that will print when a tensor is accessed.

    Wraps the tensor passed in so that whenever that a tensor is accessed,
    the message `message` is printed, along with the current value of the
    tensor `t`.

    Args:
        input (Variable): A Tensor to print.
        summarize (int): Number of elements in the tensor to be print. If it's
                value is -1, then all elements in the tensor will be print.
        message (str): A string message to print as a prefix.
        first_n (int): Only log `first_n` number of times.
        print_tensor_name (bool, optional): Print the tensor name. Default: True.
        print_tensor_type (bool, optional): Print the tensor type. Default: True.
        print_tensor_shape (bool, optional): Print the tensor shape. Default: True.
        print_tensor_lod (bool, optional): Print the tensor lod. Default: True.
        print_phase (str): Which phase to display, including 'forward',
                'backward' and 'both'. Default: 'both'. If set to 'backward', will
                only print the gradients of input tensor; If set to 'both', will
                both print the input tensor itself and the gradients of input tensor.

    Returns:
        Variable: Output tensor.

    NOTES:
        The input and output are two different variables, and in the
        following process, you should use the output variable but not the input,
        otherwise, the print layer doesn't have backward.

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           input = fluid.layers.fill_constant(shape=[10,2], value=3, dtype='int64')
           input = fluid.layers.Print(input, message="The content of input layer:")

           main_program = fluid.default_main_program()
           exe = fluid.Executor(fluid.CPUPlace())
           exe.run(main_program)

    Output at runtime:
        .. code-block:: bash

           The content of input layer:     The place is:CPUPlace
           Tensor[fill_constant_0.tmp_0]
               shape: [10,2,]
               dtype: x
               data: 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
    '''
    # only these dtypes are supported by the underlying 'print' op
    check_type_and_dtype(input, 'input', Variable,
                         ['float32', 'float64', 'int32', 'int64', 'bool'],
                         'fluid.layers.Print')

    helper = LayerHelper('print' + "_" + input.name, **locals())
    output = helper.create_variable_for_type_inference(input.dtype)
    helper.append_op(
        type='print',
        inputs={'In': input},
        outputs={'Out': output},
        attrs={
            'first_n': first_n,
            'summarize': summarize,
            'message': message or "",
            'print_tensor_name': print_tensor_name,
            'print_tensor_type': print_tensor_type,
            'print_tensor_shape': print_tensor_shape,
            'print_tensor_lod': print_tensor_lod,
            'print_phase': print_phase.upper()
        })
    return output
class BlockGuard(object):
    """
    Context manager that opens a new sub-block in a Program on entry and
    rolls back to the parent block on exit. Used with the Python ``with``
    keyword to build nested program blocks.
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program._create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.main_program._rollback()
        # returning False when an exception is pending re-raises it
        return exc_type is None
class BlockGuardWithCompletion(BlockGuard):
    """
    BlockGuardWithCompletion class.

    BlockGuardWithCompletion class is used to create an op with a block in a program.
    It drives a StaticRNN through its status transitions and finalizes the
    recurrent op when the ``with`` block closes.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("BlockGuardWithCompletion takes a StaticRNN")
        super(BlockGuardWithCompletion, self).__init__(rnn.helper.main_program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super(BlockGuardWithCompletion, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # exception inside the block: skip completion and let it propagate
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        # emit the recurrent op while the rnn sub-block is still current;
        # super().__exit__ below then rolls back to the parent block
        self.rnn._complete_op()
        return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
                                                              exc_tb)
class StaticRNNMemoryLink(object):
    """
    Record linking the memory cells of consecutive StaticRNN time steps.

    NOTE: This is an internal data structure of a very low-level API.
    Please use StaticRNN instead.

    Args:
        init(Variable): the initial variable for Memory.
        pre_mem(Variable): the memory variable in previous time step.
        mem(Variable): the memory variable in current time step.
    """

    def __init__(self, init, pre_mem, mem=None):
        self.mem = mem
        self.pre_mem = pre_mem
        self.init = init
class StaticRNN(object):
"""
StaticRNN class.
The StaticRNN can process a batch of sequence data. The first dimension of inputs
represents sequence length, the length of each input sequence must be equal.
StaticRNN will unfold sequence into time steps, user needs to define how to process
each time step during the :code:`with` step.
Args:
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
# mark hidden as output
rnn.step_output(hidden)
# get StaticrNN final output
result = rnn()
"""
BEFORE_RNN_BLOCK = 0
IN_RNN_BLOCK = 1
AFTER_RNN_BLOCK = 2
def __init__(self, name=None):
self.helper = LayerHelper("static_rnn", name=name)
self.memories = {} # memory map, from pre_mem.name --> MemoryLink
self.inputs = [] # input variable list in current block
self.outputs = [] # output variable list in parent block
self.status = StaticRNN.BEFORE_RNN_BLOCK # status flag.
# sequence length, since it is a static RNN, sequence length are fixed.
self.seq_len = None
def step(self):
"""
Define operators in each step. step is used in :code:`with` block, OP in :code:`with` block
will be executed sequence_len times (sequence_len is the length of input)
"""
return BlockGuardWithCompletion(self)
def _assert_in_rnn_block_(self, method):
if self.status != StaticRNN.IN_RNN_BLOCK:
raise ValueError("You must invoke {0} in rnn block".format(method))
def memory(self,
init=None,
shape=None,
batch_ref=None,
init_value=0.0,
init_batch_dim_idx=0,
ref_batch_dim_idx=1):
"""
Create a memory variable for static rnn.
If the :code:`init` is not None, :code:`memory` will be initialized by
this Variable. If the :code:`init` is None, :code:`shape` and :code:`batch_ref`
must be set, and this function will create a new variable with shape and batch_ref
to initialize :code:`init` Variable.
Args:
init(Variable, optional): Tensor used to init memory. If it is not set,
:code:`shape` and :code:`batch_ref` must be provided.
Default: None.
shape(list|tuple): When :code:`init` is None use this arg to initialize memory shape.
NOTE the shape does not contain batch_size. Default: None.
batch_ref(Variable, optional): When :code:`init` is None, memory's batch size will
be set as batch_ref's ref_batch_dim_idx value. Default: None.
init_value(float, optional): When :code:`init` is None, used to init memory's value. Default: 0.0.
init_batch_dim_idx(int, optional): the batch_size axis of the :code:`init` Variable. Default: 0.
ref_batch_dim_idx(int, optional): the batch_size axis of the :code:`batch_ref` Variable. Default: 1.
Returns:
Variable: The memory variable.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# init memory
prev = rnn.memory(init=boot_memory)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# update hidden with prev
rnn.update_memory(prev, hidden)
"""
self._assert_in_rnn_block_('memory')
if init is None:
if shape is None or batch_ref is None:
raise ValueError(
"if init is None, memory at least need shape and batch_ref")
parent_block = self._parent_block()
var_name = unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
dtype=batch_ref.dtype,
persistable=False)
parent_block.append_op(
type="fill_constant_batch_size_like",
inputs={'Input': [batch_ref]},
outputs={'Out': [boot_var]},
attrs={
'value': init_value,
'shape': boot_var.shape,
'dtype': boot_var.dtype,
'input_dim_idx': ref_batch_dim_idx,
'output_dim_idx': init_batch_dim_idx
})
return self.memory(init=boot_var)
else:
pre_mem = self.helper.create_variable(
name=unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "mem"])),
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
init=init, pre_mem=pre_mem)
return pre_mem
def step_input(self, x):
"""
Mark a sequence as a StaticRNN input.
Args:
x(Variable): The input sequence, the shape of x
should be [seq_len, ...].
Returns:
Variable: The current time step data in the input sequence.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
"""
self._assert_in_rnn_block_('step_input')
if not isinstance(x, Variable):
raise TypeError("step input takes a Variable")
if self.seq_len is None:
self.seq_len = x.shape[0]
elif x.shape[0] != -1 and self.seq_len != x.shape[0]:
raise ValueError("Static RNN only take fix seq_len input")
ipt = self.helper.create_variable(
name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
self.inputs.append(ipt)
return ipt
def step_output(self, o):
"""
Mark a sequence as a StaticRNN output.
Args:
o(Variable): The output sequence.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
rnn.step_output(hidden)
result = rnn()
"""
self._assert_in_rnn_block_('step_output')
if not isinstance(o, Variable):
raise TypeError("step output takes a Variable")
tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
self.helper.append_op(
type='rnn_memory_helper',
inputs={'X': [o]},
outputs={'Out': tmp_o},
attrs={'dtype': o.dtype})
out_var = self._parent_block().create_var(
name=tmp_o.name,
shape=[self.seq_len] + list(tmp_o.shape),
dtype=tmp_o.dtype)
self.outputs.append(out_var)
def output(self, *outputs):
"""
Mark the StaticRNN output variables.
Args:
outputs: The output Tensor, can mark multiple variables as output
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
# mark each step's hidden and word as output
rnn.output(hidden, word)
result = rnn()
"""
for each in outputs:
self.step_output(each)
def update_memory(self, mem, var):
"""
Update the memory from :code:`mem` to :code:`var`.
Args:
mem(Variable): the memory variable.
var(Variable): the plain variable generated in RNN block, used to update memory.
var and mem should hava same dims and data type.
Returns:
None
"""
if not isinstance(mem, Variable) or not isinstance(var, Variable):
raise TypeError("update memory should take variables")
self.memories[mem.name].mem = var
def _parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def __call__(self, *args, **kwargs):
if self.status != StaticRNN.AFTER_RNN_BLOCK:
raise ValueError("RNN output can only be retrieved after rnn block")
if len(self.outputs) == 0:
raise ValueError("RNN has no output")
elif len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
    def _complete_op(self):
        """Finalize the RNN: emit the `recurrent` op into the parent block.

        Collects the step block's external inputs (parameters), wires up the
        per-step memory plumbing with `rnn_memory_helper` ops, and appends a
        single `recurrent` op that owns the step block as its sub-block.
        """
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self._parent_block()
        # Names produced inside the step block, plus the declared step inputs
        # and memories. Any name consumed by a step op but NOT in this set
        # must come from outside the block and is treated as a parameter.
        local_inputs = set()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)
        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)
        # NOTE(zcd): the params have two categories of variables.
        #   - the variables that are the out of StaticRnn.
        #   - the variables that are the parameters of some layers, for example, conv2d.
        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)
        # Deduplicate parameter names and resolve them in the parent block.
        parameters = [parent_block.var(name) for name in set(params)]
        # Scope holder for the per-step execution scopes of the recurrent op.
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs
        # NOTE(zcd): the states maybe empty in some case.
        boot_memories = []
        pre_memories = []
        memories = []
        for _, mem in six.iteritems(self.memories):
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            # Every memory must have been bound via update_memory().
            assert mem.mem is not None, "%s should be updated in every step." % (
                mem.init.name)
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_variable_for_type_inference(
                dtype=mem_var.dtype)
            # Copy the updated memory out of the step scope so the recurrent
            # op can carry it over to the next time step.
            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype})
            memories.append(new_mem.name)
        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters
            },
            outputs={'outputs': outlinks,
                     'step_scopes': [step_scope]},
            attrs={
                'has_states': len(pre_memories) > 0,
                'ex_states': pre_memories,
                'states': memories,
                'sub_block': rnn_block
            })
class WhileGuard(BlockGuard):
    """BlockGuard that manages the lifecycle of a ``While`` op's sub-block.

    Entering the guard opens the while sub-block and flips the op's status
    to IN_WHILE_BLOCK; a clean exit finalizes the op (appending the actual
    `while` operator) and restores the parent block.
    """

    def __init__(self, while_op):
        # Only a While op knows how to complete itself on exit.
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super(WhileGuard, self).__init__(while_op.helper.main_program)
        self.while_op = while_op

    def __enter__(self):
        self.while_op.status = While.IN_WHILE_BLOCK
        return super(WhileGuard, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # On exception, propagate without completing the op.
        if exc_type is not None:
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        self.while_op._complete()
        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
    """
    while loop control flow. Repeat while body until cond is False.

    Note:
        A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` .

    Args:
        cond(Variable): A Tensor whose data type is bool controlling whether to continue looping.
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)           # loop counter
            loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)   # loop length
            cond = fluid.layers.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                i = fluid.layers.increment(x=i, value=1, in_place=True)
                fluid.layers.less_than(x=i, y=loop_len, cond=cond)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[i])
            print(res)  # [array([10])]
    """

    # Status constants toggled by WhileGuard.
    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2

    def __init__(self, cond, is_test=False, name=None):
        self.helper = LayerHelper("while", name=name)
        self.status = While.BEFORE_WHILE_BLOCK
        if not isinstance(cond, Variable):
            raise TypeError("condition should be a variable")
        assert isinstance(cond, Variable)
        if cond.dtype != core.VarDesc.VarType.BOOL:
            raise TypeError("condition should be a boolean variable")
        # The condition must hold exactly one element (scalar-like shape).
        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
            raise TypeError(
                "condition expected shape as [], but given shape as {0}.".
                format(list(cond.shape)))
        self.cond_var = cond
        self.is_test = is_test

    def block(self):
        # Usage: with while_op.block(): ... build the loop body ...
        return WhileGuard(self)

    def _complete(self):
        """Append the actual `while` op; called by WhileGuard.__exit__."""
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)
        # Names produced inside the loop body. Any name read by a body op
        # but not in this set must be fed from outside as an 'X' input.
        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        for op in while_block.ops:
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        x_name_list.add(in_var_name)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)
        # Only inner outputs that are also visible from the parent scope
        # are exposed as outputs of the while op.
        out_vars = []
        for inner_out_name in inner_outputs:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_vars.append(inner_var)
        # Scope holder for the per-iteration execution scopes.
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        parent_block.append_op(
            type='while',
            inputs={
                'X': [
                    parent_block._var_recursive(x_name)
                    for x_name in x_name_list
                ],
                'Condition': [self.cond_var]
            },
            outputs={'Out': out_vars,
                     'StepScopes': [step_scope]},
            attrs={'sub_block': while_block,
                   "is_test": self.is_test})
def while_loop(cond, body, loop_vars, is_test=False, name=None):
    """
    while_loop is one of the control flows. Repeats while_loop `body` until `cond` returns False.

    Args:
        cond(Callable): A callable returning a boolean tensor controlling whether to continue looping.
        body(Callable): A callable returning a tuple or list of tensors of the same arity (length and structure)
            and types as ``loop_vars`` .
        loop_vars(list|tuple): A list or tuple of tensors that is passed to both ``cond`` and ``body`` .
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): Normally there is no need for users to set this property. For more information, please
            refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        A list or tuple of tensors which returned by ``body`` .

    Return type:
        list(Variable)|tuple(Variable).

    Raises:
        TypeError: If the type of ``cond`` is not callable.
        TypeError: If the type of ``body`` is not callable.
        TypeError: If the type of ``loop_vars`` is not list or tuple.
        TypeError: If the type of ``cond`` returns is not Variable.
        TypeError: If the type of ``cond`` returns is not a boolean variable.
        TypeError: If the shape of ``cond`` returns is not equals 1.
        ValueError: If the ``loop_vars`` is empty.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            def cond(i):
                return layers.less_than(i, ten)

            def body(i):
                return layers.increment(x=i, value=1, in_place=True)

            main_program = fluid.default_main_program()
            startup_program = fluid.default_startup_program()
            with fluid.program_guard(main_program, startup_program):
                i = layers.fill_constant(shape=[1], dtype='int64', value=0)    # loop counter
                ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # loop length
                out = layers.while_loop(cond, body, [i])

                exe = fluid.Executor(fluid.CPUPlace())
                res = exe.run(main_program, feed={}, fetch_list=out)
                print(res)  # [array([10])]
    """
    helper = LayerHelper('while_loop', **locals())
    # Validate the user-supplied callables and loop variables up front.
    if not callable(cond):
        raise TypeError("cond in while_loop should be callable")
    if not callable(body):
        raise TypeError("body in while_loop should be callable")
    if not isinstance(loop_vars, (list, tuple)):
        raise TypeError("loop_vars in while_loop should be a list or tuple")
    if len(loop_vars) == 0:
        raise ValueError("loop_vars in while_loop should not be empty")
    # Evaluate the condition once before the loop; the resulting variable is
    # reused as the While op's condition and re-assigned every iteration.
    pre_cond = cond(*loop_vars)
    if not isinstance(pre_cond, Variable):
        raise TypeError("cond in while_loop should return a variable")
    if pre_cond.dtype != core.VarDesc.VarType.BOOL:
        raise TypeError("cond in while_loop should return a boolean variable")
    if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
        raise TypeError(
            "the shape of the variable returned by cond should be [],"
            "but given shape as {0}.".format(list(pre_cond.shape)))
    while_loop_block = While(pre_cond, is_test, name)
    with while_loop_block.block():
        output_vars = body(*loop_vars)
        if len(loop_vars) == 1:
            # With a single loop var, body returns a bare variable rather
            # than a sequence; copy it back onto the loop var in place.
            assign(output_vars, loop_vars[0])
            now_cond = cond(output_vars)
        else:
            # Copy body outputs back onto loop_vars so the next iteration
            # (and the caller, via the returned loop_vars) see new values.
            for i in range(len(output_vars)):
                assign(output_vars[i], loop_vars[i])
            now_cond = cond(*output_vars)
        # Refresh the loop condition for the next iteration.
        assign(now_cond, pre_cond)
    return loop_vars
def lod_rank_table(x, level=0):
    """
    Create a LoDRankTable from LoDTensor ``x`` at the given LoD ``level``.

    A LoDRankTable holds a list of bi-element (index, length) tuples, both
    int typed. For the chosen LoD level, the index is the sequence position
    and the length is the sequence length; the list is sorted by length in
    descending order. For example:

    .. code-block:: text

        x is a LoDTensor:
            x.lod  = [[2, 1],
                      [5, 1, 1]]
            x.data = [a, b, c, d, e, f, g]

        level=0 -> lod_rank_table(x, level=0).items() = [(0, 2), (1, 1)]
        level=1 -> lod_rank_table(x, level=1).items() = [(0, 5), (1, 1), (2, 1)]

    Args:
        x (Variable): Input LoDTensor from which to build the rank table.
        level (int): The LoD level on which to build the rank table.

    Returns:
        Variable: The created LoDRankTable object.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10],
                                  dtype='float32', lod_level=1)
            out = fluid.layers.lod_rank_table(x=x, level=0)
    """
    helper = LayerHelper("lod_rank_table", **locals())
    rank_table = helper.create_variable(
        name=unique_name.generate("lod_rank_table"),
        type=core.VarDesc.VarType.LOD_RANK_TABLE)
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': rank_table},
        attrs={'level': level})
    return rank_table
@templatedoc()
def max_sequence_len(rank_table):
    """
    ${comment}

    >>> import paddle.fluid as fluid
    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
    >>>                       lod_level=1)
    >>> rank_table = layers.lod_rank_table(x=x, level=0)
    >>> max_seq_len = layers.max_sequence_len(rank_table)

    Args:
        rank_table(${rank_table_type}): ${rank_table_comment}.

    Returns:
        ${out_comment}.
    """
    # NOTE: the helper name keeps the historical "max_seqence_len" spelling
    # so generated variable names remain stable for existing programs.
    helper = LayerHelper("max_seqence_len", **locals())
    out = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": out})
    return out
def lod_tensor_to_array(x, table):
    """
    Convert a LoDTensor into a LoDTensorArray.

    Splits a LoDTensor into a LoDTensorArray according to its LoD
    information (LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
    PaddlePaddle). The result can be read and written with
    `read_from_array()` / `write_to_array()` operators. This function is an
    internal component of PaddlePaddle's `DynamicRNN`; users should not call
    it directly.

    Args:
        x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
        table (ParamAttr|list): The variable storing the LoD level ordered by
            sequence length in descending order, generally produced by
            `layers.lod_rank_table()`.

    Returns:
        Variable: The LoDTensorArray converted from the input tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10])
            table = fluid.layers.lod_rank_table(x, level=0)
            array = fluid.layers.lod_tensor_to_array(x, table)
    """
    helper = LayerHelper("lod_tensor_to_array", **locals())
    tensor_array = helper.create_variable(
        name=unique_name.generate("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype)
    helper.append_op(
        type='lod_tensor_to_array',
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': tensor_array})
    return tensor_array
def array_to_lod_tensor(x, table):
    """
    Convert a LoDTensorArray back into a single LoDTensor.

    Args:
        x (Variable|list): The LoDTensorArray to be converted to a tensor.
        table (ParamAttr|list): The variable storing the LoD level ordered by
            sequence length in descending order.

    Returns:
        Variable: The tensor variable converted from the array.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10])
            table = fluid.layers.lod_rank_table(x, level=0)
            array = fluid.layers.lod_tensor_to_array(x, table)
            lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
    """
    helper = LayerHelper("array_to_lod_tensor", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="array_to_lod_tensor",
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': out})
    return out
def increment(x, value=1.0, in_place=True):
    """
    Increment the data of ``x`` by an amount ``value``; commonly used in
    control flow. ``x`` must contain exactly one element.

    Parameters:
        x (Variable): A tensor that must always contain a single element;
            supported data types: float32, float64, int32, int64.
        value (float, optional): The amount by which to increment ``x``.
            Default: 1.0.
        in_place (bool, optional): Whether to perform the operation in place.
            Default: True.

    Returns:
        Variable: The incremented tensor with the same shape and dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            counter = fluid.layers.zeros(shape=[1], dtype='float32')  # [0.]
            fluid.layers.increment(counter)  # [1.]
    """
    helper = LayerHelper("increment", **locals())
    # In-place mode writes the result back into x itself.
    out = x if in_place else helper.create_variable_for_type_inference(
        dtype=x.dtype)
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)})
    return out
def array_write(x, i, array=None):
    """
    Write ``x`` into the i-th position of the LoDTensorArray ``array`` and
    return the modified array. When ``array`` is None, a new LoDTensorArray
    is created and returned. Often used with :ref:`api_fluid_layers_array_read`.

    Args:
        x (Variable): The data to write into the array; a multi-dimensional
            Tensor or LoDTensor. Data type: float32, float64, int32, int64.
        i (Variable): 1-D Tensor with shape [1] giving the write position.
            Data type: int64.
        array (LoDTensorArray, optional): Target array. Default None, in
            which case a fresh LoDTensorArray is created and returned.

    Returns:
        Variable: The input ``array`` after ``x`` has been written into it.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Write tmp into position 10; arr now has length 11.
            arr = fluid.layers.array_write(tmp, i=i)
            # Read the entry back and print it.
            item = fluid.layers.array_read(arr, i=i)
            input = fluid.layers.Print(item, message="The content of i-th LoDTensor:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)
            # Printed dtype is the C++ type name: 'x' on MacOS, 'l' on Linux,
            # '__int64' on Windows -- all meaning a 64-bit integer.
    """
    helper = LayerHelper('array_write', **locals())
    if array is None:
        # No target supplied: create a fresh LoDTensorArray to write into.
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x],
                'I': [i]},
        outputs={'Out': [array]})
    return array
def create_array(dtype):
    """
    Create an empty LOD_TENSOR_ARRAY whose elements have data type ``dtype``.

    Used as the input of :ref:`api_fluid_layers_array_read` and
    :ref:`api_fluid_layers_array_write`, and together with
    :ref:`api_fluid_layers_While` to build RNN networks.

    Args:
        dtype (str): Element data type of the lod_tensor_array.
            Supported: float32, float64, int32, int64.

    Returns:
        Variable: The empty lod_tensor_array with element type ``dtype``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.create_array(dtype='float32')
    """
    helper = LayerHelper("array", **locals())
    tensor_array = helper.create_variable(
        name="{0}.out".format(helper.name),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=dtype)
    return tensor_array
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}.
        y(${y_type}): ${y_comment}.
        force_cpu(${force_cpu_type}): ${force_cpu_comment}.
        cond(Variable|None): Optional output variable that stores the result
            of *less_than*.

    Returns:
        ${out_comment}.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.layers.data(name='x', shape=[2], dtype='float64')
            y = fluid.layers.data(name='y', shape=[2], dtype='float64')
            result = fluid.layers.less_than(x=x, y=y)
            # Alternatively, supply the output variable yourself:
            # result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
            # fluid.layers.less_than(x=x, y=y, cond=result)

            exe = fluid.Executor(fluid.CPUPlace())
            x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
            y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
            result_value, = exe.run(fluid.default_main_program(),
                                    feed={'x': x_i, 'y': y_i},
                                    fetch_list=[result])
            print(result_value)  # [[True, False], [False, False]]
    """
    helper = LayerHelper("less_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = dict()
    if force_cpu is not None:
        # The caller explicitly chose where the comparison runs.
        op_attrs['force_cpu'] = force_cpu
    elif force_init_on_cpu():
        # Fall back to the global force-init-on-cpu mode.
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='less_than',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def less_equal(x, y, cond=None):
    """
    This OP returns the truth value of :math:`x <= y` elementwise, equivalent
    to the overloaded operator `<=`.

    Args:
        x(Variable): First N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        y(Variable): Second N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        cond(Variable, optional): Output tensor storing the result; when
            None, a new variable with the same shape and bool data type is
            created. Default: None.

    Returns:
        Variable: bool tensor with the same shape as ``x`` holding the
        elementwise comparison result.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
            limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
            out = fluid.layers.less_equal(x=label, y=limit)  # [True, False]
            out1 = label <= limit                            # [True, False]
    """
    helper = LayerHelper("less_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = dict()
    # Honor the global force-init-on-cpu mode.
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='less_equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def greater_than(x, y, cond=None):
    """
    This OP returns the truth value of :math:`x > y` elementwise, equivalent
    to the overloaded operator `>`.

    Args:
        x(Variable): First N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        y(Variable): Second N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        cond(Variable, optional): Output tensor storing the result; when
            None, a new variable with the same shape and bool data type is
            created. Default: None.

    Returns:
        Variable: bool tensor with the same shape as ``x`` holding the
        elementwise comparison result.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
            limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
            out = fluid.layers.greater_than(x=label, y=limit)  # [False, True]
            out1 = label > limit                               # [False, True]
    """
    helper = LayerHelper("greater_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = dict()
    # Honor the global force-init-on-cpu mode.
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='greater_than',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def greater_equal(x, y, cond=None):
    """
    This OP returns the truth value of :math:`x >= y` elementwise, equivalent
    to the overloaded operator `>=`.

    Args:
        x(Variable): First N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        y(Variable): Second N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        cond(Variable, optional): Output tensor storing the result; when
            None, a new variable with the same shape and bool data type is
            created. Default: None.

    Returns:
        Variable: bool tensor with the same shape as ``x`` holding the
        elementwise comparison result.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
            limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
            out = fluid.layers.greater_equal(x=label, y=limit)  # [True, False]
            out_1 = label >= limit                              # [True, False]
    """
    helper = LayerHelper("greater_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = dict()
    # Honor the global force-init-on-cpu mode.
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='greater_equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
def equal(x, y, cond=None):
    """
    This layer returns the truth value of :math:`x == y` elementwise.

    Args:
        x(Variable): Tensor with data type float32, float64, int32 or int64.
        y(Variable): Tensor with data type float32, float64, int32 or int64.
        cond(Variable, optional): Output variable that stores the result of
            *equal*; when None, a new variable is created for the result.

    Returns:
        Variable: bool tensor with the same shape as the input tensors.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            out_cond = fluid.data(name="input1", shape=[2], dtype='bool')
            label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
            limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
            label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
            out1 = fluid.layers.equal(x=label, y=limit)  # [True, False]
            out2 = fluid.layers.equal(x=label_cond, y=limit, cond=out_cond)
            # out2=[False, True] out_cond=[False, True]
    """
    helper = LayerHelper("equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def not_equal(x, y, cond=None):
    """
    This OP returns the truth value of :math:`x != y` elementwise, equivalent
    to the overloaded operator `!=`.

    Args:
        x(Variable): First N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        y(Variable): Second N-D tensor to compare. Data type: float32,
            float64, int32 or int64.
        cond(Variable, optional): Output tensor storing the result; when
            None, a new variable with the same shape and bool data type is
            created. Default: None.

    Returns:
        Variable: bool tensor with the same shape as ``x`` holding the
        elementwise comparison result.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
            out = fluid.layers.not_equal(x=label, y=limit)
    """
    helper = LayerHelper("not_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='not_equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def array_read(array, i):
    """
    Read the data at position ``i`` from the input LoDTensorArray ``array``.
    Often used together with :ref:`api_fluid_layers_array_write`.

    Case 1:
    ::
        Input:
            The shape of first three tensors are [1], and that of the last one is [1,2]:
                array = ([0.6], [0.1], [0.3], [0.4, 0.2])
            And:
                i = [3]

        Output:
            output = [0.4, 0.2]

    Args:
        array (LoDTensorArray): The input LoDTensorArray.
        i (Variable): 1-D Tensor with shape [1] and dtype int64, giving the
            position in ``array`` to read from.

    Returns:
        Variable: The LoDTensor or Tensor read from position ``i`` of ``array``.

    Raises:
        TypeError: If ``array`` is not a Variable of type LOD_TENSOR_ARRAY.

    Examples:
        .. code-block:: python

            # Create a LoDTensorArray, write a Tensor at a position, then
            # read the Tensor back from that position.
            import paddle.fluid as fluid
            arr = fluid.layers.create_array(dtype='float32')
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Writing tmp (shape [3,2]) at subscript 10 grows arr to length 11.
            arr = fluid.layers.array_write(tmp, i, array=arr)
            # Read the data back from subscript 10.
            item = fluid.layers.array_read(arr, i)
            # Print it via the executor.
            input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)
            # Printed dtype is the C++ type name: 'x' on MacOS, 'l' on Linux,
            # '__int64' on Windows -- all meaning a 64-bit integer.
    """
    helper = LayerHelper('array_read', **locals())
    if not isinstance(
            array,
            Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # Fixed misspelled error message ("vairable" -> "variable").
        raise TypeError("array should be tensor array variable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array],
                'I': [i]},
        outputs={'Out': [out]})
    return out
def shrink_memory(x, i, table):
    """
    Create an operator that shrinks RNN memory according to the given
    RankTable.

    NOTE: This is a very low-level API used only by DynamicRNN. DynamicRNN
    implements RNNs without padding: sequences are sorted by length, so the
    amount of valid memory shrinks after each time step.

    Args:
        x(Variable): The memory from the previous time step.
        i(Variable): The step counter, an int scalar as LoDTensor.
        table(Variable): The RNNRankTable object.

    Returns:
        The memory variable after shrinking.

    Examples:
        As this is a low-level API, no standalone example is provided; see
        the implementation of class DynamicRNN for usage.
    """
    helper = LayerHelper('shrink_memory', **locals())
    shrunk = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='shrink_rnn_memory',
        inputs={'X': [x],
                'I': [i],
                'RankTable': [table]},
        outputs={'Out': [shrunk]},
        attrs={})
    return shrunk
def array_length(array):
    """
    Return the length of the input LoDTensorArray.

    Can be combined with :ref:`api_fluid_layers_array_read`,
    :ref:`api_fluid_layers_array_write` and :ref:`api_fluid_layers_While`
    to traverse, read and write a LoDTensorArray.

    Args:
        array (LoDTensorArray): The input array whose length is computed.

    Returns:
        Variable: 1-D int64 Tensor with shape [1] holding the array length.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.zeros(shape=[10], dtype='int32')
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Writing tmp at subscript 10 grows arr to length 11.
            arr = fluid.layers.array_write(tmp, i=i)
            # arr_len holds the length of arr (11).
            arr_len = fluid.layers.array_length(arr)
            # Print it via the executor.
            input = fluid.layers.Print(arr_len, message="The length of LoDTensorArray:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)
            # Printed dtype is the C++ type name: 'x' on MacOS, 'l' on Linux,
            # '__int64' on Windows -- all meaning a 64-bit integer.
    """
    helper = LayerHelper('array_length', **locals())
    length = helper.create_variable_for_type_inference(dtype='int64')
    length.stop_gradient = True
    helper.append_op(
        type='lod_array_length',
        inputs={'X': [array]},
        outputs={'Out': [length]})
    return length
class ConditionalBlockGuard(BlockGuard):
    """
    ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
    holding a ConditionalBlock, and helping users entering and exiting the
    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
    is generally an internal component of IfElse, users should not use it directly.
    """

    def __init__(self, block):
        # The guard is only meaningful for ConditionalBlock instances.
        if not isinstance(block, ConditionalBlock):
            raise TypeError("block should be conditional block")
        super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
        self.block = block

    def __enter__(self):
        return super(ConditionalBlockGuard, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # complete() is invoked unconditionally (even when an exception is
        # in flight) to emit the conditional_block op before leaving scope.
        self.block.complete()
        return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
                                                           exc_tb)
class ConditionalBlock(object):
    '''
    **ConditionalBlock**

    ConditionalBlock is an operator that binds a block to a specific condition;
    if the condition matches, the corresponding block will be executed.

    Args:
        inputs (Variable): bool conditions.
        is_scalar_condition (bool): whether the branch is controlled by a scalar.
        name(str): name of this ConditionalBlock.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             cond = layers.less_than(x=label, y=limit)
             true_image, false_image = layers.split_lod_tensor(
                 input=image, mask=cond)
             true_cond = layers.ConditionalBlock([true_image])

             with true_cond.block():
                 ...
             with false_cond.block():
                 ...
    '''

    def __init__(self, inputs, is_scalar_condition=False, name=None):
        # Every condition input must already be a framework Variable.
        for each_input in inputs:
            if not isinstance(each_input, Variable):
                raise TypeError("Each input should be variable")
        self.inputs = inputs
        self.is_scalar_condition = is_scalar_condition
        self.helper = LayerHelper('conditional_block', name=name)

    def block(self):
        """Return a context manager for building this block's body.

        Ops appended inside the ``with`` statement form the conditional
        sub-block; the guard calls :meth:`complete` on exit.
        """
        return ConditionalBlockGuard(self)

    def complete(self):
        """Finalize the conditional sub-block.

        Scans the ops inside the sub-block to determine which variables come
        from enclosing blocks (inputs) and which are produced inside
        (outputs), then appends a `conditional_block` op to the parent block.
        Invoked by ConditionalBlockGuard.__exit__.
        """
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)

        # A name read before being produced inside the sub-block must come
        # from outside: collect it in `params`. Every name written inside the
        # sub-block goes into `intermediate`.
        intermediate = set()
        params = set()
        for each_op in inside_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        # NOTE(review): `input_set` appears unused below — confirm before
        # removing.
        input_set = set([ipt.name for ipt in self.inputs])
        # Todo(liym27) Here assume that all params are in recursive parent block
        # but when minimize() called in control flow, some params may be in
        # conditional grad block
        param_list = [
            parent_block._var_recursive(each_name) for each_name in params
        ]
        out_list = []
        for inner_out_name in intermediate:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_list.append(inner_var)
        # Scope var holding the sub-block's runtime scope(s).
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        conditional_block_op = parent_block.append_op(
            type='conditional_block',
            inputs={
                'Cond': self.inputs,
                'Input': param_list,
            },
            outputs={'Out': out_list,
                     'Scope': [step_scope]},
            attrs={
                'sub_block': inside_block,
                'is_scalar_condition': self.is_scalar_condition
            })
        if self.need_append_conditional_block_grad(inside_block):
            self.append_conditional_block_grad(parent_block, inside_block,
                                               conditional_block_op)

    def need_append_conditional_block_grad(self, inside_block):
        """Return True when a backward sub-block already exists for
        ``inside_block`` (backward_block_idx != -1), i.e. grad ops were
        appended before this forward op — see append_conditional_block_grad.
        """
        grad_sub_block_idx = inside_block.backward_block_idx
        return grad_sub_block_idx != -1

    def append_conditional_block_grad(self, parent_block, inside_block,
                                      conditional_block_op):
        '''
        Append op `conditional_block_grad` manually.

        When `optimizer.minimize/append_backward` is called in Paddle control flow,
        grad ops will be appended before appending op `conditional_block` so that
        op `conditional_block_grad` can't be appended when calling
        `optimizer.minimize/append_backward`. After appending op `conditional_block`,
        `conditional_block_grad` is appended manually.

        Args:
            parent_block (Block): The block that `conditional_block_op` belongs to.
            inside_block (Block): The sub block of `conditional_block_op`.
            conditional_block_op (Operator): The forward op conditional_block.
        '''
        grad_sub_block_idx = inside_block.backward_block_idx
        grad_sub_block = self.helper.main_program.block(grad_sub_block_idx)
        # Same external-input analysis as in complete(), but over the ops of
        # the backward sub-block.
        intermediate = set()
        params = set()
        for each_op in grad_sub_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        param_list = []
        for inner_input_name in params:
            inner_var = parent_block._find_var_recursive(inner_input_name)
            if inner_var:
                param_list.append(cpt.to_text(inner_var.name))
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            conditional_block_op.desc,
            cpt.to_text(set()), [grad_sub_block.desc])
        # append op_desc in grad_op_descs to target_block
        op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        new_op_desc = parent_block.desc.append_op()
        new_op_desc.copy_from(grad_op_desc[0])
        new_op_desc._set_attr(op_role_attr_name, backward)
        # set input and output manually
        new_op_desc.set_input('Input', param_list)
        new_op_desc.set_output('Input@GRAD',
                               [param + "@GRAD" for param in param_list])
        # Declare any missing @GRAD vars in the backward sub-block so the
        # shape/type inference below can find them.
        new_vars = set()
        for grad_var_name in new_op_desc.output_arg_names():
            if grad_sub_block.desc.has_var_recursive(
                    cpt.to_bytes(grad_var_name)
            ) or grad_var_name == core.empty_var_name():
                continue
            grad_sub_block.desc.var(cpt.to_bytes(grad_var_name))
            new_vars.add(grad_var_name)
            if grad_var_name not in op_grad_to_var:
                continue
        # infer_shape and infer_type
        new_op_desc.infer_var_type(grad_sub_block.desc)
        new_op_desc.infer_shape(grad_sub_block.desc)
        for arg in new_op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, grad_sub_block)
        self.helper.main_program._sync_with_cpp()
def copy_var_to_parent_block(var, layer_helper):
    """Copy ``var`` into the parent block of the current block.

    Creates a variable with the same dtype/shape/type in the parent block
    and assigns ``var`` into it. Returns the new parent-block variable, or
    ``None`` when ``var`` is ``None``.
    """
    if var is None:
        return None

    main_program = layer_helper.main_program
    parent_idx = main_program.current_block().parent_idx
    # The current block must actually have a parent to copy into.
    assert parent_idx >= 0, "Got wrong parent block index when assigning var to parent scope in control_flow"

    copied_var = main_program.block(parent_idx).create_var(
        dtype=var.dtype, shape=var.shape, type=var.type)
    assign(var, copied_var)
    return copied_var
def cond(pred, true_fn=None, false_fn=None, name=None):
    """
    This API returns ``true_fn()`` if the predicate ``pred`` is true else
    ``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to
    ``None`` if do nothing and this API will treat the callable simply returns
    ``None`` in this case.

    ``true_fn`` and ``false_fn`` should return same nest structure of tensors
    or both return ``None`` if user doesn't like to return anything. A nest
    structure of tensors in PaddlePaddle is tensor(s), or tuple of tensors, or
    list of tensors.

    Note:
        1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have
        the same shape because of dataflow model of PaddlePaddle while the
        tensors in the tuples or the lists can have different shapes.

        2. Any tensors or operations created outside of ``true_fn`` and
        ``false_fn`` will be executed regardless of which branch is selected at
        runtime. This has frequently surprised users who expected a lazy
        semantics. For example:

        .. code-block:: python

            import paddle.fluid as fluid
            a = fluid.data(name='a', shape=[-1, 1], dtype='float32')
            b = fluid.data(name='b', shape=[-1, 1], dtype='float32')
            c = a * b
            out = fluid.layers.cond(a < b, lambda: a + c, lambda: b * b)

        No matter whether ``a < b`` , ``c = a * b`` will run.

    Args:
        pred(Variable): A boolean tensor whose numel should be 1. The boolean
            value determines whether to return the result of ``true_fn`` or
            ``false_fn`` .
        true_fn(callable, optional): A callable to be performed if ``pred`` is
            true. The default value is ``None`` .
        false_fn(callable, optional): A callable to be performed if ``pred`` is
            false. The default value is ``None`` .
        name(str, optional): The default value is ``None`` . Normally users
            don't have to set this parameter. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        Variable|list(Variable)|tuple(Variable): returns ``true_fn()`` if the
        predicate ``pred`` is true else ``false_fn()`` .

    Raises:
        TypeError: if ``true_fn`` or ``false_fn`` is not callable.
        ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest
            structure of tensors.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            from paddle.fluid.executor import Executor
            from paddle.fluid.framework import Program, program_guard

            #
            # pseudocode:
            # if 0.1 < 0.23:
            #     return 1, True
            # else:
            #     return 3, 2
            #

            def true_func():
                return layers.fill_constant(
                    shape=[1, 2], dtype='int32', value=1), layers.fill_constant(
                        shape=[2, 3], dtype='bool', value=True)

            def false_func():
                return layers.fill_constant(
                    shape=[3, 4], dtype='float32', value=3), layers.fill_constant(
                        shape=[4, 5], dtype='int64', value=2)

            main_program = Program()
            startup_program = Program()
            with program_guard(main_program, startup_program):
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
                pred = layers.less_than(x, y)
                out = layers.cond(pred, true_func, false_func)
                # out is a tuple containing 2 tensors

            place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
            ) else fluid.CPUPlace()
            exe = fluid.Executor(place)
            ret = exe.run(main_program, fetch_list=out)
            # ret[0] = [[1 1]]
            # ret[1] = [[ True True True]
            #           [ True True True]]
    """
    helper = LayerHelper('cond', **locals())
    true_output = None
    false_output = None
    # Each branch runs in a sub-block; its outputs must be copied into the
    # parent block so both branches' results exist at the same scope level.
    copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
    if true_fn is not None:
        if not callable(true_fn):
            raise TypeError("The true_fn in cond must be callable")
        true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
        with true_cond_block.block():
            origin_true_output = true_fn()
            if origin_true_output is not None:
                true_output = map_structure(copy_to_parent_func,
                                            origin_true_output)
    if false_fn is not None:
        if not callable(false_fn):
            raise TypeError("The false_fn in cond must be callable")
        # The false branch is guarded by the negation of pred.
        false_cond_block = ConditionalBlock(
            [logical_not(pred)], is_scalar_condition=True)
        with false_cond_block.block():
            origin_false_output = false_fn()
            if origin_false_output is not None:
                false_output = map_structure(copy_to_parent_func,
                                             origin_false_output)
    if true_output is None and false_output is None:
        # Neither branch returned anything, so there is nothing to merge.
        return None
    if true_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns None while false_fn returns non-None")
    if false_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns non-None while false_fn returns None")
    # Merge true and false output if they are not None
    try:
        assert_same_structure(true_output, false_output, check_types=False)
    except ValueError as e:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: {}".
            format(e))

    # select_input picks list element `mask`: index 0 when pred is false,
    # index 1 when pred is true — hence the [false_var, true_var] ordering.
    mask = cast(pred, dtype='int32')
    merge_func = lambda false_var, true_var : select_input([false_var, true_var], mask)
    merged_output = map_structure(merge_func, false_output, true_output)
    return merged_output
def _error_message(what, arg_name, op_name, right_value, error_value):
error_message = "{what} of '{arg_name}' in Op({op_name}) must be " \
"{right_value}, but received: {error_value}.".format(
what=what,
arg_name=arg_name,
op_name=op_name,
right_value=right_value,
error_value=error_value)
return error_message
def case(pred_fn_pairs, default=None, name=None):
    '''
    This operator works like an if-elif-elif-else chain.

    Args:
        pred_fn_pairs(list|tuple): A list or tuple of (pred, fn) pairs. ``pred`` is a boolean Tensor with shape [1], ``fn`` is a callable. All callables return the same structure of Tensors.
        default(callable, optional): Callable that returns a structure of Tensors.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable|list(Variable): Tensors returned by the callable from the first pair whose pred is True,
        or Tensors returned by ``default`` if no pred in ``pred_fn_pairs`` is True and ``default`` is not None,
        or Tensors returned by the last callable in ``pred_fn_pairs`` if no pred in ``pred_fn_pairs`` is True and ``default`` is None.

    Raises:
        TypeError: If the type of ``pred_fn_pairs`` is not list or tuple.
        TypeError: If the type of elements in ``pred_fn_pairs`` is not tuple.
        TypeError: If the size of tuples in ``pred_fn_pairs`` is not 2.
        TypeError: If the first element of 2-tuple in ``pred_fn_pairs`` is not Variable.
        TypeError: If the second element of 2-tuple in ``pred_fn_pairs`` is not callable.
        TypeError: If ``default`` is not None but it is not callable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            def fn_1():
                return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

            def fn_2():
                return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

            def fn_3():
                return layers.fill_constant(shape=[3], dtype='int32', value=3)

            main_program = fluid.default_main_program()
            startup_program = fluid.default_startup_program()
            with fluid.program_guard(main_program, startup_program):
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

                pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = layers.equal(x, y)      # false: 0.3 == 0.1

                # Call fn_1 because pred_1 is True
                out_1 = layers.case(
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)

                # Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called.
                # because fn_3 is the last callable in pred_fn_pairs.
                out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

                exe = fluid.Executor(fluid.CPUPlace())
                res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
                print(res_1)  # [[1. 1.]]
                print(res_2)  # [3 3 3]
    '''
    helper = LayerHelper('case', **locals())

    def _case_check_args(pred_fn_pairs, default):
        '''
        Check arguments pred_fn_pairs and default. Return canonical pred_fn_pairs and default.
        '''
        if not isinstance(pred_fn_pairs, (list, tuple)):
            raise TypeError(
                _error_message("The type", "pred_fn_pairs", "case",
                               "list or tuple", type(pred_fn_pairs)))
        for pred_fn in pred_fn_pairs:
            if not isinstance(pred_fn, tuple):
                raise TypeError(
                    _error_message("The elements' type", "pred_fn_pairs",
                                   "case", "tuple", type(pred_fn)))
            if len(pred_fn) != 2:
                raise TypeError(
                    _error_message("The tuple's size", "pred_fn_pairs", "case",
                                   "2", str(len(pred_fn)) + "-tuple"))
            pred, fn = pred_fn
            if not isinstance(pred, Variable):
                raise TypeError(
                    _error_message("The pred's type", "pred_fn_pairs", "case",
                                   "boolean Variable", type(pred)))
            if not callable(fn):
                raise TypeError(
                    "The fn for {} of pred_fn_pairs in Op(case) must"
                    " be callable.".format(pred.name))
        if default is None:
            # With no explicit default, the last fn becomes the fallback and
            # its pred is dropped (it is called whenever nothing else fires).
            default_index = len(pred_fn_pairs) - 1  # pick the last one
            default = pred_fn_pairs[default_index][1]
            pred_fn_pairs = pred_fn_pairs[:default_index]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")
        return pred_fn_pairs, default

    pred_fn_pairs, default = _case_check_args(pred_fn_pairs, default)

    # Build nested cond()s from the last pair outward, so the first pred is
    # tested first and `default` sits at the innermost false branch:
    # cond(p1, f1, cond(p2, f2, ... cond(pn, fn, default)))
    false_fn = default
    for pred, true_fn in reversed(pred_fn_pairs):
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)
    final_fn = false_fn
    return final_fn()
class Switch(object):
    """
    This class is used to implement Switch branch control function.
    Switch branch contains several case branches and one default branch.
    Switch control flow checks whether the case branch conditions are satisfied in turn,
    and only executes the statement after the first case branch that satisfies the conditions.
    If there is no case branch that satisfies the condition,
    only the statement following the default branch is executed.

    Note:
        A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` .

    Member Functions:
        case(cond): The case branch of Switch whose parameter cond is a scalar Variable of bool type. Only if the cond of the current case branch is True and the cond of the previous case branch is False, the statement after the case branch will be executed, and the statement after the case branch will not be executed.

        default(): The default branch of Switch. When cond of all case branches is False, the statement after default branch is executed.

    Case and default functions can only be used inside the scope of Switch, as shown below:

    .. code-block:: python

        '''
        with fluid.layers.Switch() as switch:
            with switch.case(cond1):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
            with switch.case(cond2):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
            with switch.default():
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
        '''

    Args:
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            lr = fluid.layers.create_global_var(
                shape=[1],
                value=0.0,
                dtype='float32',
                persistable=True,
                name="learning_rate")
            zero_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=1.0)
            two_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=2.0)

            global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)

            with fluid.layers.control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
                    fluid.layers.assign(input=one_var, output=lr)
                with switch.default():
                    fluid.layers.assign(input=two_var, output=lr)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr])
            print(res)  # [array([1.], dtype=float32)]
    """

    def __init__(self, name=None):
        self.helper = LayerHelper('switch', name=name)
        # True only while inside the `with Switch()` scope.
        self.inside_scope = False
        # pre_not_conditions[i] == NOT(c0) AND ... AND NOT(ci): "no earlier
        # case fired". Each new case is guarded by this conjunction.
        self.pre_not_conditions = []

    def case(self, condition):
        """Create a conditional block guarded by ``condition`` AND the
        negation of all previously registered case conditions, so only the
        first matching case executes."""
        if not self.inside_scope:
            raise ValueError("case should be called inside with")

        if len(self.pre_not_conditions) == 0:
            # First case: guarded by the condition alone.
            cond_block = ConditionalBlock([condition], is_scalar_condition=True)
            not_cond = logical_not(x=condition)
            self.pre_not_conditions.append(not_cond)
        else:
            # Later case: guarded by (no earlier case fired) AND condition.
            pre_cond_num = len(self.pre_not_conditions)
            pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
            new_not_cond = logical_and(
                x=pre_not_cond, y=logical_not(x=condition))
            self.pre_not_conditions.append(new_not_cond)
            cond_block = ConditionalBlock(
                [logical_and(
                    x=pre_not_cond, y=condition)],
                is_scalar_condition=True)

        return ConditionalBlockGuard(cond_block)

    def default(self):
        """Create the fallback block, guarded by the negation of every case
        condition registered so far. Requires at least one prior case."""
        pre_cond_num = len(self.pre_not_conditions)
        if pre_cond_num == 0:
            raise ValueError("there should be at least one condition")
        cond_block = ConditionalBlock(
            [self.pre_not_conditions[pre_cond_num - 1]],
            is_scalar_condition=True)
        return ConditionalBlockGuard(cond_block)

    def __enter__(self):
        """
        set flag that now is inside switch.block {}
        :return:
        """
        self.inside_scope = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.inside_scope = False
        if exc_type is not None:
            return False  # re-raise exception

        return True
class IfElseBlockGuard(object):
    """Context manager for one branch (true or false) of an IfElse.

    Entering switches the IfElse status to the matching in-branch state and
    opens the underlying ConditionalBlock; exiting checks that the branch
    produced at least one output and restores the out-of-branch status.
    """

    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")
        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")

        self.is_true = is_true
        self.ie = ifelse
        # Pick the ConditionalBlock matching this branch and wrap it in its
        # guard for use in __enter__/__exit__.
        branch_block = (ifelse.conditional_true_block
                        if is_true else ifelse.conditional_false_block)
        if not isinstance(branch_block, ConditionalBlock):
            raise TypeError("Unexpected situation")
        self.cond_block = branch_block.block()

    def __enter__(self):
        if self.is_true:
            self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS
        else:
            self.ie.status = IfElse.IN_IF_ELSE_FALSE_BLOCKS
        self.cond_block.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # Propagate the exception raised inside the with-body.
            return False
        branch_outputs = self.ie.output_table[1 if self.is_true else 0]
        if len(branch_outputs) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
    """
    This class is used to implement IfElse branch control function. IfElse contains two blocks, true_block and false_block. IfElse will put data satisfying True or False conditions into different blocks to run.

    Cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the execution conditions of the corresponding part of the input data.

    Note:
        A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse``. if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` .

    IfElse OP is different from other OPs in usage, which may cause some users confusion. Here is a simple example to illustrate this OP.

    .. code-block:: python

        # The following code completes the function: subtract 10 from the data greater than 0 in x, add 10 to the data less than 0 in x, and sum all the data.
        import numpy as np
        import paddle.fluid as fluid

        x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False)
        y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False)

        x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32)
        y_d = np.zeros((4, 1)).astype(np.float32)

        # Compare the size of x, y pairs of elements, output cond, cond is shape [4, 1], data type bool 2-D tensor.
        # Based on the input data x_d, y_d, it can be inferred that the data in cond are [[true], [true], [false], [false]].
        cond = fluid.layers.greater_than(x, y)
        # Unlike other common OPs, ie below returned by the OP is an IfElse OP object
        ie = fluid.layers.IfElse(cond)

        with ie.true_block():
            # In this block, according to cond condition, the data corresponding to true dimension in X is obtained and subtracted by 10.
            out_1 = ie.input(x)
            out_1 = out_1 - 10
            ie.output(out_1)
        with ie.false_block():
            # In this block, according to cond condition, get the data of the corresponding condition in X as false dimension, and add 10
            out_1 = ie.input(x)
            out_1 = out_1 + 10
            ie.output(out_1)

        # According to cond condition, the data processed in the two blocks are merged. The output here is output, the type is List, and the element type in List is Variable.
        output = ie()  # [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)]

        # Get the first Variable in the output List and add all elements.
        out = fluid.layers.reduce_sum(output[0])

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out])
        print(res)
        # [array([-1.], dtype=float32)]

    Args:
        cond (Variable): cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the corresponding execution conditions of N input data. The data type is bool.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Unlike other common OPs, the OP call returns an IfElse OP object (e.g. ie in the example), which branches the input data by calling the internal functions of the object ``true_block ()``, ``false_block ()``, ``input ()``, ``output ()``, and integrates the data processed by different branches as the overall output by calling the internal ``call ()`` function. The output type is a list, and the type of each element in the list is Variable.

    Internal Functions:
        The block is constructed by calling the ``with ie. true_block()`` function in the object, and the computational logic under condition true is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        The block is constructed by calling the ``with ie. false_block()`` function in the object, and the computational logic under condition false is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        ``Out = ie. input (x)`` will take out the data of the corresponding conditional dimension in X and put it into out, supporting the internal processing of multiple inputs in block.

        ``ie. output (out)`` writes the result to the output of the corresponding condition.

        There is a ``call ()`` function inside the object, that is, by calling ``output = ie ()``, all the outputs inside the block of False are fused as the whole output, the output type is a list, and the type of each element in the list is Variable.

    """
    # Status values tracking whether program construction is currently
    # outside any branch, inside the true branch, or inside the false branch.
    OUT_IF_ELSE_BLOCKS = 0
    IN_IF_ELSE_TRUE_BLOCKS = 1
    IN_IF_ELSE_FALSE_BLOCKS = 2

    def __init__(self, cond, name=None):
        if not isinstance(cond, Variable):
            raise TypeError("cond must be a Variable")
        self.helper = LayerHelper('ifelse', name=name)
        self.cond = cond
        # Maps id(x) -> (true_split, false_split) so the same input is only
        # split by split_lod_tensor once.
        self.input_table = {}
        self.status = IfElse.OUT_IF_ELSE_BLOCKS
        self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
        self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        # Index 0 collects false-branch outputs, index 1 true-branch outputs
        # (see IfElseBlockGuard.__exit__ and __call__ below).
        self.output_table = ([], [])  # (false_outs, true_outs)

    def input(self, x):
        """Split ``x`` by ``cond`` and return the slice belonging to the
        branch currently being built. Only valid inside true/false blocks."""
        if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("input must in true/false blocks")
        if id(x) not in self.input_table:
            parent_block = self._parent_block()
            out_true = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                             self.helper.name),
                dtype=x.dtype)

            out_false = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                             self.helper.name),
                dtype=x.dtype)
            # split_lod_tensor routes rows of x to out_true/out_false
            # according to the boolean mask.
            parent_block.append_op(
                type='split_lod_tensor',
                inputs={
                    'X': x,
                    'Mask': self.cond,
                },
                outputs={'OutTrue': out_true,
                         'OutFalse': out_false},
                attrs={'level': 0})
            self.input_table[id(x)] = (out_true, out_false)
        else:
            out_true, out_false = self.input_table[id(x)]

        if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
            return out_true
        else:
            return out_false

    def _parent_block(self):
        # The block enclosing the one currently being constructed.
        current_block = self.helper.main_program.current_block()
        return self.helper.main_program.block(current_block.parent_idx)

    def true_block(self):
        """Return a context manager for building the true branch."""
        return IfElseBlockGuard(True, self)

    def false_block(self):
        """Return a context manager for building the false branch."""
        return IfElseBlockGuard(False, self)

    def output(self, *outs):
        """Register branch result(s): copy each out into a parent-block var
        and record it in the current branch's output table."""
        if self.status == self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("output can only be invoked in the sub-block")

        out_table = self.output_table[1 if self.status ==
                                      self.IN_IF_ELSE_TRUE_BLOCKS else 0]
        parent_block = self._parent_block()
        for each_out in outs:
            if not isinstance(each_out, Variable):
                raise TypeError("Each output should be a variable")
            # create outside tensor
            outside_out = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key("_".join(
                    [self.helper.name, 'output'])),
                dtype=each_out.dtype)
            out_table.append(outside_out)

            # assign local var to outside
            assign(input=each_out, output=outside_out)

    def __call__(self):
        """Merge the outputs of both branches by ``cond`` and return them as
        a list. Must be called after leaving both branch blocks."""
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        false_len, true_len = list(map(len, self.output_table))
        if false_len == 0 and true_len == 0:
            raise ValueError("Must invoke true_block/false_block before "
                             "__call__")
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError("The output side must be same")
        elif false_len == 0 or true_len == 0:
            # Only one branch produced outputs; return it unmerged.
            return self.output_table[0 if false_len != 0 else 1]

        # else none of false_len/true_len is zero
        # merge together
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0))
        return rlist
class DynamicRNN(object):
"""
**Note: the input of this class should be LoDTensor which holds the
information of variable-length sequences. If the input is fixed-length Tensor,
please use StaticRNN (fluid.layers.** :ref:`api_fluid_layers_StaticRNN` **) for
better performance.**
DynamicRNN can process a minibatch of variable-length sequences.
The length of each sample can be different and is recorded in LoD.
In DynamicRNN, an input sequence will be unfolded into time steps and users
can define how to process each time step in :code:`block()` .
The total number of time steps is determined by the longest sequence.
DynamicRNN will not pad all sequences to the same length, instead it will
sort the sequences internally by the sequence length in descending order.
The input sequences will be shrinked because only sequences of which the
length is larger than the time step will participate the remaining calculation.
If defined :code:`drnn = DynamicRNN()`, then users can call :code:`drnn()`
to obtain the result sequences. It is a LoDTensor gained by merging all
time steps's output. When RNN's input sequence x meets :code:`x.lod_level == 1`,
the output LoDTensor will have the same LoD with x. The result of :code:`drnn()`
includes RNN's outputs of all time steps, users can call
:ref:`api_fluid_layers_sequence_last_step` to extract the data of the last time step.
Warning:
Currently it is not supported to set :code:`is_sparse = True` of any
layers defined within DynamicRNN's :code:`block` function.
Args:
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information,
please refer to :ref:`api_guide_Name` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
current_word = drnn.step_input(sentence)
# Set encode_proj as RNN's static input
encoder_word = drnn.static_input(encoder_proj)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=decoder_boot, need_reorder=True)
fc_1 = fluid.layers.fc(input=encoder_word, size=30)
fc_2 = fluid.layers.fc(input=current_word, size=30)
decoder_inputs = fc_1 + fc_2
hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
# Set hidden and out as RNN's outputs
drnn.output(hidden, out)
# Get RNN's result
hidden, out = drnn()
# Get RNN's result of the last time step
last = fluid.layers.sequence_last_step(out)
"""
BEFORE_RNN = 0
IN_RNN = 1
AFTER_RNN = 2
def __init__(self, name=None):
self.helper = LayerHelper('dynamic_rnn', name=name)
self.status = DynamicRNN.BEFORE_RNN
self.lod_rank_table = None
self.max_seq_len = None
self.step_idx = None
self.zero_idx = None
self.mem_dict = dict()
self.output_array = []
self.outputs = []
self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
self.cond.stop_gradient = False
self.while_op = While(self.cond)
self.input_array = []
self.mem_link = []
def step_input(self, x, level=0):
"""
This function is used to set sequence x as DynamicRNN's input.
The maximum sequence length in x determines the number of time steps
the RNN unit will be executed. DynamicRNN can take multiple inputs.
When all inputs' :code:`lod_level` are 1, all inputs should hold the
same LoD. When :code:`x.lod_level >= 2` , the input sequence will be
unfold along specified level, and the slice of each time step is a
LoDTensor whose lod_level is :code:`x.lod_level - level - 1` .
In this case, the specified LoD level of multiple inputs should be the same.
- Case 1:
.. code-block:: text
# input, where Si is slice data of shape [1, N]
level = 0
x.lod = [[2, 1, 3]]
x.shape = [6, N]
x.data = [[S0],
[S0],
[S1],
[S2],
[S2],
[S2]]
# output
# step 0, time step data of 3 sequences
out.lod = [[]]
out.shape = [3, N]
out.data = [[S2],
[S0],
[S1]]
# step 1, time step data of 2 sequences
out.lod = [[]]
out.shape = [2, N]
out.data = [[S2],
[S0]]
# step 2, time step data of 1 sequences
out.lod = [[]]
out.shape = [1, N]
out.data = [[S2]]
Args:
x (Variable): The input LoDTensor which holds information of a
minibatch of variable-length sequences and should meet :code:`x.lod_level >= 1` .
When RNN has multiple inputs, the first dimension should match
across all inputs, but other shape components may differ.
Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.
level (int, optional): The level of lod used to split steps.
It should be in range :math:`[0, x.lod\_level)` . The default value is 0.
Returns:
Variable: The current time step in the input sequence. If there are :code:`num_sequences` \
sequences in x whose length is larger than :code:`step_idx` , the returned Variable \
will only hold the :code:`step_idx` -th time step of those `num_sequences` sequences. \
The data type is the same as input. If :code:`x.lod_level == 1` , the return value is \
a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , or it will \
be a variable-length LoDTensor.
Raises:
ValueError: When :code:`step_input()` is called outside :code:`block()` .
TypeError: When x is not a Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 1], dtype='int64', lod_level=1)
embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set embedding as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(embedding)
# Initialize memory to a Tensor whose value is 0, shape=[batch_size, 200],
# where batch_size is the number of sequences in embedding.
memory = drnn.memory(shape=[200])
hidden = fluid.layers.fc(input=[word, memory], size=200, act='relu')
# Update memory to hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_("step_input")
if not isinstance(x, Variable):
raise TypeError(
"step_input() can only take a Variable as its input.")
parent_block = self._parent_block_()
if self.lod_rank_table is None:
self.lod_rank_table = parent_block.create_var(
name=unique_name.generate('lod_rank_table'),
type=core.VarDesc.VarType.LOD_RANK_TABLE)
self.lod_rank_table.stop_gradient = True
parent_block.append_op(
type='lod_rank_table',
inputs={"X": x},
outputs={"Out": self.lod_rank_table},
attrs={"level": level})
self.max_seq_len = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_max_seq_len'),
dtype='int64')
self.max_seq_len.stop_gradient = False
parent_block.append_op(
type='max_sequence_len',
inputs={'RankTable': self.lod_rank_table},
outputs={"Out": self.max_seq_len})
self.cond.stop_gradient = True
parent_block.append_op(
type='less_than',
inputs={'X': self.step_idx,
'Y': self.max_seq_len},
outputs={'Out': self.cond},
attrs={'force_cpu': True})
input_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_input_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
self.input_array.append((input_array, x.dtype))
parent_block.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': self.lod_rank_table},
outputs={'Out': input_array})
return array_read(array=input_array, i=self.step_idx)
def static_input(self, x):
"""
This function is used to set x as DynamicRNN's static input. It is optional.
- Case 1, set static input with LoD
.. code-block:: text
# RNN's input is the same as the case listed in step_input
# static input, where Si is slice data of shape [1, M]
x.lod = [[3, 1, 2]]
x.shape = [6, M]
x.data = [[S0],
[S0],
[S0],
[S1],
[S2],
[S2]]
# step 0, batch data corresponding to the 3 input sequences
out.lod = [[2, 3, 1]]
out.shape = [6, M]
out.data = [[S2],
[S2],
[S0],
[S0],
[S0],
[S1]]
# step 1, batch data corresponding to the 2 input sequences
out.lod = [[2, 3]]
out.shape = [5, M]
out.data = [[S2],
[S2],
[S0],
[S0],
[S0]]
# step 2, batch data corresponding to the 1 input sequences
out.lod = [[2]]
out.shape = [2, M]
out.data = [[S2],
[S2]]
- Case 2, set static input without LoD
.. code-block:: text
# RNN's input is the same as the case listed in step_input
# static input, where Si is slice data of shape [1, M]
x.lod = [[]]
x.shape = [3, M]
x.data = [[S0],
[S1],
[S2]]
# step 0, batch data corresponding to the 3 input sequences
out.lod = [[]]
out.shape = [3, M]
out.data = [[S2],
[S0],
[S1]]
# step 1, batch data corresponding to the 2 input sequences
out.lod = [[]]
out.shape = [2, M]
out.data = [[S2],
[S0]]
# step 2, batch data corresponding to the 1 input sequences
out.lod = [[]]
out.shape = [1, M]
out.data = [[S2]]
Args:
x (Variable): The static input LoDTensor which should hold the same number of sequences
as RNN's input (the input LoDTensor set by :code:`step_input()` ). If the LoD is None,
the input x will be treated as a minibatch with :code:`x.shape[0]` sequences of length 1.
Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.
Returns:
Variable: The input LoDTensor after sorted and shrinked. If there are :code:`num_sequences` \
sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
the static input Tensor will be sorted to the same order as RNN's input and \
will only retain data corresponding to those :code:`num_sequences` sequences. \
The data type is the same as input. If :code:`x.lod == None` , the return value is \
a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , or it will \
be a variable-length LoDTensor.
Raises:
ValueError: When :code:`static_input()` is called outside :code:`block()` .
TypeError: When x is not a Variable.
RuntimeError: When :code:`static_input()` is called before :code:`step_input()` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
current_word = drnn.step_input(sentence)
# Set encode_proj as RNN's static input
encoder_word = drnn.static_input(encoder_proj)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=decoder_boot, need_reorder=True)
fc_1 = fluid.layers.fc(input=encoder_word, size=30)
fc_2 = fluid.layers.fc(input=current_word, size=30)
decoder_inputs = fc_1 + fc_2
hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
# Set out as RNN's output
drnn.output(out)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_("static_input")
if not isinstance(x, Variable):
raise TypeError(
"static_input() can only take a Variable as its input")
if self.lod_rank_table is None:
raise RuntimeError(
"static_input() must be called after step_input().")
parent_block = self._parent_block_()
x_reordered = parent_block.create_var(
name=unique_name.generate("dynamic_rnn_static_input_reordered"),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=x.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={'X': [x],
'RankTable': [self.lod_rank_table]},
outputs={'Out': [x_reordered]})
return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
@signature_safe_contextmanager
def block(self):
"""
The function is used to list the operations executed during
each time step in RNN. The operation list will be executed :code:`max_sequence_len`
times (where :code:`max_sequence_len` is the maximum length of RNN's input sequences).
Raises:
ValueError: When :code:`block()` is called multi-times.
"""
if self.status != DynamicRNN.BEFORE_RNN:
raise ValueError("rnn.block() can only be invoke once")
self.step_idx = fill_constant(
shape=[1], dtype='int64', value=0, force_cpu=True)
self.step_idx.stop_gradient = False
self.status = DynamicRNN.IN_RNN
with self.while_op.block():
yield
increment(x=self.step_idx, value=1.0, in_place=True)
for new_mem, mem_array in self.mem_link:
array_write(x=new_mem, i=self.step_idx, array=mem_array)
less_than(
x=self.step_idx,
y=self.max_seq_len,
force_cpu=True,
cond=self.cond)
self.status = DynamicRNN.AFTER_RNN
for each_array in self.output_array:
self.outputs.append(
array_to_lod_tensor(
x=each_array, table=self.lod_rank_table))
def __call__(self, *args, **kwargs):
"""
This function is used to get the output sequneces of DynamicRNN.
Args:
None
Returns:
Variable or Variable list: RNN's output sequences.
Raises:
ValueError: When :code:`__call__()` is called before :code:`block()` .
"""
if self.status != DynamicRNN.AFTER_RNN:
raise ValueError(("Output of the dynamic RNN can only be visited "
"outside the rnn block."))
if len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
def memory(self,
init=None,
shape=None,
value=0.0,
need_reorder=False,
dtype='float32'):
"""
Create a memory Variable for DynamicRNN to deliver data cross time steps.
It can be initialized by an existing Tensor or a constant Tensor of given
dtype and shape.
Args:
init (Variable, optional): LoDTensor used to initialize the memory.
If init is not None, it should hold the same number of sequences
as RNN's input (the input LoDTensor set by :code:`step_input()` )
and the memory will be initialized to it. If init's LoD is None,
it will be treated as a minibatch with :code:`init.shape[0]` sequences
of length 1. The default value is None.
shape (list|tuple, optional): When init is None, it is used to specify
the memory's shape. Note that the shape does not include the batch_size.
If setting shape to :math:`\{D_1, D_2, ...\}` , the shape of memory Tensor
will be :math:`\{batch\_size, D_1, D_2, ...\}` , where batch_size is
determined by RNN's input sequences. The default value is None.
value (float, optional): When init is None, it is used as initalized value
of memory. The default value is 0.0.
need_reorder (bool, optional): When init is not None, it determines whether
the memory needs to reorder like the RNN's input sequeneces. It should be
set to True when the initialized memory depends on the order of input samples.
The default value is False.
dtype (str|numpy.dtype, optional): When init is None, it is used to set the
data type of memory. The default value is "float32". Optional data types
are: "float32", "float64", "int32", "int64".
Returns:
Variable: The memory LoDTensor after shrinked. If there are :code:`num_sequences` \
sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
the memory Tensor also need to be shrinked and will only retain data \
corresponding to those :code:`num_sequences` sequences.
Raises:
ValueError: When :code:`memory()` is called outside :code:`block()` .
TypeError: When init is set and is not a Variable.
ValueError: When :code:`memory()` is called before :code:`step_input()` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
boot_memory = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(sentence)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=boot_memory, need_reorder=True)
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(sentence)
# Initialize memory to a Tensor whose value is 0, shape=[batch_size, 10],
# where batch_size is the number of sequences in sentence.
memory = drnn.memory(shape=[10], dtype='float32', value=0)
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_('memory')
self._init_zero_idx_()
if init is not None:
if not isinstance(init, Variable):
raise TypeError(
"The input arg `init` of memory() must be a Variable")
parent_block = self._parent_block_()
init_tensor = init
if need_reorder == True:
if self.lod_rank_table is None:
raise ValueError(
'If set need_reorder to True, make sure step_input be '
'invoked before '
'memory(init=init, need_reordered=True, ...).')
init_reordered = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=init.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={
'X': [init_tensor],
'RankTable': [self.lod_rank_table]
},
outputs={'Out': [init_reordered]})
init_tensor = init_reordered
mem_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=init.dtype)
parent_block.append_op(
type='write_to_array',
inputs={'X': init_tensor,
'I': self.zero_idx},
outputs={'Out': mem_array})
retv = array_read(array=mem_array, i=self.step_idx)
retv = shrink_memory(
x=retv, i=self.step_idx, table=self.lod_rank_table)
self.mem_dict[retv.name] = mem_array
return retv
else:
if len(self.input_array) == 0:
raise ValueError(
"step_input should be invoked before memory(shape=..., value=...)"
)
parent_block = self._parent_block_()
init = parent_block.create_var(
name=unique_name.generate('mem_init'), dtype=dtype)
arr, dtype = self.input_array[0]
in0 = parent_block.create_var(
name=unique_name.generate('in0'), dtype=dtype)
parent_block.append_op(
type='read_from_array',
inputs={'X': [arr],
'I': [self.zero_idx]},
outputs={'Out': [in0]})
parent_block.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': [in0]},
outputs={'Out': [init]},
attrs={
'shape': [-1] + shape,
'value': float(value),
'dtype': init.dtype
})
return self.memory(init=init)
def update_memory(self, ex_mem, new_mem):
"""
Update the memory which need to be delivered across time steps.
Args:
ex_mem (Variable): The memory data of previous time step.
new_mem (Variable): The new memory data produced in current time step.
The shape and data type of ex_mem and new_mem should be the same.
Returns:
None
Raises:
ValueError: When :code:`update_memory()` is called outside :code:`block()` .
TypeError: When :code:`ex_mem` or :code:`new_mem` is not a Variable.
ValueError: When :code:`ex_mem` is defined by :code:`memory()` .
ValueError: When :code:`update_memory()` is called before :code:`step_input()` .
"""
self._assert_in_rnn_block_('update_memory')
if not isinstance(ex_mem, Variable):
raise TypeError("The input arg `ex_mem` of update_memory() must "
"be a Variable")
if not isinstance(new_mem, Variable):
raise TypeError("The input arg `new_mem` of update_memory() must "
"be a Variable")
mem_array = self.mem_dict.get(ex_mem.name, None)
if mem_array is None:
raise ValueError("Please invoke memory before update_memory")
if self.lod_rank_table is None:
raise ValueError("Please invoke step_input before update_memory")
self.mem_link.append((new_mem, mem_array))
def output(self, *outputs):
"""
This function is used to set :code:`outputs` as RNN's output.
Args:
*outputs (Variable ...): The output Tensor. DynamicRNN can mark multiple
Variables as its output.
Returns:
None
Raises:
ValueError: When :code:`output()` is called outside :code:`block()` .
"""
self._assert_in_rnn_block_('output')
parent_block = self._parent_block_()
for each in outputs:
outside_array = parent_block.create_var(
name=unique_name.generate_with_ignorable_key("_".join(
[self.helper.name, "output_array", each.name])),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=each.dtype)
array_write(x=each, i=self.step_idx, array=outside_array)
self.output_array.append(outside_array)
def _init_zero_idx_(self):
if self.zero_idx is None:
parent_block = self._parent_block_()
self.zero_idx = parent_block.create_var(
name=unique_name.generate('zero_idx'), dtype='int64')
parent_block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [self.zero_idx]},
attrs={
'shape': [1],
'dtype': self.zero_idx.dtype,
'value': float(0),
'force_cpu': True
})
def _parent_block_(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def _assert_in_rnn_block_(self, method):
if self.status != DynamicRNN.IN_RNN:
raise ValueError("{0} can only be invoked inside rnn block.".format(
method))
def switch_case(branch_index, branch_fns, default=None, name=None):
    '''
    Operator working like a C++ switch/case statement.

    Args:
        branch_index(Variable): A Tensor with shape [1] selecting which
            branch to execute. Data type is ``int32``, ``int64`` or ``uint8``.
        branch_fns(dict|list|tuple): Either a dict mapping python integers
            to callables, a list/tuple of (int, callable) pairs, or a
            list/tuple of plain callables whose position is used as index.
            All callables must return the same structure of Tensors.
        default(callable, optional): Callable executed when no index
            matches. When None, the callable with the largest index plays
            that role. Default None.
        name(str, optional): Normally no need to set. For details see
            :ref:`api_guide_Name`. Default None.

    Returns:
        Variable|list(Variable): Tensors returned by the callable selected
        by ``branch_index``, or by the default branch when no index matches.

    Raises:
        TypeError: If ``branch_index`` is not a Variable, or its dtype is
            not ``int32``/``int64``/``uint8``; if ``branch_fns`` (or its
            elements) have a wrong type; if ``default`` is given but not
            callable.
        ValueError: If an index appears more than once in ``branch_fns``.
    '''
    helper = LayerHelper('switch_case', **locals())

    def _validate(branch_index, branch_fns, default):
        # --- branch_index checks ---
        if not isinstance(branch_index, Variable):
            raise TypeError(
                _error_message("The type", "branch_index", "switch_case",
                               "Variable", type(branch_index)))

        if convert_dtype(branch_index.dtype) not in ["uint8", "int32", "int64"]:
            raise TypeError(
                _error_message("The data type", "branch_index", "switch_case",
                               "uint8, int32 or int64",
                               convert_dtype(branch_index.dtype)))
        if convert_dtype(branch_index.dtype) != "int64":
            branch_index = cast(branch_index, "int64")

        # --- branch_fns normalization ---
        if not isinstance(branch_fns, (list, tuple, dict)):
            raise TypeError(
                _error_message("The type", "branch_fns", "switch_case",
                               "dict, tuple or list", type(branch_fns)))

        branch_fns = branch_fns.items() if isinstance(branch_fns,
                                                      dict) else branch_fns

        branch_fns = list(enumerate(branch_fns)) if all(
            callable(fn) for fn in branch_fns) else branch_fns

        seen_keys = []
        for pair in branch_fns:
            if not isinstance(pair, tuple):
                raise TypeError(
                    _error_message("The elements' type", "branch_fns",
                                   "switch_case", "tuple", type(branch_fns)))

            if len(pair) != 2:
                raise TypeError(
                    _error_message("The tuple's size", "branch_fns",
                                   "switch_case", "2",
                                   str(len(pair)) + "-tuple"))

            key, fn = pair

            if not isinstance(key, int):
                raise TypeError(
                    _error_message("The key's type", "branch_fns",
                                   "switch_case", "int", type(key)))

            if key in seen_keys:
                raise ValueError(
                    "The key in 'branch_fns' must be unique, but '{}' appears more than once.".
                    format(key))
            seen_keys.append(key)

            if not callable(fn):
                raise TypeError(
                    _error_message("The type of function for key {}".format(
                        key), "branch_fns", "switch_case", "callable", type(
                            fn)))

        if default is None:
            # The branch with the largest key becomes the default branch.
            default = sorted(branch_fns)[-1][1]
            branch_fns = sorted(branch_fns)[:-1]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")

        pairs = []
        for key, fn in branch_fns:
            key_const = fill_constant(shape=[1], dtype="int64", value=key)
            pairs.append((equal(branch_index, key_const), fn))

        return pairs, default

    pred_fn_pairs, default = _validate(branch_index, branch_fns, default)

    # Chain the branches into nested conditionals; innermost is the default.
    chained_fn = default
    for pred, true_fn in pred_fn_pairs:
        chained_fn = partial(
            cond, pred=pred, true_fn=true_fn, false_fn=chained_fn)
    return chained_fn()
@templatedoc()
def reorder_lod_tensor_by_rank(x, rank_table):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}.
        rank_table(${rank_table_type}): ${rank_table_comment}.

    Returns:
        out(${out_type}): ${out_comment}.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data_desc = (['input', [9], 0], ['ref', [5], 1])
            data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1])
            rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
            table = fluid.layers.control_flow.lod_rank_table(rank_data)
            new_data = fluid.layers.reorder_lod_tensor_by_rank(
                           x=data, rank_table=table)
    """
    helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())

    helper.is_instance('x', Variable)
    helper.is_instance('rank_table', Variable)

    reordered = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reorder_lod_tensor_by_rank',
        inputs={'X': [x],
                'RankTable': [rank_table]},
        outputs={'Out': [reordered]})
    return reordered
def is_empty(x, cond=None):
    """
    Test whether a Variable is empty.

    Args:
        x (Variable): The Variable to be tested.
        cond (Variable, optional): Pre-created bool Variable that receives
            the test result. When None, a new output Variable is created.
            Default: None.

    Returns:
        Variable: A bool scalar. True if 'x' is an empty Variable.

    Raises:
        TypeError: If input cond is not a variable, or cond's dtype is
                   not bool.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          input = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.is_empty(x=input)
          # or:
          # fluid.layers.is_empty(x=input, cond=res)
    """
    helper = LayerHelper("is_empty", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    elif not isinstance(cond, Variable):
        raise TypeError("cond takes a variable")
    # BUG FIX: `cond.dtype` is a VarType enum, so comparing it directly with
    # the string 'bool' was always unequal and every user-supplied cond
    # raised TypeError. Convert to the canonical dtype string first.
    elif convert_dtype(cond.dtype) != 'bool':
        raise TypeError("The data type of cond must be bool")

    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
    return cond
| 40.944606 | 449 | 0.592815 |
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
from .layer_function_generator import autodoc, templatedoc
from .tensor import assign, cast, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from .nn import logical_and, logical_not, logical_or
from .utils import assert_same_structure, map_structure
import numpy
import warnings
import six
from functools import reduce, partial
from ..data_feeder import convert_dtype, check_type_and_dtype
from ... import compat as cpt
from ..backward import _infer_var_data_type_shape_
__all__ = [
'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
'less_equal', 'greater_than', 'greater_equal', 'equal', 'not_equal',
'array_read', 'array_length', 'cond', 'IfElse', 'DynamicRNN', 'StaticRNN',
'reorder_lod_tensor_by_rank', 'Print', 'is_empty', 'case', 'switch_case',
'while_loop'
]
def select_output(input, outputs, mask):
    """Route *input* to one of *outputs* selected by *mask* and return
    *outputs*."""
    helper = LayerHelper('select_output', **locals())
    helper.append_op(
        type='select_output',
        inputs={'X': input,
                'Mask': mask},
        outputs={'Out': outputs})
    return outputs
def select_input(inputs, mask):
    """Pick one of *inputs* according to *mask* and return it as a new
    Variable."""
    helper = LayerHelper('select_input', **locals())
    # The output inherits dtype/shape from the first candidate (or from the
    # single candidate when a bare Variable is passed).
    if isinstance(inputs, (list, tuple)):
        template = inputs[0]
    else:
        template = inputs
    out = helper.create_variable(dtype=template.dtype, shape=template.shape)

    helper.append_op(
        type='select_input',
        inputs={'X': inputs,
                'Mask': mask},
        outputs={'Out': out})
    return out
def split_lod_tensor(input, mask, level=0):
    """Split *input* by boolean *mask* at LoD *level*; return the
    (true-branch, false-branch) pair of LoDTensors."""
    helper = LayerHelper('split_lod_tensor', **locals())
    true_part = helper.create_variable_for_type_inference(dtype=input.dtype)
    false_part = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='split_lod_tensor',
        inputs={
            'X': input,
            'Mask': mask,
        },
        outputs={'OutTrue': true_part,
                 'OutFalse': false_part},
        attrs={'level': level})
    return true_part, false_part
def merge_lod_tensor(in_true, in_false, x, mask, level=0):
    """Merge the true/false branches split from *x* by *mask* back into one
    LoDTensor at the given LoD *level*."""
    helper = LayerHelper('merge_lod_tensor', **locals())
    merged = helper.create_variable_for_type_inference(dtype=in_true.dtype)
    helper.append_op(
        type='merge_lod_tensor',
        inputs={'X': x,
                'Mask': mask,
                'InTrue': in_true,
                'InFalse': in_false},
        outputs={'Out': merged},
        attrs={'level': level})
    return merged
def Print(input,
          first_n=-1,
          message=None,
          summarize=20,
          print_tensor_name=True,
          print_tensor_type=True,
          print_tensor_shape=True,
          print_tensor_lod=True,
          print_phase='both'):
    """Insert a `print` op that logs *input* while the program runs.

    Args:
        input (Variable): Tensor to print (float32/float64/int32/int64/bool).
        first_n (int): Only log the first `first_n` executions (-1 = all).
        message (str, optional): Prefix printed before the tensor.
        summarize (int): Number of elements to print per tensor.
        print_tensor_name/type/shape/lod (bool): Which metadata to print.
        print_phase (str): 'forward', 'backward' or 'both'.

    Returns:
        Variable: A pass-through output of the print op.
    """
    check_type_and_dtype(input, 'input', Variable,
                         ['float32', 'float64', 'int32', 'int64', 'bool'],
                         'fluid.layers.Print')

    helper = LayerHelper('print' + "_" + input.name, **locals())
    output = helper.create_variable_for_type_inference(input.dtype)
    # Build the attribute map after LayerHelper so **locals() above only
    # captures the parameters.
    print_attrs = {
        'first_n': first_n,
        'summarize': summarize,
        'message': message or "",
        'print_tensor_name': print_tensor_name,
        'print_tensor_type': print_tensor_type,
        'print_tensor_shape': print_tensor_shape,
        'print_tensor_lod': print_tensor_lod,
        'print_phase': print_phase.upper()
    }
    helper.append_op(
        type='print',
        inputs={'In': input},
        outputs={'Out': output},
        attrs=print_attrs)
    return output
class BlockGuard(object):
    """
    Context manager that creates a new sub-block of *main_program* on entry
    and rolls the program back to the parent block on exit.
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program._create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.main_program._rollback()
        # Do not suppress exceptions raised inside the block.
        return exc_type is None
class BlockGuardWithCompletion(BlockGuard):
    """
    BlockGuard for StaticRNN that flips the RNN status on entry/exit and
    finalizes the rnn op when the block closes without an exception.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("BlockGuardWithCompletion takes a StaticRNN")
        super(BlockGuardWithCompletion, self).__init__(rnn.helper.main_program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super(BlockGuardWithCompletion, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Propagate the exception and skip op completion.
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        self.rnn._complete_op()
        return super(BlockGuardWithCompletion, self).__exit__(
            exc_type, exc_val, exc_tb)
class StaticRNNMemoryLink(object):
    """
    Record linking a StaticRNN memory: its initial value (``init``), the
    per-step placeholder (``pre_mem``) and, once set, the variable holding
    the updated value (``mem``).
    """

    def __init__(self, init, pre_mem, mem=None):
        # init: boot value of the memory.
        self.init = init
        # pre_mem: the memory as seen at the start of each step.
        self.pre_mem = pre_mem
        # mem: filled in later by update_memory(); None until then.
        self.mem = mem
class StaticRNN(object):
BEFORE_RNN_BLOCK = 0
IN_RNN_BLOCK = 1
AFTER_RNN_BLOCK = 2
def __init__(self, name=None):
self.helper = LayerHelper("static_rnn", name=name)
self.memories = {}
self.inputs = []
self.outputs = []
self.status = StaticRNN.BEFORE_RNN_BLOCK
self.seq_len = None
def step(self):
return BlockGuardWithCompletion(self)
def _assert_in_rnn_block_(self, method):
if self.status != StaticRNN.IN_RNN_BLOCK:
raise ValueError("You must invoke {0} in rnn block".format(method))
def memory(self,
init=None,
shape=None,
batch_ref=None,
init_value=0.0,
init_batch_dim_idx=0,
ref_batch_dim_idx=1):
self._assert_in_rnn_block_('memory')
if init is None:
if shape is None or batch_ref is None:
raise ValueError(
"if init is None, memory at least need shape and batch_ref")
parent_block = self._parent_block()
var_name = unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
dtype=batch_ref.dtype,
persistable=False)
parent_block.append_op(
type="fill_constant_batch_size_like",
inputs={'Input': [batch_ref]},
outputs={'Out': [boot_var]},
attrs={
'value': init_value,
'shape': boot_var.shape,
'dtype': boot_var.dtype,
'input_dim_idx': ref_batch_dim_idx,
'output_dim_idx': init_batch_dim_idx
})
return self.memory(init=boot_var)
else:
pre_mem = self.helper.create_variable(
name=unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "mem"])),
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
init=init, pre_mem=pre_mem)
return pre_mem
def step_input(self, x):
self._assert_in_rnn_block_('step_input')
if not isinstance(x, Variable):
raise TypeError("step input takes a Variable")
if self.seq_len is None:
self.seq_len = x.shape[0]
elif x.shape[0] != -1 and self.seq_len != x.shape[0]:
raise ValueError("Static RNN only take fix seq_len input")
ipt = self.helper.create_variable(
name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
self.inputs.append(ipt)
return ipt
    def step_output(self, o):
        """Register *o* as a per-step output, stacked over time on axis 0.

        Args:
            o (Variable): the output produced in the current step.

        Raises:
            TypeError: if *o* is not a Variable.
        """
        self._assert_in_rnn_block_('step_output')
        if not isinstance(o, Variable):
            raise TypeError("step output takes a Variable")
        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
        # rnn_memory_helper keeps the per-step value available for backward.
        self.helper.append_op(
            type='rnn_memory_helper',
            inputs={'X': [o]},
            outputs={'Out': tmp_o},
            attrs={'dtype': o.dtype})
        # The outside variable gains a leading time dimension of seq_len.
        out_var = self._parent_block().create_var(
            name=tmp_o.name,
            shape=[self.seq_len] + list(tmp_o.shape),
            dtype=tmp_o.dtype)
        self.outputs.append(out_var)
    def output(self, *outputs):
        """Convenience wrapper: register each argument via step_output()."""
        for each in outputs:
            self.step_output(each)
    def update_memory(self, mem, var):
        """Link memory *mem* (returned by memory()) to its next-step value *var*."""
        if not isinstance(mem, Variable) or not isinstance(var, Variable):
            raise TypeError("update memory should take variables")
        self.memories[mem.name].mem = var
def _parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def __call__(self, *args, **kwargs):
if self.status != StaticRNN.AFTER_RNN_BLOCK:
raise ValueError("RNN output can only be retrieved after rnn block")
if len(self.outputs) == 0:
raise ValueError("RNN has no output")
elif len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
    def _complete_op(self):
        """Finalize the RNN: gather inputs/parameters/memories from the step
        block and append a single `recurrent` op to the parent block."""
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self._parent_block()
        # Everything produced inside the step block, plus declared step
        # inputs and memories, is "local"; any other name read by a step op
        # must be a parameter captured from the outer scope.
        local_inputs = set()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)
        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)
        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)
        parameters = [parent_block.var(name) for name in set(params)]
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs
        boot_memories = []
        pre_memories = []
        memories = []
        for _, mem in six.iteritems(self.memories):
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            assert mem.mem is not None, "%s should be updated in every step." % (
                mem.init.name)
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_variable_for_type_inference(
                dtype=mem_var.dtype)
            # Snapshot the updated memory so it can feed the next time step.
            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype})
            memories.append(new_mem.name)
        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters
            },
            outputs={'outputs': outlinks,
                     'step_scopes': [step_scope]},
            attrs={
                'has_states': len(pre_memories) > 0,
                'ex_states': pre_memories,
                'states': memories,
                'sub_block': rnn_block
            })
class WhileGuard(BlockGuard):
    """Block guard for While: updates the op's status on enter/exit and
    triggers While._complete() when the body finishes without error."""

    def __init__(self, while_op):
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super(WhileGuard, self).__init__(while_op.helper.main_program)
        self.while_op = while_op
    def __enter__(self):
        self.while_op.status = While.IN_WHILE_BLOCK
        return super(WhileGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Propagate the in-body exception; skip completion.
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        self.while_op._complete()
        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
    """Static-graph `while` op: ops created inside block() run repeatedly
    while the boolean scalar ``cond`` is true.

    Args:
        cond (Variable): bool scalar condition variable (shape with a single
            element).
        is_test (bool): whether used for inference only.
        name (str|None): optional name scope.
    """

    # Guard lifecycle states.
    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2
    def __init__(self, cond, is_test=False, name=None):
        self.helper = LayerHelper("while", name=name)
        self.status = While.BEFORE_WHILE_BLOCK
        if not isinstance(cond, Variable):
            raise TypeError("condition should be a variable")
        assert isinstance(cond, Variable)
        if cond.dtype != core.VarDesc.VarType.BOOL:
            raise TypeError("condition should be a boolean variable")
        # cond must contain exactly one element.
        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
            raise TypeError(
                "condition expected shape as [], but given shape as {0}.".
                format(list(cond.shape)))
        self.cond_var = cond
        self.is_test = is_test
    def block(self):
        """Return the context-manager guard for the loop body."""
        return WhileGuard(self)
    def _complete(self):
        """Append the `while` op to the parent block, wiring up every outer
        variable the body reads (X) and every variable it defines (Out)."""
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)
        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        # A name read before being produced inside the body must come from
        # the outer scope.
        for op in while_block.ops:
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        x_name_list.add(in_var_name)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)
        out_vars = []
        for inner_out_name in inner_outputs:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_vars.append(inner_var)
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        parent_block.append_op(
            type='while',
            inputs={
                'X': [
                    parent_block._var_recursive(x_name)
                    for x_name in x_name_list
                ],
                'Condition': [self.cond_var]
            },
            outputs={'Out': out_vars,
                     'StepScopes': [step_scope]},
            attrs={'sub_block': while_block,
                   "is_test": self.is_test})
def while_loop(cond, body, loop_vars, is_test=False, name=None):
    """Static-graph while loop: repeatedly applies ``body`` to ``loop_vars``
    while ``cond(*loop_vars)`` is true.

    Args:
        cond (callable): takes loop_vars, returns a bool scalar Variable.
        body (callable): takes loop_vars, returns updated vars of the same
            structure.
        loop_vars (list|tuple): initial loop-carried Variables.
        is_test (bool): inference-only flag forwarded to While.
        name (str|None): optional name scope.

    Returns:
        list|tuple: ``loop_vars`` (values are updated in place via assign).
    """
    helper = LayerHelper('while_loop', **locals())
    if not callable(cond):
        raise TypeError("cond in while_loop should be callable")
    if not callable(body):
        raise TypeError("body in while_loop should be callable")
    if not isinstance(loop_vars, (list, tuple)):
        raise TypeError("loop_vars in while_loop should be a list or tuple")
    if len(loop_vars) == 0:
        raise ValueError("loop_vars in while_loop should not be empty")
    pre_cond = cond(*loop_vars)
    if not isinstance(pre_cond, Variable):
        raise TypeError("cond in while_loop should return a variable")
    if pre_cond.dtype != core.VarDesc.VarType.BOOL:
        raise TypeError("cond in while_loop should return a boolean variable")
    if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
        raise TypeError(
            "the shape of the variable returned by cond should be [],"
            "but given shape as {0}.".format(list(pre_cond.shape)))
    while_loop_block = While(pre_cond, is_test, name)
    with while_loop_block.block():
        output_vars = body(*loop_vars)
        if len(loop_vars) == 1:
            # Single loop var: body may return it bare (not in a list).
            assign(output_vars, loop_vars[0])
            now_cond = cond(output_vars)
        else:
            for i in range(len(output_vars)):
                assign(output_vars[i], loop_vars[i])
            now_cond = cond(*output_vars)
        # Feed the fresh condition back into the While op's condition var.
        assign(now_cond, pre_cond)
    return loop_vars
def lod_rank_table(x, level=0):
    """Create a LoDRankTable for *x* built from its LoD at *level*."""
    helper = LayerHelper("lod_rank_table", **locals())
    rank_table = helper.create_variable(
        name=unique_name.generate("lod_rank_table"),
        type=core.VarDesc.VarType.LOD_RANK_TABLE)
    op_attrs = {'level': level}
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': rank_table},
        attrs=op_attrs)
    return rank_table
@templatedoc()
def max_sequence_len(rank_table):
    """Return an int64 scalar holding the longest sequence in *rank_table*."""
    # NOTE(review): the helper name keeps the historical "max_seqence_len"
    # spelling; changing it would alter generated variable names.
    helper = LayerHelper("max_seqence_len", **locals())
    result = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": result})
    return result
def lod_tensor_to_array(x, table):
    """Split LoDTensor *x* into a LoDTensorArray ordered by rank *table*."""
    helper = LayerHelper("lod_tensor_to_array", **locals())
    tensor_array = helper.create_variable(
        name=unique_name.generate("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype)
    op_inputs = {'X': x, 'RankTable': table}
    helper.append_op(
        type='lod_tensor_to_array',
        inputs=op_inputs,
        outputs={'Out': tensor_array})
    return tensor_array
def array_to_lod_tensor(x, table):
    """Inverse of lod_tensor_to_array: merge array *x* back to a LoDTensor."""
    helper = LayerHelper("array_to_lod_tensor", **locals())
    merged = helper.create_variable_for_type_inference(dtype=x.dtype)
    op_inputs = {'X': x, 'RankTable': table}
    helper.append_op(
        type="array_to_lod_tensor",
        inputs=op_inputs,
        outputs={'Out': merged})
    return merged
def increment(x, value=1.0, in_place=True):
    """Add *value* to tensor *x*; writes into *x* when in_place, else into a
    freshly created variable which is returned."""
    helper = LayerHelper("increment", **locals())
    out = x if in_place else helper.create_variable_for_type_inference(
        dtype=x.dtype)
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)})
    return out
def array_write(x, i, array=None):
    """Write tensor *x* at position *i* of *array* (created when None)."""
    helper = LayerHelper('array_write', **locals())
    if array is None:
        # Allocate a fresh tensor array with the same dtype as x.
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
    op_inputs = {'X': [x], 'I': [i]}
    helper.append_op(
        type='write_to_array',
        inputs=op_inputs,
        outputs={'Out': [array]})
    return array
def create_array(dtype):
    """Create an empty LOD_TENSOR_ARRAY variable with element type *dtype*."""
    helper = LayerHelper("array", **locals())
    array_name = "{0}.out".format(helper.name)
    return helper.create_variable(
        name=array_name,
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=dtype)
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None):
    """Elementwise ``x < y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("less_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = {}
    # An explicit force_cpu wins; otherwise fall back to the global flag.
    if force_cpu is not None:
        op_attrs['force_cpu'] = force_cpu
    elif force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='less_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def less_equal(x, y, cond=None):
    """Elementwise ``x <= y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("less_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = {}
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='less_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def greater_than(x, y, cond=None):
    """Elementwise ``x > y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("greater_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = {}
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='greater_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
@templatedoc()
def greater_equal(x, y, cond=None):
    """Elementwise ``x >= y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("greater_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    op_attrs = {}
    if force_init_on_cpu():
        op_attrs['force_cpu'] = force_init_on_cpu()
    helper.append_op(
        type='greater_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=op_attrs)
    return cond
def equal(x, y, cond=None):
    """Elementwise ``x == y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def not_equal(x, y, cond=None):
    """Elementwise ``x != y`` into a bool Variable (created when cond is None)."""
    helper = LayerHelper("not_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='not_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def array_read(array, i):
    """Read the element at index *i* from LoDTensorArray *array*.

    Args:
        array (Variable): a LOD_TENSOR_ARRAY variable to read from.
        i (Variable): int64 scalar index.

    Returns:
        Variable: the tensor stored at position *i*.

    Raises:
        TypeError: if *array* is not a LOD_TENSOR_ARRAY Variable.
    """
    helper = LayerHelper('array_read', **locals())
    if not isinstance(
            array,
            Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # Fixed typo in the error message ("vairable" -> "variable").
        raise TypeError("array should be tensor array variable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array],
                'I': [i]},
        outputs={'Out': [out]})
    return out
def shrink_memory(x, i, table):
    """Shrink RNN memory *x* at step *i* to the batch still alive per *table*."""
    helper = LayerHelper('shrink_memory', **locals())
    shrunk = helper.create_variable_for_type_inference(dtype=x.dtype)
    op_inputs = {'X': [x], 'I': [i], 'RankTable': [table]}
    helper.append_op(
        type='shrink_rnn_memory',
        inputs=op_inputs,
        outputs={'Out': [shrunk]},
        attrs={})
    return shrunk
def array_length(array):
    """Return an int64 scalar Variable holding the length of *array*."""
    helper = LayerHelper('array_length', **locals())
    length = helper.create_variable_for_type_inference(dtype='int64')
    length.stop_gradient = True
    helper.append_op(
        type='lod_array_length',
        inputs={'X': [array]},
        outputs={'Out': [length]})
    return length
class ConditionalBlockGuard(BlockGuard):
    """Guard for ConditionalBlock: entering opens the sub-block; leaving
    calls block.complete() which appends the conditional_block op."""

    def __init__(self, block):
        if not isinstance(block, ConditionalBlock):
            raise TypeError("block should be conditional block")
        super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
        self.block = block
    def __enter__(self):
        return super(ConditionalBlockGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.block.complete()
        return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
                                                           exc_tb)
class ConditionalBlock(object):
    """Static-graph conditional block: ops built inside block() only run
    when the input condition(s) hold.

    Args:
        inputs (list[Variable]): condition variables.
        is_scalar_condition (bool): whether the condition is a bool scalar.
        name (str|None): optional name scope.
    """

    def __init__(self, inputs, is_scalar_condition=False, name=None):
        for each_input in inputs:
            if not isinstance(each_input, Variable):
                raise TypeError("Each input should be variable")
        self.inputs = inputs
        self.is_scalar_condition = is_scalar_condition
        self.helper = LayerHelper('conditional_block', name=name)
    def block(self):
        """Return the context-manager guard for the conditional body."""
        return ConditionalBlockGuard(self)
    def complete(self):
        """Append the conditional_block op (and its grad op if a backward
        sub-block exists) to the parent block."""
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)
        # Names read before being produced inside the body are captured
        # from the outer scope ("params"); names produced inside are the
        # block's intermediates/outputs.
        intermediate = set()
        params = set()
        for each_op in inside_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        input_set = set([ipt.name for ipt in self.inputs])
        param_list = [
            parent_block._var_recursive(each_name) for each_name in params
        ]
        out_list = []
        for inner_out_name in intermediate:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_list.append(inner_var)
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        conditional_block_op = parent_block.append_op(
            type='conditional_block',
            inputs={
                'Cond': self.inputs,
                'Input': param_list,
            },
            outputs={'Out': out_list,
                     'Scope': [step_scope]},
            attrs={
                'sub_block': inside_block,
                'is_scalar_condition': self.is_scalar_condition
            })
        if self.need_append_conditional_block_grad(inside_block):
            self.append_conditional_block_grad(parent_block, inside_block,
                                               conditional_block_op)
    def need_append_conditional_block_grad(self, inside_block):
        """True when the body already has a backward sub-block attached."""
        grad_sub_block_idx = inside_block.backward_block_idx
        return grad_sub_block_idx != -1
    def append_conditional_block_grad(self, parent_block, inside_block,
                                      conditional_block_op):
        """Create and append the grad op of *conditional_block_op*, declaring
        any missing grad variables in the backward sub-block."""
        grad_sub_block_idx = inside_block.backward_block_idx
        grad_sub_block = self.helper.main_program.block(grad_sub_block_idx)
        intermediate = set()
        params = set()
        for each_op in grad_sub_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        param_list = []
        for inner_input_name in params:
            inner_var = parent_block._find_var_recursive(inner_input_name)
            if inner_var:
                param_list.append(cpt.to_text(inner_var.name))
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            conditional_block_op.desc,
            cpt.to_text(set()), [grad_sub_block.desc])
        # Append the generated grad op desc to the parent block, marked as
        # a backward-role op.
        op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        new_op_desc = parent_block.desc.append_op()
        new_op_desc.copy_from(grad_op_desc[0])
        new_op_desc._set_attr(op_role_attr_name, backward)
        new_op_desc.set_input('Input', param_list)
        new_op_desc.set_output('Input@GRAD',
                               [param + "@GRAD" for param in param_list])
        # Declare grad output vars that do not exist yet in the grad block.
        new_vars = set()
        for grad_var_name in new_op_desc.output_arg_names():
            if grad_sub_block.desc.has_var_recursive(
                    cpt.to_bytes(grad_var_name)
            ) or grad_var_name == core.empty_var_name():
                continue
            grad_sub_block.desc.var(cpt.to_bytes(grad_var_name))
            new_vars.add(grad_var_name)
            if grad_var_name not in op_grad_to_var:
                continue
        # Infer type/shape for the freshly declared grad vars.
        new_op_desc.infer_var_type(grad_sub_block.desc)
        new_op_desc.infer_shape(grad_sub_block.desc)
        for arg in new_op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, grad_sub_block)
        self.helper.main_program._sync_with_cpp()
def copy_var_to_parent_block(var, layer_helper):
    """Copy *var* into the parent block of the current block via assign().

    Returns None when *var* is None; otherwise the new parent-block Variable.
    """
    if var is None:
        return None
    main_prog = layer_helper.main_program
    parent_block_idx = main_prog.current_block().parent_idx
    assert parent_block_idx >= 0, "Got wrong parent block index when assigning var to parent scope in control_flow"
    target_block = main_prog.block(parent_block_idx)
    copied = target_block.create_var(
        dtype=var.dtype, shape=var.shape, type=var.type)
    assign(var, copied)
    return copied
def cond(pred, true_fn=None, false_fn=None, name=None):
    """Static-graph if/else: run ``true_fn`` when ``pred`` is true, otherwise
    ``false_fn``, and merge their (structurally identical) outputs.

    Args:
        pred (Variable): bool scalar condition.
        true_fn (callable|None): zero-arg branch for the true case.
        false_fn (callable|None): zero-arg branch for the false case.
        name (str|None): optional name scope.

    Returns:
        The merged output structure, or None when both branches return None.

    Raises:
        TypeError: if a branch is given but not callable.
        ValueError: if the branches' return structures are incompatible.
    """
    helper = LayerHelper('cond', **locals())
    true_output = None
    false_output = None
    # Branch outputs live in sub-blocks; copy them up so they are visible
    # to the select_input merge in the parent block.
    copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
    if true_fn is not None:
        if not callable(true_fn):
            raise TypeError("The true_fn in cond must be callable")
        true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
        with true_cond_block.block():
            origin_true_output = true_fn()
            if origin_true_output is not None:
                true_output = map_structure(copy_to_parent_func,
                                            origin_true_output)
    if false_fn is not None:
        if not callable(false_fn):
            raise TypeError("The false_fn in cond must be callable")
        false_cond_block = ConditionalBlock(
            [logical_not(pred)], is_scalar_condition=True)
        with false_cond_block.block():
            origin_false_output = false_fn()
            if origin_false_output is not None:
                false_output = map_structure(copy_to_parent_func,
                                             origin_false_output)
    if true_output is None and false_output is None:
        # Both branches are side-effect only.
        return None
    if true_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns None while false_fn returns non-None")
    if false_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns non-None while false_fn returns None")
    try:
        assert_same_structure(true_output, false_output, check_types=False)
    except ValueError as e:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: {}".
            format(e))
    # select_input picks index 0 (false) or 1 (true) using the int mask.
    mask = cast(pred, dtype='int32')
    merge_func = lambda false_var, true_var : select_input([false_var, true_var], mask)
    merged_output = map_structure(merge_func, false_output, true_output)
    return merged_output
def _error_message(what, arg_name, op_name, right_value, error_value):
error_message = "{what} of '{arg_name}' in Op({op_name}) must be " \
"{right_value}, but received: {error_value}.".format(
what=what,
arg_name=arg_name,
op_name=op_name,
right_value=right_value,
error_value=error_value)
return error_message
def case(pred_fn_pairs, default=None, name=None):
    """Run the fn of the first (pred, fn) pair whose pred is true; otherwise
    run ``default``.

    Args:
        pred_fn_pairs (list|tuple): (bool Variable, callable) 2-tuples.
        default (callable|None): fallback branch. When None, the last pair's
            fn is used as the default (its pred is dropped).
        name (str|None): optional name scope.

    Returns:
        The return value of the selected fn (built via nested cond()).
    """
    helper = LayerHelper('case', **locals())
    def _case_check_args(pred_fn_pairs, default):
        """Validate argument types/shapes and resolve the default branch."""
        if not isinstance(pred_fn_pairs, (list, tuple)):
            raise TypeError(
                _error_message("The type", "pred_fn_pairs", "case",
                               "list or tuple", type(pred_fn_pairs)))
        for pred_fn in pred_fn_pairs:
            if not isinstance(pred_fn, tuple):
                raise TypeError(
                    _error_message("The elements' type", "pred_fn_pairs",
                                   "case", "tuple", type(pred_fn)))
            if len(pred_fn) != 2:
                raise TypeError(
                    _error_message("The tuple's size", "pred_fn_pairs", "case",
                                   "2", str(len(pred_fn)) + "-tuple"))
            pred, fn = pred_fn
            if not isinstance(pred, Variable):
                raise TypeError(
                    _error_message("The pred's type", "pred_fn_pairs", "case",
                                   "boolean Variable", type(pred)))
            if not callable(fn):
                raise TypeError(
                    "The fn for {} of pred_fn_pairs in Op(case) must"
                    " be callable.".format(pred.name))
        if default is None:
            default_index = len(pred_fn_pairs) - 1  # pick the last one
            default = pred_fn_pairs[default_index][1]
            pred_fn_pairs = pred_fn_pairs[:default_index]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")
        return pred_fn_pairs, default
    pred_fn_pairs, default = _case_check_args(pred_fn_pairs, default)
    # Fold the pairs from last to first into nested cond() calls:
    # cond(p0, fn0, cond(p1, fn1, ... default)).
    false_fn = default
    for pred, true_fn in reversed(pred_fn_pairs):
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)
    final_fn = false_fn
    return final_fn()
class Switch(object):
    """switch/case control flow built from chained ConditionalBlocks.

    Each case() excludes all previous cases by AND-ing with the negations of
    earlier conditions; default() fires when no case matched.
    """

    def __init__(self, name=None):
        self.helper = LayerHelper('switch', name=name)
        self.inside_scope = False  # True between __enter__ and __exit__
        self.pre_not_conditions = []  # running "no earlier case matched" conds
    def case(self, condition):
        """Return a guard for a branch taken when *condition* holds and no
        earlier case matched."""
        if not self.inside_scope:
            raise ValueError("case should be called inside with")
        if len(self.pre_not_conditions) == 0:
            # First case: just the condition itself.
            cond_block = ConditionalBlock([condition], is_scalar_condition=True)
            not_cond = logical_not(x=condition)
            self.pre_not_conditions.append(not_cond)
        else:
            pre_cond_num = len(self.pre_not_conditions)
            pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
            new_not_cond = logical_and(
                x=pre_not_cond, y=logical_not(x=condition))
            self.pre_not_conditions.append(new_not_cond)
            cond_block = ConditionalBlock(
                [logical_and(
                    x=pre_not_cond, y=condition)],
                is_scalar_condition=True)
        return ConditionalBlockGuard(cond_block)
    def default(self):
        """Return a guard for the fall-through branch (no case matched)."""
        pre_cond_num = len(self.pre_not_conditions)
        if pre_cond_num == 0:
            raise ValueError("there should be at least one condition")
        cond_block = ConditionalBlock(
            [self.pre_not_conditions[pre_cond_num - 1]],
            is_scalar_condition=True)
        return ConditionalBlockGuard(cond_block)
    def __enter__(self):
        self.inside_scope = True
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.inside_scope = False
        if exc_type is not None:
            return False  # re-raise exception
        return True
class IfElseBlockGuard(object):
    """Guard selecting the true or false ConditionalBlock of an IfElse and
    keeping the IfElse status in sync on enter/exit."""

    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")
        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")
        self.is_true = is_true
        self.ie = ifelse
        if is_true:
            self.cond_block = ifelse.conditional_true_block
        else:
            self.cond_block = ifelse.conditional_false_block
        if not isinstance(self.cond_block, ConditionalBlock):
            raise TypeError("Unexpected situation")
        self.cond_block = self.cond_block.block()
    def __enter__(self):
        self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
        self.cond_block.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        # Each branch must register at least one output before leaving.
        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
    """Batch-level if/else: splits each input by the bool mask ``cond``,
    runs the true/false blocks on their partitions, and merges outputs.
    """

    # Status values tracked while building the two branch blocks.
    OUT_IF_ELSE_BLOCKS = 0
    IN_IF_ELSE_TRUE_BLOCKS = 1
    IN_IF_ELSE_FALSE_BLOCKS = 2
    def __init__(self, cond, name=None):
        """
        Args:
            cond (Variable): bool mask used to split inputs row-wise.
            name (str|None): optional name scope.
        """
        if not isinstance(cond, Variable):
            raise TypeError("cond must be a Variable")
        self.helper = LayerHelper('ifelse', name=name)
        self.cond = cond
        self.input_table = {}  # id(x) -> (true_part, false_part)
        self.status = IfElse.OUT_IF_ELSE_BLOCKS
        self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
        self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        self.output_table = ([], [])  # (true_outs, false_outs)
    def input(self, x):
        """Inside a branch block, return the partition of *x* selected by
        ``cond`` for that branch (split lazily and cached per input)."""
        if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("input must in true/false blocks")
        if id(x) not in self.input_table:
            parent_block = self._parent_block()
            out_true = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                             self.helper.name),
                dtype=x.dtype)
            out_false = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                             self.helper.name),
                dtype=x.dtype)
            # Split rows of x into the true/false partitions by the mask.
            parent_block.append_op(
                type='split_lod_tensor',
                inputs={
                    'X': x,
                    'Mask': self.cond,
                },
                outputs={'OutTrue': out_true,
                         'OutFalse': out_false},
                attrs={'level': 0})
            self.input_table[id(x)] = (out_true, out_false)
        else:
            out_true, out_false = self.input_table[id(x)]
        if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
            return out_true
        else:
            return out_false
    def _parent_block(self):
        """Return the block enclosing the current branch block."""
        current_block = self.helper.main_program.current_block()
        return self.helper.main_program.block(current_block.parent_idx)
    def true_block(self):
        """Return the guard for the branch where cond is true."""
        return IfElseBlockGuard(True, self)
    def false_block(self):
        """Return the guard for the branch where cond is false."""
        return IfElseBlockGuard(False, self)
    def output(self, *outs):
        """Register branch outputs; must be called inside a branch block."""
        if self.status == self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("output can only be invoked in the sub-block")
        out_table = self.output_table[1 if self.status ==
                                      self.IN_IF_ELSE_TRUE_BLOCKS else 0]
        parent_block = self._parent_block()
        for each_out in outs:
            if not isinstance(each_out, Variable):
                raise TypeError("Each output should be a variable")
            # create outside tensor
            outside_out = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key("_".join(
                    [self.helper.name, 'output'])),
                dtype=each_out.dtype)
            out_table.append(outside_out)
            # assign local var to outside
            assign(input=each_out, output=outside_out)
    def __call__(self):
        """Merge the two branches' outputs by the mask and return them."""
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        false_len, true_len = list(map(len, self.output_table))
        if false_len == 0 and true_len == 0:
            raise ValueError("Must invoke true_block/false_block before "
                             "__call__")
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError("The output side must be same")
        elif false_len == 0 or true_len == 0:
            return self.output_table[0 if false_len != 0 else 1]
        # else none of false_len/true_len is zero
        # merge together
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0))
        return rlist
class DynamicRNN(object):
    """RNN over variable-length LoD sequences, built on a While loop.

    Inside block(), step_input()/static_input()/memory() describe the
    per-step computation; after the block, outputs registered via output()
    are converted back to LoDTensors and fetched by calling the instance.
    """

    # Lifecycle states for block().
    BEFORE_RNN = 0
    IN_RNN = 1
    AFTER_RNN = 2
    def __init__(self, name=None):
        self.helper = LayerHelper('dynamic_rnn', name=name)
        self.status = DynamicRNN.BEFORE_RNN
        self.lod_rank_table = None  # created lazily by the first step_input()
        self.max_seq_len = None  # int64 scalar, set by the first step_input()
        self.step_idx = None  # int64 loop counter, created in block()
        self.zero_idx = None  # constant-0 index, created lazily
        self.mem_dict = dict()  # memory var name -> backing tensor array
        self.output_array = []  # per-step output arrays
        self.outputs = []  # final LoDTensor outputs
        self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
        self.cond.stop_gradient = False
        self.while_op = While(self.cond)
        self.input_array = []  # (tensor_array, dtype) per step_input()
        self.mem_link = []  # (new_mem, mem_array) pairs written each step
    def step_input(self, x, level=0):
        """Declare sequence *x* as an RNN input; return the batch of current
        time steps inside the step block.

        Args:
            x (Variable): LoDTensor of sequences.
            level (int): LoD level used to build the rank table.

        Returns:
            Variable: the current-step slice of *x*.
        """
        self._assert_in_rnn_block_("step_input")
        if not isinstance(x, Variable):
            raise TypeError(
                "step_input() can only take a Variable as its input.")
        parent_block = self._parent_block_()
        if self.lod_rank_table is None:
            # First input: build the rank table, max_seq_len and the initial
            # loop condition (step_idx < max_seq_len) in the parent block.
            self.lod_rank_table = parent_block.create_var(
                name=unique_name.generate('lod_rank_table'),
                type=core.VarDesc.VarType.LOD_RANK_TABLE)
            self.lod_rank_table.stop_gradient = True
            parent_block.append_op(
                type='lod_rank_table',
                inputs={"X": x},
                outputs={"Out": self.lod_rank_table},
                attrs={"level": level})
            self.max_seq_len = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_max_seq_len'),
                dtype='int64')
            self.max_seq_len.stop_gradient = False
            parent_block.append_op(
                type='max_sequence_len',
                inputs={'RankTable': self.lod_rank_table},
                outputs={"Out": self.max_seq_len})
            self.cond.stop_gradient = True
            parent_block.append_op(
                type='less_than',
                inputs={'X': self.step_idx,
                        'Y': self.max_seq_len},
                outputs={'Out': self.cond},
                attrs={'force_cpu': True})
        input_array = parent_block.create_var(
            name=unique_name.generate('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
        self.input_array.append((input_array, x.dtype))
        # Reorder x by sequence length (descending) into a tensor array,
        # one entry per time step.
        parent_block.append_op(
            type='lod_tensor_to_array',
            inputs={'X': x,
                    'RankTable': self.lod_rank_table},
            outputs={'Out': input_array})
        return array_read(array=input_array, i=self.step_idx)
    def static_input(self, x):
        """Declare *x* as a non-sequence input, reordered to match the
        step-input batch order and shrunk to the live batch per step."""
        self._assert_in_rnn_block_("static_input")
        if not isinstance(x, Variable):
            raise TypeError(
                "static_input() can only take a Variable as its input")
        if self.lod_rank_table is None:
            raise RuntimeError(
                "static_input() must be called after step_input().")
        parent_block = self._parent_block_()
        x_reordered = parent_block.create_var(
            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype=x.dtype)
        parent_block.append_op(
            type='reorder_lod_tensor_by_rank',
            inputs={'X': [x],
                    'RankTable': [self.lod_rank_table]},
            outputs={'Out': [x_reordered]})
        return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
    @signature_safe_contextmanager
    def block(self):
        """Context manager defining the per-step computation; may be entered
        only once. Handles step counting, memory write-back and the loop
        condition around the user code."""
        if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoke once")
        self.step_idx = fill_constant(
            shape=[1], dtype='int64', value=0, force_cpu=True)
        self.step_idx.stop_gradient = False
        self.status = DynamicRNN.IN_RNN
        with self.while_op.block():
            yield
            # Post-step bookkeeping: advance the counter, persist updated
            # memories, and refresh the loop condition.
            increment(x=self.step_idx, value=1.0, in_place=True)
            for new_mem, mem_array in self.mem_link:
                array_write(x=new_mem, i=self.step_idx, array=mem_array)
            less_than(
                x=self.step_idx,
                y=self.max_seq_len,
                force_cpu=True,
                cond=self.cond)
        self.status = DynamicRNN.AFTER_RNN
        for each_array in self.output_array:
            self.outputs.append(
                array_to_lod_tensor(
                    x=each_array, table=self.lod_rank_table))
    def __call__(self, *args, **kwargs):
        """Return the RNN output(s); valid only after the step block."""
        if self.status != DynamicRNN.AFTER_RNN:
            raise ValueError(("Output of the dynamic RNN can only be visited "
                              "outside the rnn block."))
        if len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs
    def memory(self,
               init=None,
               shape=None,
               value=0.0,
               need_reorder=False,
               dtype='float32'):
        """Create a memory cell. Pass ``init`` for an explicit initial state
        (optionally reordered to match the input rank), or ``shape``/``value``
        to boot a constant state with the batch size of the first step input.

        Returns:
            Variable: the previous-step memory, shrunk to the live batch.
        """
        self._assert_in_rnn_block_('memory')
        self._init_zero_idx_()
        if init is not None:
            if not isinstance(init, Variable):
                raise TypeError(
                    "The input arg `init` of memory() must be a Variable")
            parent_block = self._parent_block_()
            init_tensor = init
            if need_reorder == True:
                if self.lod_rank_table is None:
                    raise ValueError(
                        'If set need_reorder to True, make sure step_input be '
                        'invoked before '
                        'memory(init=init, need_reordered=True, ...).')
                init_reordered = parent_block.create_var(
                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                    type=core.VarDesc.VarType.LOD_TENSOR,
                    dtype=init.dtype)
                parent_block.append_op(
                    type='reorder_lod_tensor_by_rank',
                    inputs={
                        'X': [init_tensor],
                        'RankTable': [self.lod_rank_table]
                    },
                    outputs={'Out': [init_reordered]})
                init_tensor = init_reordered
            mem_array = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_mem_array'),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=init.dtype)
            parent_block.append_op(
                type='write_to_array',
                inputs={'X': init_tensor,
                        'I': self.zero_idx},
                outputs={'Out': mem_array})
            retv = array_read(array=mem_array, i=self.step_idx)
            # Shrink to the batch rows still alive at this time step.
            retv = shrink_memory(
                x=retv, i=self.step_idx, table=self.lod_rank_table)
            self.mem_dict[retv.name] = mem_array
            return retv
        else:
            if len(self.input_array) == 0:
                raise ValueError(
                    "step_input should be invoked before memory(shape=..., value=...)"
                )
            parent_block = self._parent_block_()
            init = parent_block.create_var(
                name=unique_name.generate('mem_init'), dtype=dtype)
            # Use the first step input's step-0 entry as the batch-size
            # reference for the constant boot state.
            arr, dtype = self.input_array[0]
            in0 = parent_block.create_var(
                name=unique_name.generate('in0'), dtype=dtype)
            parent_block.append_op(
                type='read_from_array',
                inputs={'X': [arr],
                        'I': [self.zero_idx]},
                outputs={'Out': [in0]})
            parent_block.append_op(
                type='fill_constant_batch_size_like',
                inputs={'Input': [in0]},
                outputs={'Out': [init]},
                attrs={
                    'shape': [-1] + shape,
                    'value': float(value),
                    'dtype': init.dtype
                })
            # Recurse with the freshly booted init tensor.
            return self.memory(init=init)
    def update_memory(self, ex_mem, new_mem):
        """Link memory *ex_mem* (from memory()) to its next-step value
        *new_mem*; the write-back happens at the end of each step."""
        self._assert_in_rnn_block_('update_memory')
        if not isinstance(ex_mem, Variable):
            raise TypeError("The input arg `ex_mem` of update_memory() must "
                            "be a Variable")
        if not isinstance(new_mem, Variable):
            raise TypeError("The input arg `new_mem` of update_memory() must "
                            "be a Variable")
        mem_array = self.mem_dict.get(ex_mem.name, None)
        if mem_array is None:
            raise ValueError("Please invoke memory before update_memory")
        if self.lod_rank_table is None:
            raise ValueError("Please invoke step_input before update_memory")
        self.mem_link.append((new_mem, mem_array))
    def output(self, *outputs):
        """Register per-step outputs; each is appended to its own tensor
        array and turned into a LoDTensor after the block."""
        self._assert_in_rnn_block_('output')
        parent_block = self._parent_block_()
        for each in outputs:
            outside_array = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key("_".join(
                    [self.helper.name, "output_array", each.name])),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=each.dtype)
            array_write(x=each, i=self.step_idx, array=outside_array)
            self.output_array.append(outside_array)
    def _init_zero_idx_(self):
        """Lazily create the constant int64 zero index in the parent block."""
        if self.zero_idx is None:
            parent_block = self._parent_block_()
            self.zero_idx = parent_block.create_var(
                name=unique_name.generate('zero_idx'), dtype='int64')
            parent_block.append_op(
                type='fill_constant',
                inputs={},
                outputs={'Out': [self.zero_idx]},
                attrs={
                    'shape': [1],
                    'dtype': self.zero_idx.dtype,
                    'value': float(0),
                    'force_cpu': True
                })
    def _parent_block_(self):
        """Return the block that encloses the RNN step block."""
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block
    def _assert_in_rnn_block_(self, method):
        """Raise ValueError unless *method* is being called inside block()."""
        if self.status != DynamicRNN.IN_RNN:
            raise ValueError("{0} can only be invoked inside rnn block.".format(
                method))
def switch_case(branch_index, branch_fns, default=None, name=None):
    """Build a runtime multi-way branch: run the callable whose key equals
    the runtime value of ``branch_index``.

    :param branch_index: integer-typed Variable (uint8/int32/int64) whose
        runtime value selects the branch.
    :param branch_fns: dict / list / tuple of (int key, callable) pairs, or
        a plain sequence of callables (then each callable's position is its
        key).
    :param default: callable to run when no key matches.  When None, the
        branch with the largest key is used as the default.
    :param name: optional layer name, used for LayerHelper bookkeeping only.
    :return: whatever the selected branch callable returns.
    :raises TypeError: on malformed ``branch_index``/``branch_fns``/``default``.
    :raises ValueError: when a key appears more than once in ``branch_fns``.
    """
    helper = LayerHelper('switch_case', **locals())
    def _check_args(branch_index, branch_fns, default):
        # branch_index must be an integer-typed Variable.
        if not isinstance(branch_index, Variable):
            raise TypeError(
                _error_message("The type", "branch_index", "switch_case",
                               "Variable", type(branch_index)))
        if convert_dtype(branch_index.dtype) not in ["uint8", "int32", "int64"]:
            raise TypeError(
                _error_message("The data type", "branch_index", "switch_case",
                               "uint8, int32 or int64",
                               convert_dtype(branch_index.dtype)))
        # equal() below compares against int64 constants, so normalize first.
        if convert_dtype(branch_index.dtype) != "int64":
            branch_index = cast(branch_index, "int64")
        if not isinstance(branch_fns, (list, tuple, dict)):
            raise TypeError(
                _error_message("The type", "branch_fns", "switch_case",
                               "dict, tuple or list", type(branch_fns)))
        # Normalize branch_fns into a sequence of (key, fn) pairs: a dict
        # becomes its items; a plain sequence of callables is enumerated.
        branch_fns = branch_fns.items() if isinstance(branch_fns,
                                                      dict) else branch_fns
        branch_fns = list(enumerate(branch_fns)) if all(
            callable(fn) for fn in branch_fns) else branch_fns
        keys_of_fns = []
        for index_fn_pair in branch_fns:
            if not isinstance(index_fn_pair, tuple):
                # NOTE(review): the message reports type(branch_fns), not the
                # offending element's type -- looks like an upstream bug.
                raise TypeError(
                    _error_message("The elements' type", "branch_fns",
                                   "switch_case", "tuple", type(branch_fns)))
            if len(index_fn_pair) != 2:
                raise TypeError(
                    _error_message("The tuple's size", "branch_fns",
                                   "switch_case", "2",
                                   str(len(index_fn_pair)) + "-tuple"))
            key, fn = index_fn_pair
            if not isinstance(key, int):
                raise TypeError(
                    _error_message("The key's type", "branch_fns",
                                   "switch_case", "int", type(key)))
            if key in keys_of_fns:
                raise ValueError(
                    "The key in 'branch_fns' must be unique, but '{}' appears more than once.".
                    format(key))
            else:
                keys_of_fns.append(key)
            if not callable(fn):
                raise TypeError(
                    _error_message("The type of function for key {}".format(
                        key), "branch_fns", "switch_case", "callable", type(
                            fn)))
        # Without an explicit default, the branch with the largest key acts
        # as the default and is removed from the matched branches.
        if default is None:
            default = sorted(branch_fns)[-1][1]
            branch_fns = sorted(branch_fns)[:-1]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")
        # Turn every (key, fn) into a (branch_index == key, fn) pair.
        pred_fn_pairs = []
        for index, fn in branch_fns:
            new_index = fill_constant(shape=[1], dtype="int64", value=index)
            pred = equal(branch_index, new_index)
            pred_fn_pairs.append((pred, fn))
        return pred_fn_pairs, default
    pred_fn_pairs, default = _check_args(branch_index, branch_fns, default)
    # Fold the predicates into nested cond() calls with `default` at the
    # bottom; iterating the pairs builds the chain innermost-first.
    false_fn = default
    for pred, true_fn in pred_fn_pairs:
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)
    final_fn = false_fn
    return final_fn()
@templatedoc()
def reorder_lod_tensor_by_rank(x, rank_table):
    """Reorder the sequences in ``x`` to follow the order recorded in
    ``rank_table`` and return the reordered Variable.
    """
    helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
    # Both inputs must be Variables.
    for arg_name in ('x', 'rank_table'):
        helper.is_instance(arg_name, Variable)
    reordered = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reorder_lod_tensor_by_rank',
        inputs={'X': [x],
                'RankTable': [rank_table]},
        outputs={'Out': [reordered]})
    return reordered
def is_empty(x, cond=None):
    """Test whether the Variable ``x`` holds zero elements.

    :param x: the Variable to test.
    :param cond: optional pre-created bool Variable to receive the result;
        a new one is created when None.
    :return: a bool Variable that is True iff ``x`` is empty.
    :raises TypeError: if ``cond`` is not a Variable or not of bool dtype.
    """
    helper = LayerHelper("is_empty", **locals())
    if cond is None:
        # The result is a derived flag; it never needs a gradient.
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    elif not isinstance(cond, Variable):
        raise TypeError("cond takes a variable")
    elif cond.dtype != 'bool':
        # NOTE(review): this compares dtype against the *string* 'bool';
        # confirm cond.dtype really is its string form here, otherwise the
        # check misfires for enum-valued dtypes.
        raise TypeError("The data type of cond must be bool")
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
    return cond
| true | true |
f73237cb25ef0fec908c5cdf07d7c7b7b9907d50 | 332 | py | Python | ogreserver/forms/search.py | oii/ogreserver | 942d8ee612206fb094f04b3ff976187abebf3069 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ogreserver/forms/search.py | oii/ogreserver | 942d8ee612206fb094f04b3ff976187abebf3069 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ogreserver/forms/search.py | oii/ogreserver | 942d8ee612206fb094f04b3ff976187abebf3069 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from flask_wtf import FlaskForm
from wtforms import TextField, BooleanField
class SearchForm(FlaskForm):
    """Search form: a free-text query plus two boolean content filters."""

    # Free-text search query string.
    s = TextField('s')
    # Limit results to curated entries (checked by default).
    is_curated = BooleanField('Curated Only?', default=True)
    # Limit results to fiction entries (checked by default).
    is_fiction = BooleanField('Fiction Only?', default=True)
| 27.666667 | 60 | 0.783133 | from __future__ import absolute_import
from __future__ import unicode_literals
from flask_wtf import FlaskForm
from wtforms import TextField, BooleanField
class SearchForm(FlaskForm):
s = TextField('s')
is_curated = BooleanField('Curated Only?', default=True)
is_fiction = BooleanField('Fiction Only?', default=True)
| true | true |
f732394175903cc275173763bb703893ecd75976 | 369 | py | Python | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 1 | 2016-10-05T05:17:35.000Z | 2016-10-05T05:17:35.000Z | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 17 | 2016-09-30T13:43:20.000Z | 2021-06-10T20:44:40.000Z | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 6 | 2016-04-11T14:41:44.000Z | 2017-10-20T21:16:39.000Z | from django.test import TestCase
from acp_calendar.initial_data import get_holidays_list
class TestInitialData(TestCase):
    """Sanity checks for the holiday fixture bundled with acp_calendar."""

    def test_get_holidays_list(self):
        holidays = get_holidays_list()
        # The shipped fixture holds exactly 144 holiday entries.
        self.assertEqual(144, len(holidays))
        # First and last entries: 2006-01-01 and 2018-12-25.
        self.assertEqual('2006-01-01', holidays[0]['date'])
        self.assertEqual('2018-12-25', holidays[-1]['date'])
| 28.384615 | 60 | 0.710027 | from django.test import TestCase
from acp_calendar.initial_data import get_holidays_list
class TestInitialData(TestCase):
def test_get_holidays_list(self):
holidays = get_holidays_list()
self.assertEqual(144, len(holidays))
self.assertEqual('2006-01-01', holidays[0]['date'])
self.assertEqual('2018-12-25', holidays[-1]['date'])
| true | true |
f73239a9d45e6658e36f048b96ff430af7d2667e | 7,748 | py | Python | ironicclient/tests/unit/v1/test_driver_shell.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | ironicclient/tests/unit/v1/test_driver_shell.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | ironicclient/tests/unit/v1/test_driver_shell.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironicclient.common import cliutils
from ironicclient.tests.unit import utils
import ironicclient.v1.driver as v1_driver
import ironicclient.v1.driver_shell as d_shell
class DriverShellTest(utils.BaseTestCase):
    """Unit tests for the ironic CLI driver sub-commands.

    All client/driver interactions go through MagicMocks; each test checks
    that the shell command forwards the parsed args to the expected
    DriverManager call.
    """

    def setUp(self):
        super(DriverShellTest, self).setUp()
        # Shared fake client whose .driver attribute mimics DriverManager.
        client_mock = mock.MagicMock()
        driver_mock = mock.MagicMock(spec=v1_driver.DriverManager)
        client_mock.driver = driver_mock
        self.client_mock = client_mock
    def test_driver_show(self):
        # Capture what _print_driver_show would print instead of printing it.
        actual = {}
        fake_print_dict = lambda data, *args, **kwargs: actual.update(data)
        with mock.patch.object(cliutils, 'print_dict', fake_print_dict):
            driver = object()
            d_shell._print_driver_show(driver)
        # Every driver field the show command is expected to display.
        exp = ['hosts', 'name', 'type',
               'default_bios_interface', 'default_boot_interface',
               'default_console_interface', 'default_deploy_interface',
               'default_inspect_interface', 'default_management_interface',
               'default_network_interface', 'default_power_interface',
               'default_raid_interface', 'default_rescue_interface',
               'default_storage_interface', 'default_vendor_interface',
               'enabled_bios_interfaces', 'enabled_boot_interfaces',
               'enabled_console_interfaces', 'enabled_deploy_interfaces',
               'enabled_inspect_interfaces', 'enabled_management_interfaces',
               'enabled_network_interfaces', 'enabled_power_interfaces',
               'enabled_raid_interfaces', 'enabled_rescue_interfaces',
               'enabled_storage_interfaces', 'enabled_vendor_interfaces']
        act = actual.keys()
        self.assertEqual(sorted(exp), sorted(act))
    def test_do_driver_vendor_passthru_with_args(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.http_method = 'POST'
        args.method = 'method'
        # 'key=value' strings must be parsed into a dict before the call.
        args.arguments = [['arg1=val1', 'arg2=val2']]
        d_shell.do_driver_vendor_passthru(client_mock, args)
        client_mock.driver.vendor_passthru.assert_called_once_with(
            args.driver_name, args.method, http_method=args.http_method,
            args={'arg1': 'val1', 'arg2': 'val2'})
    def test_do_driver_vendor_passthru_without_args(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.http_method = 'POST'
        args.method = 'method'
        args.arguments = [[]]
        d_shell.do_driver_vendor_passthru(client_mock, args)
        client_mock.driver.vendor_passthru.assert_called_once_with(
            args.driver_name, args.method, args={},
            http_method=args.http_method)
    def test_do_driver_properties(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.json = False
        d_shell.do_driver_properties(client_mock, args)
        client_mock.driver.properties.assert_called_once_with("driver_name")
    @mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
    def test_do_driver_properties_with_wrap_default(self, mock_print_dict):
        client_mock = self.client_mock
        client_mock.driver.properties.return_value = {
            'foo': 'bar',
            'baz': 'qux'}
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.wrap = 0
        args.json = False
        d_shell.do_driver_properties(client_mock, args)
        mock_print_dict.assert_called_with(
            {'foo': 'bar', 'baz': 'qux'},
            dict_value='Description',
            json_flag=False,
            wrap=0)
    @mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
    def test_do_driver_properties_with_wrap(self, mock_print_dict):
        client_mock = self.client_mock
        client_mock.driver.properties.return_value = {
            'foo': 'bar',
            'baz': 'qux'}
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.wrap = 80
        args.json = False
        d_shell.do_driver_properties(client_mock, args)
        mock_print_dict.assert_called_with(
            {'foo': 'bar', 'baz': 'qux'},
            dict_value='Description',
            json_flag=False,
            wrap=80)
    @mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
    def _test_do_driver_raid_logical_disk(self, print_dict_mock, wrap=0):
        # Shared helper for the two wrap variants below.
        cli_mock = self.client_mock
        cli_mock.driver.raid_logical_disk_properties.return_value = {
            'foo': 'bar'}
        args = mock.MagicMock()
        args.driver_name = 'driver_name'
        args.wrap = wrap
        d_shell.do_driver_raid_logical_disk_properties(cli_mock, args)
        cli_mock.driver.raid_logical_disk_properties.assert_called_once_with(
            "driver_name")
        print_dict_mock.assert_called_with(
            {'foo': 'bar'},
            dict_value='Description',
            wrap=wrap)
    def test_do_driver_raid_logical_disk_default_wrap(self):
        self._test_do_driver_raid_logical_disk()
    def test_do_driver_raid_logical_disk_with_wrap(self):
        self._test_do_driver_raid_logical_disk(wrap=80)
    def test_do_driver_show(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.driver_name = 'fake'
        args.json = False
        d_shell.do_driver_show(client_mock, args)
        client_mock.driver.get.assert_called_once_with('fake')
    def test_do_driver_list(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.type = None
        args.detail = None
        args.json = False
        d_shell.do_driver_list(client_mock, args)
        client_mock.driver.list.assert_called_once_with(driver_type=None,
                                                        detail=None)
    def test_do_driver_list_with_type_and_no_detail(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.type = 'classic'
        args.detail = False
        args.json = False
        d_shell.do_driver_list(client_mock, args)
        client_mock.driver.list.assert_called_once_with(driver_type='classic',
                                                        detail=False)
    def test_do_driver_list_with_detail(self):
        client_mock = self.client_mock
        args = mock.MagicMock()
        args.type = None
        args.detail = True
        args.json = False
        d_shell.do_driver_list(client_mock, args)
        client_mock.driver.list.assert_called_once_with(driver_type=None,
                                                        detail=True)
    def test_do_driver_get_vendor_passthru_methods(self):
        # Uses a fresh mock (not self.client_mock) on purpose.
        client_mock = mock.MagicMock()
        args = mock.MagicMock()
        args.driver_name = 'fake'
        d_shell.do_driver_get_vendor_passthru_methods(client_mock, args)
        mock_method = client_mock.driver.get_vendor_passthru_methods
        mock_method.assert_called_once_with('fake')
| 39.329949 | 78 | 0.657073 |
import mock
from ironicclient.common import cliutils
from ironicclient.tests.unit import utils
import ironicclient.v1.driver as v1_driver
import ironicclient.v1.driver_shell as d_shell
class DriverShellTest(utils.BaseTestCase):
def setUp(self):
super(DriverShellTest, self).setUp()
client_mock = mock.MagicMock()
driver_mock = mock.MagicMock(spec=v1_driver.DriverManager)
client_mock.driver = driver_mock
self.client_mock = client_mock
def test_driver_show(self):
actual = {}
fake_print_dict = lambda data, *args, **kwargs: actual.update(data)
with mock.patch.object(cliutils, 'print_dict', fake_print_dict):
driver = object()
d_shell._print_driver_show(driver)
exp = ['hosts', 'name', 'type',
'default_bios_interface', 'default_boot_interface',
'default_console_interface', 'default_deploy_interface',
'default_inspect_interface', 'default_management_interface',
'default_network_interface', 'default_power_interface',
'default_raid_interface', 'default_rescue_interface',
'default_storage_interface', 'default_vendor_interface',
'enabled_bios_interfaces', 'enabled_boot_interfaces',
'enabled_console_interfaces', 'enabled_deploy_interfaces',
'enabled_inspect_interfaces', 'enabled_management_interfaces',
'enabled_network_interfaces', 'enabled_power_interfaces',
'enabled_raid_interfaces', 'enabled_rescue_interfaces',
'enabled_storage_interfaces', 'enabled_vendor_interfaces']
act = actual.keys()
self.assertEqual(sorted(exp), sorted(act))
def test_do_driver_vendor_passthru_with_args(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.http_method = 'POST'
args.method = 'method'
args.arguments = [['arg1=val1', 'arg2=val2']]
d_shell.do_driver_vendor_passthru(client_mock, args)
client_mock.driver.vendor_passthru.assert_called_once_with(
args.driver_name, args.method, http_method=args.http_method,
args={'arg1': 'val1', 'arg2': 'val2'})
def test_do_driver_vendor_passthru_without_args(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.http_method = 'POST'
args.method = 'method'
args.arguments = [[]]
d_shell.do_driver_vendor_passthru(client_mock, args)
client_mock.driver.vendor_passthru.assert_called_once_with(
args.driver_name, args.method, args={},
http_method=args.http_method)
def test_do_driver_properties(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.json = False
d_shell.do_driver_properties(client_mock, args)
client_mock.driver.properties.assert_called_once_with("driver_name")
@mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
def test_do_driver_properties_with_wrap_default(self, mock_print_dict):
client_mock = self.client_mock
client_mock.driver.properties.return_value = {
'foo': 'bar',
'baz': 'qux'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = 0
args.json = False
d_shell.do_driver_properties(client_mock, args)
mock_print_dict.assert_called_with(
{'foo': 'bar', 'baz': 'qux'},
dict_value='Description',
json_flag=False,
wrap=0)
@mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
def test_do_driver_properties_with_wrap(self, mock_print_dict):
client_mock = self.client_mock
client_mock.driver.properties.return_value = {
'foo': 'bar',
'baz': 'qux'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = 80
args.json = False
d_shell.do_driver_properties(client_mock, args)
mock_print_dict.assert_called_with(
{'foo': 'bar', 'baz': 'qux'},
dict_value='Description',
json_flag=False,
wrap=80)
@mock.patch('ironicclient.common.cliutils.print_dict', autospec=True)
def _test_do_driver_raid_logical_disk(self, print_dict_mock, wrap=0):
cli_mock = self.client_mock
cli_mock.driver.raid_logical_disk_properties.return_value = {
'foo': 'bar'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = wrap
d_shell.do_driver_raid_logical_disk_properties(cli_mock, args)
cli_mock.driver.raid_logical_disk_properties.assert_called_once_with(
"driver_name")
print_dict_mock.assert_called_with(
{'foo': 'bar'},
dict_value='Description',
wrap=wrap)
def test_do_driver_raid_logical_disk_default_wrap(self):
self._test_do_driver_raid_logical_disk()
def test_do_driver_raid_logical_disk_with_wrap(self):
self._test_do_driver_raid_logical_disk(wrap=80)
def test_do_driver_show(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'fake'
args.json = False
d_shell.do_driver_show(client_mock, args)
client_mock.driver.get.assert_called_once_with('fake')
def test_do_driver_list(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.type = None
args.detail = None
args.json = False
d_shell.do_driver_list(client_mock, args)
client_mock.driver.list.assert_called_once_with(driver_type=None,
detail=None)
def test_do_driver_list_with_type_and_no_detail(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.type = 'classic'
args.detail = False
args.json = False
d_shell.do_driver_list(client_mock, args)
client_mock.driver.list.assert_called_once_with(driver_type='classic',
detail=False)
def test_do_driver_list_with_detail(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.type = None
args.detail = True
args.json = False
d_shell.do_driver_list(client_mock, args)
client_mock.driver.list.assert_called_once_with(driver_type=None,
detail=True)
def test_do_driver_get_vendor_passthru_methods(self):
client_mock = mock.MagicMock()
args = mock.MagicMock()
args.driver_name = 'fake'
d_shell.do_driver_get_vendor_passthru_methods(client_mock, args)
mock_method = client_mock.driver.get_vendor_passthru_methods
mock_method.assert_called_once_with('fake')
| true | true |
f73239b4487781d61b40f39c0bd8795ca4336a53 | 484 | py | Python | json_encoder.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | 2 | 2020-08-06T13:21:51.000Z | 2021-04-15T04:29:03.000Z | json_encoder.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | null | null | null | json_encoder.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | null | null | null | import json
import numpy as np
from actions import Actions
class CustomEncoder(json.JSONEncoder):
    """JSON encoder that also serializes NumPy scalars/arrays and Actions.

    NumPy integer and float scalars become plain Python ints/floats, bools
    become Python bools, arrays become (nested) lists, and Actions values
    are rendered via str(); anything else falls back to the base encoder,
    which raises TypeError.
    """

    def default(self, obj):
        """Convert *obj* to a JSON-serializable value or defer to the base.

        :param obj: the object json could not serialize natively.
        :return: a JSON-serializable representation of *obj*.
        :raises TypeError: via the base class when *obj* is unsupported.
        """
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.bool_):
            # np.bool_ is not a np.integer subclass; previously it fell
            # through to the base encoder and raised TypeError.
            return bool(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, Actions):
            return str(obj)
        return super(CustomEncoder, self).default(obj)
| 25.473684 | 58 | 0.609504 | import json
import numpy as np
from actions import Actions
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, Actions):
return str(obj)
else:
return super(CustomEncoder, self).default(obj)
| true | true |
f73239bc46550b56b6efc3d11e7395420f4d3bb7 | 1,554 | py | Python | src/fonduer/utils/utils_parser.py | SenWu/fonduer | c4f8d95cec97552b34412c6787eb7370ae17424f | [
"MIT"
] | 1 | 2018-11-02T06:02:13.000Z | 2018-11-02T06:02:13.000Z | src/fonduer/utils/utils_parser.py | SenWu/fonduer | c4f8d95cec97552b34412c6787eb7370ae17424f | [
"MIT"
] | null | null | null | src/fonduer/utils/utils_parser.py | SenWu/fonduer | c4f8d95cec97552b34412c6787eb7370ae17424f | [
"MIT"
] | 1 | 2018-09-24T03:27:04.000Z | 2018-09-24T03:27:04.000Z | from typing import List, Optional, Tuple
def build_node(type: str, name: str, content: str) -> str:
    """
    Wrap content in an HTML node of the given type.

    :param type: content type, one of "doc", "section", "text" or "figure"
    :param name: content name (e.g., the section name); unused for "doc"
    :param content: the actual content (image source for "figure")
    :return: new string with the content wrapped in HTML markup
    :raises RuntimeError: if *type* is not one of the supported types
    """
    if type == "doc":
        return f"<html>{content}</html>"
    if type == "section":
        return f"<section name='{name}'>{content}</section>"
    if type == "text":
        return f"<p name='{name}'>{content}</p>"
    if type == "figure":
        return f"<img name='{name}' src='{content}'/>"
    # The original raised f"unknown type" -- an f-string with no
    # placeholder -- so the offending value was never reported.
    raise RuntimeError(f"unknown type {type!r}")
def column_constructor(
    text: str,
    name: Optional[str] = None,
    type: str = "text",
    delim: Optional[str] = None,
) -> List[Tuple[str, str, str]]:
    """
    Split raw content into a list of structured (type, name, content) tuples.

    :param text: the content to convert
    :param name: content name attached to every tuple (default: None)
    :param type: content type attached to every tuple (default: "text")
    :param delim: delimiter used to split *text*; when None the text is
        kept whole and a single tuple is returned
    :return: a list of (content type, content name, content) tuples
    """
    pieces = [text] if delim is None else text.split(delim)
    return [(type, name, piece) for piece in pieces]
| 30.470588 | 79 | 0.611326 | from typing import List, Optional, Tuple
def build_node(type: str, name: str, content: str) -> str:
if type == "doc":
return f"<html>{content}</html>"
if type == "section":
return f"<section name='{name}'>{content}</section>"
if type == "text":
return f"<p name='{name}'>{content}</p>"
if type == "figure":
return f"<img name='{name}' src='{content}'/>"
raise RuntimeError(f"unknown type")
def column_constructor(
text: str,
name: Optional[str] = None,
type: str = "text",
delim: Optional[str] = None,
) -> List[Tuple[str, str, str]]:
if delim is None:
return [(type, name, text)]
return [(type, name, content) for content in text.split(delim)]
| true | true |
f7323bb40b17dc7bb6c240025103a25a642c8bc1 | 259 | py | Python | ecdc_status/crime_scene/urls.py | ess-dmsc/ecdc-status | 8057b2995f2404b6eac6b6a723f8776137a71328 | [
"BSD-2-Clause"
] | null | null | null | ecdc_status/crime_scene/urls.py | ess-dmsc/ecdc-status | 8057b2995f2404b6eac6b6a723f8776137a71328 | [
"BSD-2-Clause"
] | null | null | null | ecdc_status/crime_scene/urls.py | ess-dmsc/ecdc-status | 8057b2995f2404b6eac6b6a723f8776137a71328 | [
"BSD-2-Clause"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.list),
path('random/', views.random_crime_scene),
path('<int:id>/', views.crime_scene),
path('data/<int:id>', views.crime_scene_data, name="crime_scene_data"),
] | 25.9 | 75 | 0.675676 | from django.urls import path
from . import views
urlpatterns = [
path('', views.list),
path('random/', views.random_crime_scene),
path('<int:id>/', views.crime_scene),
path('data/<int:id>', views.crime_scene_data, name="crime_scene_data"),
] | true | true |
f7323bfabed34ca36a6d78e1a86dbd6f1606129a | 1,888 | py | Python | handlers/message.py | cvatricks/VoiceChatPyroBot | 4f45b2d75027e652fe1992369eaec4a7bf925b85 | [
"MIT"
] | 3 | 2021-01-23T07:33:43.000Z | 2021-02-05T18:21:06.000Z | handlers/message.py | Vivektp/VoiceChatPyroBot-1 | e3fda36ee0267a271d79938048a60d7d9ffeb383 | [
"MIT"
] | null | null | null | handlers/message.py | Vivektp/VoiceChatPyroBot-1 | e3fda36ee0267a271d79938048a60d7d9ffeb383 | [
"MIT"
] | 3 | 2020-12-31T12:06:28.000Z | 2021-03-23T16:32:39.000Z | from pyrogram import filters
from pyrogram.handlers import MessageHandler
from helpers import is_youtube
from ytdl import download
import player
from config import LOG_GROUP
async def message(client, message):
    """Handle a private text message: validate it as a single YouTube video
    link, schedule the download, and queue the result for playback.

    The nested tuples/lists passed to download() are (callable, args)
    callback descriptors consumed by ytdl.download and player.play; their
    positional layout is significant -- do not reorder.
    """
    # Commands (e.g. /start) are handled by other handlers.
    if message.text.startswith("/"):
        return
    if not is_youtube(message.text):
        await message.reply_text("This (link) is not valid.")
        return
    # Reject playlist links; only single videos are supported.
    if "list=" in message.text:
        await message.reply_text("Send me a video link, not a playlist link.")
        return
    await message.reply_text("Download scheduled.", quote=True)
    download(
        # Invoked when the download starts.
        (
            message.reply_text,
            ("Downloading...",)
        ),
        # Invoked when the download finishes and the track is queued.
        (
            message.reply_text,
            (f"Downloaded and scheduled to play at position {player.q.qsize() + 1}.",)
        ),
        [
            player.play,
            [
                None,
                # Invoked when playback starts.
                (
                    message.reply_text,
                    ("Playing...",)
                ),
                # Invoked when playback finishes.
                (
                    message.reply_text,
                    ("Finished playing...",)
                ),
                None,
                None,
                message.from_user.id,
                message.from_user.first_name,
                # Optional "now playing" announcement sent to the log group.
                [
                    client.send_message,
                    [
                        LOG_GROUP,
                        "<b>NOW PLAYING</b>\n"
                        "Title: <a href=\"{}\">{}</a>\n"
                        "Requested By: <a href=\"tg://user?id={}\">{}</a>"
                    ]
                ] if LOG_GROUP else None
            ]
        ],
        message.text,
    )
# Handlers exported for the bot's loader: each entry pairs a handler with
# an integer (presumably the pyrogram dispatch group -- confirm against the
# loader).  Only private text messages reach the handler above.
__handlers__ = [
    [
        MessageHandler(
            message,
            filters.text
            & filters.private
        ),
        2
    ]
]
| 26.222222 | 87 | 0.423729 | from pyrogram import filters
from pyrogram.handlers import MessageHandler
from helpers import is_youtube
from ytdl import download
import player
from config import LOG_GROUP
async def message(client, message):
if message.text.startswith("/"):
return
if not is_youtube(message.text):
await message.reply_text("This (link) is not valid.")
return
if "list=" in message.text:
await message.reply_text("Send me a video link, not a playlist link.")
return
await message.reply_text("Download scheduled.", quote=True)
download(
(
message.reply_text,
("Downloading...",)
),
(
message.reply_text,
(f"Downloaded and scheduled to play at position {player.q.qsize() + 1}.",)
),
[
player.play,
[
None,
(
message.reply_text,
("Playing...",)
),
(
message.reply_text,
("Finished playing...",)
),
None,
None,
message.from_user.id,
message.from_user.first_name,
[
client.send_message,
[
LOG_GROUP,
"<b>NOW PLAYING</b>\n"
"Title: <a href=\"{}\">{}</a>\n"
"Requested By: <a href=\"tg://user?id={}\">{}</a>"
]
] if LOG_GROUP else None
]
],
message.text,
)
__handlers__ = [
[
MessageHandler(
message,
filters.text
& filters.private
),
2
]
]
| true | true |
f7323c1c4fea94b6dad136f49474d89ef42d1a21 | 1,459 | py | Python | utils/test/testapi/opnfv_testapi/resources/project_models.py | kkltcjk/reporting | 460731b8b2da037159649b02ffed798656dad8a9 | [
"Apache-2.0"
] | null | null | null | utils/test/testapi/opnfv_testapi/resources/project_models.py | kkltcjk/reporting | 460731b8b2da037159649b02ffed798656dad8a9 | [
"Apache-2.0"
] | null | null | null | utils/test/testapi/opnfv_testapi/resources/project_models.py | kkltcjk/reporting | 460731b8b2da037159649b02ffed798656dad8a9 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import models
from opnfv_testapi.tornado_swagger import swagger
@swagger.model()
class ProjectCreateRequest(models.ModelBase):
    """Request body for creating a project; name is required."""

    def __init__(self, name, description=''):
        self.name = name
        self.description = description
@swagger.model()
class ProjectUpdateRequest(models.ModelBase):
    """Request body for updating a project; both fields are optional."""

    def __init__(self, name='', description=''):
        self.name = name
        self.description = description
@swagger.model()
class Project(models.ModelBase):
    """A stored project record, with database id and creation date."""

    def __init__(self,
                 name=None, _id=None, description=None, create_date=None):
        self._id = _id
        self.name = name
        self.description = description
        # Note: the attribute is `creation_date` while the parameter is
        # `create_date`.
        self.creation_date = create_date
@swagger.model()
class Projects(models.ModelBase):
    """
    List wrapper used as the response body for project listings.

    @property projects:
    @ptype projects: C{list} of L{Project}
    """
    def __init__(self):
        self.projects = list()
    @staticmethod
    def attr_parser():
        # Tells ModelBase how to deserialize each element of `projects`.
        return {'projects': Project}
| 29.77551 | 78 | 0.611378 | true | true | |
f7323c996636da5b03b4665b07a0e5c0fa576a77 | 274 | py | Python | alert_rules/alr_get.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 1 | 2022-03-17T12:51:45.000Z | 2022-03-17T12:51:45.000Z | alert_rules/alr_get.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 2 | 2021-11-03T15:34:40.000Z | 2021-12-14T19:50:20.000Z | alert_rules/alr_get.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 4 | 2021-11-09T17:57:01.000Z | 2022-01-24T17:41:21.000Z | def get_alert_rules(session: object, logger):
'''
Accepts a tenant session object.
Gets all alert rules from a tenant
'''
logger.debug('API - Getting Alert Rules')
res = session.request("GET", "/v2/alert/rule")
data = res.json()
return data | 24.909091 | 50 | 0.638686 | def get_alert_rules(session: object, logger):
logger.debug('API - Getting Alert Rules')
res = session.request("GET", "/v2/alert/rule")
data = res.json()
return data | true | true |
f7323d4c992046056cb702fd74ddd0fe7f0a0f02 | 6,820 | py | Python | CornerNetEngine.py | gordonjun2/CornerNet | d1a8d87903433ddbe0fa8b96c7388b955021e53c | [
"BSD-3-Clause"
] | 2 | 2020-01-22T06:22:16.000Z | 2020-02-10T08:47:20.000Z | CornerNetEngine.py | gordonjun2/CornerNet | d1a8d87903433ddbe0fa8b96c7388b955021e53c | [
"BSD-3-Clause"
] | null | null | null | CornerNetEngine.py | gordonjun2/CornerNet | d1a8d87903433ddbe0fa8b96c7388b955021e53c | [
"BSD-3-Clause"
] | null | null | null | import argparse
import time
import cv2
from config import system_configs
from utils.drawer import Drawer # Import Drawer to add bboxes
import os
import torch
import pprint
import json
import importlib
import numpy as np
import matplotlib
from test.coco_video import kp_detection
from nnet.py_factory_video import NetworkFactory # Import CornerNet Model
from db.detection_video import db_configs # Import 'db' parameters
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
class CornerNetEngine(object):
    """Inference wrapper around a pretrained CornerNet detector.

    Loads the configuration from ./config/CornerNet.json, restores the
    500000-iteration snapshot and exposes single-image and video helpers.
    """

    def __init__(self):
        # Snapshot ./cache/nnet/CornerNet/CornerNet_500000.pkl is restored
        # via load_params("500000") below (NetworkFactory resolves the path);
        # the old unused `model` local was removed.
        json_file = "./config/CornerNet.json"
        with open(json_file, "r") as f:
            configs = json.load(f)
        configs["system"]["snapshot_name"] = "CornerNet"
        system_configs.update_config(configs["system"])  # Update config.py based on retrieved 'system' parameters
        db_configs.update_config(configs["db"])
        self.nnet = NetworkFactory()
        self.nnet.load_params("500000")
        self.nnet.cuda()
        self.nnet.eval_mode()

    def show_image(self, img, score_min=0.5, save=False):
        """Run detection on one image.

        :param img: input image array passed straight to kp_detection.
        :param score_min: minimum score for a detection to be kept.
        :param save: unused here; kept for interface compatibility.
        :return: list of dicts with 'bbox' ([x1, y1, x2, y2] ints),
            'class' and 'score'.
        """
        det_list = list()
        start_time = time.time()
        detections = kp_detection(img, self.nnet, score_min)
        end_time = time.time()
        infer_time = end_time - start_time
        print("Inference Time:" + str(infer_time) + "s")
        # The enumerate index was unused; iterate the detections directly.
        for det in detections:
            detection = {
                'bbox': [int(det["bbox"][0]), int(det["bbox"][1]), int(det["bbox"][2]), int(det["bbox"][3])],
                'class': det["category_id"],
                'score': det["score"]
            }
            det_list.append(detection)
        return det_list

    def show_video(self, video_file, nnet, drawer, score_min, save=False):
        """Run detection frame-by-frame over a video file and display it.

        :param video_file: path to the input video.
        :param nnet: network used for inference (NOTE(review): passed in
            rather than using self.nnet -- kept for caller compatibility).
        :param drawer: object providing draw_dets_video(frame, dets, time).
        :param score_min: minimum score for a detection to be kept.
        :param save: when True, write each annotated frame to
            ./Video_Frames/To_Convert/<n>.jpg.
        """
        cap = cv2.VideoCapture(video_file)
        fps = cap.get(cv2.CAP_PROP_FPS)
        print("FPS:" + str(fps))
        if not cap.isOpened():
            print("Error in opening video stream or file")
        frame_count = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                frame_count += 1
                start_time = time.time()
                detections = kp_detection(frame, nnet, score_min)
                end_time = time.time()
                infer_time = end_time - start_time
                print("Inference Time:" + str(infer_time) + "s")
                # Overlay the detections and show the annotated frame.
                frame_det = drawer.draw_dets_video(frame, detections, infer_time)
                cv2.imshow("Frame", frame_det)
                if save:
                    cv2.imwrite('./Video_Frames/To_Convert/' + str(frame_count) + ".jpg", frame_det)
                # Quit playback on 'q'.
                if cv2.waitKey(25) & 0xFF == ord("q"):
                    break
            else:
                break
        cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # CLI entry point: load the model described by ./config/<model>.json,
    # restore pretrained weights at --testiter, then run detection on the
    # image or video given by --file.
    parser = argparse.ArgumentParser("Video Demo")
    parser.add_argument("--model", dest="json_file", help="which .json file in ./confg", type=str)  # CenterNet-52 or CenterNet-104
    parser.add_argument("--testiter", dest="testiter",
                        help="test at iteration i",
                        default=None, type=int)  # Used to identify pretrained model
    parser.add_argument("--file", dest="file_dir", help="video file path", type=str)  # Path to video for detection
    parser.add_argument("--score", dest="score_min", help="Remove bboxes of those scores < score",
                        type=float)  # Minimise bboxes
    parser.add_argument("--save", action="store_true")
    #parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()

    print("Video File:" + str(args.file_dir))
    json_file = os.path.join(system_configs.config_dir, args.json_file + ".json")
    print("json_file: {}".format(json_file))
    with open(json_file, "r") as f:
        configs = json.load(f)  # Read .json file to retrieve 'system' and 'db' parameters

    configs["system"]["snapshot_name"] = args.json_file  # Insert model's name into configuration file
    system_configs.update_config(configs["system"])  # Update config.py based on retrieved 'system' parameters
    db_configs.update_config(configs["db"])  # Update db/base.py based on retrieved 'db' parameters

    print("system config...")
    pprint.pprint(system_configs.full)  # Show 'system' parameters in terminal
    print("db config...")
    pprint.pprint(db_configs.full)  # Show 'db' parameters in terminal

    print("loading parameters at iteration: {}".format(args.testiter))  # Show args.testiter in terminal
    print("building neural network...")
    nnet = NetworkFactory()  # Initialise CenterNet's neural network
    print("loading parameters...")
    nnet.load_params(args.testiter)  # To locate CenterNet's pretrained model
    drawer = Drawer()  # Initialise Drawer to add bboxes in frames later
    #nnet.cpu()  # Uncomment if using cpu
    nnet.cuda()  # Comment if using cpu
    nnet.eval_mode()

    # NOTE(review): in this file show_video/show_image exist only as methods
    # of CornerNetEngine (taking self plus these arguments), so these bare
    # calls look like they would raise NameError at runtime; show_image is
    # also declared with (self, img, score_min, save) and would not accept
    # the nnet/drawer arguments passed here. Confirm against the original
    # repository whether module-level versions were dropped.
    if args.file_dir[args.file_dir.rfind('.') + 1:].lower() in video_ext:
        show_video(args.file_dir, nnet, drawer, args.score_min, args.save)
    else:
        show_image(args.file_dir, nnet, drawer, args.score_min, args.save)
| 42.098765 | 164 | 0.533871 | import argparse
import time
import cv2
from config import system_configs
from utils.drawer import Drawer
import os
import torch
import pprint
import json
import importlib
import numpy as np
import matplotlib
from test.coco_video import kp_detection
from nnet.py_factory_video import NetworkFactory
from db.detection_video import db_configs
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
class CornerNetEngine(object):
def __init__(self):
model = "./cache/nnet/CornerNet/CornerNet_500000.pkl"
json_file = "./config/CornerNet.json"
with open(json_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = "CornerNet"
system_configs.update_config(configs["system"])
db_configs.update_config(configs["db"])
self.nnet = NetworkFactory()
self.nnet.load_params("500000")
self.nnet.cuda()
self.nnet.eval_mode()
def show_image(self, img, score_min = 0.5, save = False):
det_list = list()
start_time = time.time()
detections = kp_detection(img, self.nnet, score_min)
end_time = time.time()
infer_time = end_time - start_time
print("Inference Time:" + str(infer_time) + "s")
for i, det in enumerate(detections):
detection = {
'bbox': [int(det["bbox"][0]), int(det["bbox"][1]), int(det["bbox"][2]), int(det["bbox"][3])],
'class': det["category_id"],
'score': det["score"]
}
det_list.append(detection)
return det_list
def show_video(self, video_file, nnet, drawer, score_min, save = False):
cap = cv2.VideoCapture(video_file)
fps = cap.get(cv2.CAP_PROP_FPS)
print("FPS:" + str(fps))
ret, frame = cap.read()
if ret:
frame_count += 1
start_time = time.time()
detections = kp_detection(frame, nnet, score_min)
end_time = time.time()
infer_time = end_time - start_time
print("Inference Time:" + str(infer_time) + "s")
frame_det = drawer.draw_dets_video(frame, detections, infer_time)
cv2.imshow("Frame", frame_det)
if save:
cv2.imwrite('./Video_Frames/To_Convert/' + str(frame_count) + ".jpg", frame_det)
if cv2.waitKey(25) & 0xFF == ord("q"):
break
else:
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Video Demo")
parser.add_argument("--model", dest="json_file", help="which .json file in ./confg", type=str)
parser.add_argument("--testiter", dest="testiter",
help="test at iteration i",
default=None, type=int)
parser.add_argument("--file", dest="file_dir", help="video file path", type=str)
parser.add_argument("--score", dest="score_min", help="Remove bboxes of those scores < score",
type=float)
parser.add_argument("--save", action="store_true")
args = parser.parse_args()
print("Video File:" + str(args.file_dir))
json_file = os.path.join(system_configs.config_dir, args.json_file + ".json")
print("json_file: {}".format(json_file))
with open(json_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.json_file
system_configs.update_config(configs["system"]) # Update config.py based on retrieved 'system' parameters
db_configs.update_config(configs["db"]) # Update db/base.py based on retrieved 'db' parameters
print("system config...")
pprint.pprint(system_configs.full) # Show 'system' parameters in terminal
print("db config...")
pprint.pprint(db_configs.full) # Show 'db' parameters in terminal
print("loading parameters at iteration: {}".format(args.testiter)) # Show args.testiter in terminal
print("building neural network...")
nnet = NetworkFactory() # Initialise CenterNet's neural network
print("loading parameters...")
nnet.load_params(args.testiter)
drawer = Drawer() # Initialise Drawer to add bboxes in frames later
#nnet.cpu() # Uncomment if using cpu
nnet.cuda() # Comment if using cpu
nnet.eval_mode()
if args.file_dir[args.file_dir.rfind('.') + 1:].lower() in video_ext:
show_video(args.file_dir, nnet, drawer, args.score_min, args.save)
else:
show_image(args.file_dir, nnet, drawer, args.score_min, args.save)
| true | true |
f7323d86d734c82e47f24220097e9e149b478eae | 2,663 | py | Python | tests/trees.py | andrewguy9/farmfs | 1cad69237ace53869b044afcb322773acf9bf447 | [
"MIT"
] | 5 | 2015-01-28T19:09:33.000Z | 2022-03-01T07:35:02.000Z | tests/trees.py | andrewguy9/farmfs | 1cad69237ace53869b044afcb322773acf9bf447 | [
"MIT"
] | 22 | 2015-01-01T19:10:28.000Z | 2022-01-18T21:52:39.000Z | tests/trees.py | andrewguy9/farmfs | 1cad69237ace53869b044afcb322773acf9bf447 | [
"MIT"
] | 1 | 2021-05-06T03:39:34.000Z | 2021-05-06T03:39:34.000Z | from farmfs.fs import sep, ROOT, Path, LINK, DIR
from itertools import permutations, combinations, chain, product
from collections import defaultdict
def permute_deep(options):
    """All orderings of *options* at every pick size from 1 to len(options).

    Results are grouped by size, shortest first, preserving itertools'
    lexicographic order within each size.
    """
    per_size = (permutations(options, size)
                for size in range(1, len(options) + 1))
    return list(chain.from_iterable(per_size))
def combine_deep(options):
    """All combinations of *options* at every pick size from 1 to len(options).

    Results are grouped by size, shortest first, preserving itertools'
    lexicographic order within each size.
    """
    per_size = (combinations(options, size)
                for size in range(1, len(options) + 1))
    return list(chain.from_iterable(per_size))
def orphans(paths):
    """Yield each non-ROOT path whose parent has not appeared earlier in *paths*."""
    seen = set()
    for candidate in paths:
        seen.add(candidate)
        if candidate != ROOT and candidate.parent() not in seen:
            yield candidate
def has_orphans(paths):
    """True if *paths* contains at least one orphan (see orphans)."""
    return bool(list(orphans(paths)))
def no_orphans(paths):
    # True when every non-ROOT path appears only after its parent.
    return not has_orphans(paths)
def tree_shapes(names):
    """Every orphan-free subset of the paths generated from *names*.

    Returns a lazy filter object over the candidate subsets.
    """
    candidates = combine_deep(generate_paths(names))
    return filter(no_orphans, candidates)
def generate_trees(segments, csums):
    """All concrete trees: each shape from *segments* crossed with its dir/link options."""
    shapes = tree_shapes(segments)
    options_per_shape = [makeTreeOptions(shape, csums) for shape in shapes]
    return list(chain.from_iterable(options_per_shape))
def permuteOptions(seq, options):
    """Cartesian product of the option lists for each item of *seq*, in order.

    *options* maps each item to its list of candidate values; the result is
    an iterator of tuples, one value per item of *seq*.
    """
    choice_lists = (options[item] for item in seq)
    return product(*choice_lists)
def makeTreeOptions(tree, csums):
return permuteOptions(tree, makeTreeOptionDict(tree, csums))
#TODO we are generating Path here, but keySnap needs to be tolerant of that. It wants BaseString
def generate_paths(names):
    """Root plus a Path for every deep permutation of *names* joined as segments."""
    joined = ["/" + "/".join(segs) for segs in permute_deep(names)]
    return [Path(p) for p in ["/"] + joined]
def makeTreeOptionDict(paths, csums):
    """Map each path in *paths* to its candidate entries: dirs first, then links.

    Interior paths (parents) only get a directory option; leaves get a
    directory option plus one link option per checksum in *csums*.
    """
    parent_paths = parents(paths)
    assert ROOT in parent_paths
    leaf_paths = leaves(paths)
    dir_options = makeDirectoryPermutations(parent_paths.union(leaf_paths))
    link_options = makeLinkPermutations(leaf_paths, csums)
    return {path: dir_options[path] + link_options[path] for path in paths}
def parents(paths):
    """The set of parent directories of *paths*, always including ROOT."""
    return {ROOT}.union(p.parent() for p in paths)
def leaves(paths):
    """Paths in *paths* that are not the parent of any path (nor ROOT)."""
    return set(paths).difference(parents(paths))
def makeLinkPermutations(paths, csum_options):
    """Map each path to one candidate link entry per checksum in *csum_options*.

    Returns a defaultdict(list) so that paths without link options yield [].
    """
    # Dropped the original's unused `path_csum = product(paths, csum_options)`
    # -- it was computed and never read.
    links = {path: [makeLink(path, csum) for csum in csum_options]
             for path in paths}
    return defaultdict(list, links)
def makeDirectoryPermutations(paths):
    """Map each path to its single directory-entry option, defaulting to []."""
    return defaultdict(list, {path: [makeDir(path)] for path in paths})
def makeDir(path):
    # Snapshot entry describing a directory at *path*.
    return {"path": path, "type": DIR}
def makeLink(path, csum):
    # Snapshot entry describing a link at *path* with blob checksum *csum*.
    # Enforces that callers pass a real Path, not a plain string.
    assert isinstance(path, Path)
    return {"path": path, "csum": csum, "type": LINK}
| 30.965116 | 97 | 0.699211 | from farmfs.fs import sep, ROOT, Path, LINK, DIR
from itertools import permutations, combinations, chain, product
from collections import defaultdict
def permute_deep(options):
options = [permutations(options, pick) for pick in range(1,1+len(options))]
return list(chain.from_iterable(options))
def combine_deep(options):
options = [combinations(options, pick) for pick in range(1,1+len(options))]
return list(chain.from_iterable(options))
def orphans(paths):
accum = set()
for path in paths:
accum.add(path)
parent = path.parent()
if path != ROOT and parent not in accum:
yield path
def has_orphans(paths):
return len(list(orphans(paths))) > 0
def no_orphans(paths):
return not has_orphans(paths)
def tree_shapes(names):
paths = generate_paths(names)
shapes = combine_deep(paths)
return filter(no_orphans, shapes)
def generate_trees(segments, csums):
shapes = tree_shapes(segments)
trees = list(chain(*list(map(lambda tree: makeTreeOptions(tree, csums), shapes))))
return trees
def permuteOptions(seq, options):
optionSeq = [options[item] for item in seq]
return product(*optionSeq)
def makeTreeOptions(tree, csums):
return permuteOptions(tree, makeTreeOptionDict(tree, csums))
def generate_paths(names):
return list(map(Path, ["/"]+list(map(lambda segs: "/"+"/".join(segs), permute_deep(names)))))
def makeTreeOptionDict(paths, csums):
ppaths = parents(paths)
assert ROOT in ppaths
lpaths = leaves(paths)
dirPaths = ppaths.union(lpaths)
linkPaths = lpaths
dirCombos = makeDirectoryPermutations(dirPaths)
linkCombos = makeLinkPermutations(linkPaths, csums)
combined = {path: dirCombos[path] + linkCombos[path] for path in paths}
return combined
def parents(paths):
ppaths = set([ROOT]).union(map(lambda p: p.parent(), paths))
return ppaths
def leaves(paths):
ppaths = parents(paths)
lpaths = set(paths).difference(ppaths)
return lpaths
def makeLinkPermutations(paths, csum_options):
path_csum = product(paths, csum_options)
links = {path:
list(map(lambda csum: makeLink(path, csum), csum_options))
for path in paths}
return defaultdict(list, links)
def makeDirectoryPermutations(paths):
dirs = {path: [makeDir(path)] for path in paths}
return defaultdict(list, dirs)
def makeDir(path):
return {"path": path, "type": DIR}
def makeLink(path, csum):
assert isinstance(path, Path)
return {"path": path, "csum": csum, "type": LINK}
| true | true |
f7323e093756717649d847aad33677262eb94277 | 12,590 | py | Python | cirq/google/devices/serializable_device.py | abhik-99/Cirq | d244bf71ba78cee461bfd83a5edcf62dbbc5b3ca | [
"Apache-2.0"
] | null | null | null | cirq/google/devices/serializable_device.py | abhik-99/Cirq | d244bf71ba78cee461bfd83a5edcf62dbbc5b3ca | [
"Apache-2.0"
] | null | null | null | cirq/google/devices/serializable_device.py | abhik-99/Cirq | d244bf71ba78cee461bfd83a5edcf62dbbc5b3ca | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device object for converting from device specification protos"""
from typing import (Any, Callable, cast, Dict, Iterable, Optional, List, Set,
Tuple, Type, TYPE_CHECKING, FrozenSet)
from cirq import circuits, devices
from cirq.google import serializable_gate_set
from cirq.google.api import v2
from cirq.value import Duration
if TYPE_CHECKING:
import cirq
class _GateDefinition:
    """Per-gate device properties tracked by SerializableDevice."""

    def __init__(
            self,
            duration: 'cirq.DURATION_LIKE',
            target_set: Set[Tuple['cirq.Qid', ...]],
            number_of_qubits: int,
            is_permutation: bool,
            can_serialize_predicate: Callable[['cirq.Operation'], bool] = lambda
            x: True,
    ):
        self.duration = Duration(duration)
        self.target_set = target_set
        self.is_permutation = is_permutation
        self.number_of_qubits = number_of_qubits
        self.can_serialize_predicate = can_serialize_predicate

        # Every qubit mentioned anywhere in the target sets.
        self.flattened_qubits = set()
        for qubit_tuple in target_set:
            self.flattened_qubits.update(qubit_tuple)

    def with_can_serialize_predicate(
            self, can_serialize_predicate: Callable[['cirq.Operation'], bool]
    ) -> '_GateDefinition':
        """Copy of this definition with a different serialization predicate.

        Useful when multiple definitions exist for the same gate under
        different conditions (e.g. certain angles take longer or are not
        allowed).
        """
        return _GateDefinition(
            duration=self.duration,
            target_set=self.target_set,
            number_of_qubits=self.number_of_qubits,
            is_permutation=self.is_permutation,
            can_serialize_predicate=can_serialize_predicate,
        )

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
class SerializableDevice(devices.Device):
    """Device object generated from a device specification proto.

    Given a device specification proto and a gate_set to translate the
    serialized gate_ids to cirq Gates, this will generate a Device that can
    verify operations and circuits for the hardware specified by the device.

    Expected usage is through constructing this class through a proto using
    the static function call from_proto().

    This class only supports GridQubits and NamedQubits. NamedQubits with names
    that conflict (such as "4_3") may be converted to GridQubits on
    deserialization.
    """

    def __init__(
            self, qubits: List['cirq.Qid'],
            gate_definitions: Dict[Type['cirq.Gate'], List[_GateDefinition]]):
        """Constructor for SerializableDevice using python objects.

        Note that the preferred method of constructing this object is through
        the static from_proto() call.

        Args:
            qubits: A list of valid Qid for the device.
            gate_definitions: Maps cirq gates to device properties for that
                gate.
        """
        self.qubits = qubits
        self.gate_definitions = gate_definitions

    def qubit_set(self) -> FrozenSet['cirq.Qid']:
        return frozenset(self.qubits)

    @classmethod
    def from_proto(
            cls, proto: v2.device_pb2.DeviceSpecification,
            gate_sets: Iterable[serializable_gate_set.SerializableGateSet]
    ) -> 'SerializableDevice':
        """Builds a SerializableDevice from a DeviceSpecification proto.

        Args:
            proto: A proto describing the qubits on the device, as well as the
                supported gates and timing information.
            gate_sets: SerializableGateSets that can translate the gate_ids
                into cirq Gates.
        """
        # Store target sets, since they are referred to by name later
        allowed_targets: Dict[str, Set[Tuple['cirq.Qid', ...]]] = {}
        permutation_ids: Set[str] = set()
        for ts in proto.valid_targets:
            allowed_targets[ts.name] = cls._create_target_set(ts)
            if ts.target_ordering == v2.device_pb2.TargetSet.SUBSET_PERMUTATION:
                permutation_ids.add(ts.name)

        # Store gate definitions from proto
        gate_definitions: Dict[str, _GateDefinition] = {}
        for gs in proto.valid_gate_sets:
            for gate_def in gs.valid_gates:
                # Combine all valid targets in the gate's listed target sets
                gate_target_set = {
                    target for ts_name in gate_def.valid_targets
                    for target in allowed_targets[ts_name]
                }
                which_are_permutations = [
                    t in permutation_ids for t in gate_def.valid_targets
                ]
                is_permutation = any(which_are_permutations)
                if is_permutation:
                    # Mixing SUBSET_PERMUTATION with other orderings for one
                    # gate id is ambiguous, so reject it outright.
                    if not all(which_are_permutations):
                        raise NotImplementedError(
                            f'Id {gate_def.id} in {gs.name} mixes '
                            'SUBSET_PERMUTATION with other types which is not '
                            'currently allowed.')
                gate_definitions[gate_def.id] = _GateDefinition(
                    duration=Duration(picos=gate_def.gate_duration_picos),
                    target_set=gate_target_set,
                    is_permutation=is_permutation,
                    number_of_qubits=gate_def.number_of_qubits)

        # Loop through serializers and map gate_definitions to type
        gates_by_type: Dict[Type['cirq.Gate'], List[_GateDefinition]] = {}
        for gate_set in gate_sets:
            for gate_type in gate_set.supported_gate_types():
                for serializer in gate_set.serializers[gate_type]:
                    gate_id = serializer.serialized_gate_id
                    if gate_id not in gate_definitions:
                        raise ValueError(
                            f'Serializer has {gate_id} which is not supported '
                            'by the device specification')
                    if gate_type not in gates_by_type:
                        gates_by_type[gate_type] = []
                    gate_def = gate_definitions[
                        gate_id].with_can_serialize_predicate(
                            serializer.can_serialize_predicate)
                    gates_by_type[gate_type].append(gate_def)

        return SerializableDevice(
            qubits=[cls._qid_from_str(q) for q in proto.valid_qubits],
            gate_definitions=gates_by_type,
        )

    @staticmethod
    def _qid_from_str(id_str: str) -> 'cirq.Qid':
        """Translates a qubit id string into a cirq.Qid object.

        Tries to translate to GridQubit if possible (e.g. '4_3'), otherwise
        falls back to using NamedQubit.
        """
        try:
            return v2.grid_qubit_from_proto_id(id_str)
        except ValueError:
            return v2.named_qubit_from_proto_id(id_str)

    @classmethod
    def _create_target_set(cls, ts: v2.device_pb2.TargetSet
                          ) -> Set[Tuple['cirq.Qid', ...]]:
        """Transform a TargetSet proto into a set of qubit tuples"""
        target_set = set()
        for target in ts.targets:
            qid_tuple = tuple(cls._qid_from_str(q) for q in target.ids)
            target_set.add(qid_tuple)
            if ts.target_ordering == v2.device_pb2.TargetSet.SYMMETRIC:
                # Symmetric targets are valid in either qubit order.
                target_set.add(qid_tuple[::-1])
        return target_set

    def __str__(self) -> str:
        # If all qubits are grid qubits, render an appropriate text diagram.
        if all(isinstance(q, devices.GridQubit) for q in self.qubits):
            diagram = circuits.TextDiagramDrawer()

            qubits = cast(List['cirq.GridQubit'], self.qubits)

            # Don't print out extras newlines if the row/col doesn't start at 0
            min_col = min(q.col for q in qubits)
            min_row = min(q.row for q in qubits)

            for q in qubits:
                diagram.write(q.col - min_col, q.row - min_row, str(q))

            # Find pairs that are connected by two-qubit gates.
            Pair = Tuple['cirq.GridQubit', 'cirq.GridQubit']
            pairs = {
                cast(Pair, pair)
                for gate_defs in self.gate_definitions.values()
                for gate_def in gate_defs if gate_def.number_of_qubits == 2
                for pair in gate_def.target_set if len(pair) == 2
            }

            # Draw lines between connected pairs. Limit to horizontal/vertical
            # lines since that is all the diagram drawer can handle.
            for q1, q2 in sorted(pairs):
                if q1.row == q2.row or q1.col == q2.col:
                    diagram.grid_line(q1.col - min_col, q1.row - min_row,
                                      q2.col - min_col, q2.row - min_row)

            return diagram.render(horizontal_spacing=3,
                                  vertical_spacing=2,
                                  use_unicode_characters=True)
        return super().__str__()

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Creates ASCII diagram for Jupyter, IPython, etc."""
        # There should never be a cycle, but just in case use the default repr.
        p.text(repr(self) if cycle else str(self))

    def _find_operation_type(self,
                             op: 'cirq.Operation') -> Optional[_GateDefinition]:
        """Finds the type (or a compatible type) of an operation from within
        a dictionary with keys of Gate type.

        Returns:
            the value corresponding to that key or None if no type matches
        """
        for type_key, gate_defs in self.gate_definitions.items():
            if isinstance(op.gate, type_key):
                for gate_def in gate_defs:
                    if gate_def.can_serialize_predicate(op):
                        return gate_def
        return None

    def duration_of(self, operation: 'cirq.Operation') -> Duration:
        gate_def = self._find_operation_type(operation)
        if gate_def is None:
            raise ValueError(
                f'Operation {operation} does not have a known duration')
        return gate_def.duration

    def validate_operation(self, operation: 'cirq.Operation') -> None:
        for q in operation.qubits:
            if q not in self.qubits:
                raise ValueError('Qubit not on device: {!r}'.format(q))

        gate_def = self._find_operation_type(operation)
        if gate_def is None:
            raise ValueError(f'{operation} is not a supported gate')

        req_num_qubits = gate_def.number_of_qubits
        if req_num_qubits > 0:
            if len(operation.qubits) != req_num_qubits:
                raise ValueError(f'{operation} has {len(operation.qubits)} '
                                 f'qubits but expected {req_num_qubits}')

        if gate_def.is_permutation:
            # A permutation gate can have any combination of qubits
            if not gate_def.target_set:
                # All qubits are valid
                return
            if not all(
                    q in gate_def.flattened_qubits for q in operation.qubits):
                # Fixed: original lacked the f prefix here, so the message
                # printed the literal text '{operation}'.
                raise ValueError(
                    f'Operation does not use valid qubits: {operation}.')
            return

        if len(operation.qubits) > 1:
            # TODO: verify args.
            # Github issue: https://github.com/quantumlib/Cirq/issues/2964
            if not gate_def.target_set:
                # All qubit combinations are valid
                return
            qubit_tuple = tuple(operation.qubits)
            if qubit_tuple not in gate_def.target_set:
                # Target is not within the target sets specified by the gate.
                raise ValueError(
                    f'Operation does not use valid qubit target: {operation}.')
| 40.876623 | 80 | 0.608261 |
from typing import (Any, Callable, cast, Dict, Iterable, Optional, List, Set,
Tuple, Type, TYPE_CHECKING, FrozenSet)
from cirq import circuits, devices
from cirq.google import serializable_gate_set
from cirq.google.api import v2
from cirq.value import Duration
if TYPE_CHECKING:
import cirq
class _GateDefinition:
def __init__(
self,
duration: 'cirq.DURATION_LIKE',
target_set: Set[Tuple['cirq.Qid', ...]],
number_of_qubits: int,
is_permutation: bool,
can_serialize_predicate: Callable[['cirq.Operation'], bool] = lambda
x: True,
):
self.duration = Duration(duration)
self.target_set = target_set
self.is_permutation = is_permutation
self.number_of_qubits = number_of_qubits
self.can_serialize_predicate = can_serialize_predicate
self.flattened_qubits = {
q for qubit_tuple in target_set for q in qubit_tuple
}
def with_can_serialize_predicate(
self, can_serialize_predicate: Callable[['cirq.Operation'], bool]
) -> '_GateDefinition':
return _GateDefinition(
self.duration,
self.target_set,
self.number_of_qubits,
self.is_permutation,
can_serialize_predicate,
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__dict__ == other.__dict__
class SerializableDevice(devices.Device):
def __init__(
self, qubits: List['cirq.Qid'],
gate_definitions: Dict[Type['cirq.Gate'], List[_GateDefinition]]):
self.qubits = qubits
self.gate_definitions = gate_definitions
def qubit_set(self) -> FrozenSet['cirq.Qid']:
return frozenset(self.qubits)
@classmethod
def from_proto(
cls, proto: v2.device_pb2.DeviceSpecification,
gate_sets: Iterable[serializable_gate_set.SerializableGateSet]
) -> 'SerializableDevice':
allowed_targets: Dict[str, Set[Tuple['cirq.Qid', ...]]] = {}
permutation_ids: Set[str] = set()
for ts in proto.valid_targets:
allowed_targets[ts.name] = cls._create_target_set(ts)
if ts.target_ordering == v2.device_pb2.TargetSet.SUBSET_PERMUTATION:
permutation_ids.add(ts.name)
gate_definitions: Dict[str, _GateDefinition] = {}
for gs in proto.valid_gate_sets:
for gate_def in gs.valid_gates:
gate_target_set = {
target for ts_name in gate_def.valid_targets
for target in allowed_targets[ts_name]
}
which_are_permutations = [
t in permutation_ids for t in gate_def.valid_targets
]
is_permutation = any(which_are_permutations)
if is_permutation:
if not all(which_are_permutations):
raise NotImplementedError(
f'Id {gate_def.id} in {gs.name} mixes '
'SUBSET_PERMUTATION with other types which is not '
'currently allowed.')
gate_definitions[gate_def.id] = _GateDefinition(
duration=Duration(picos=gate_def.gate_duration_picos),
target_set=gate_target_set,
is_permutation=is_permutation,
number_of_qubits=gate_def.number_of_qubits)
# Loop through serializers and map gate_definitions to type
gates_by_type: Dict[Type['cirq.Gate'], List[_GateDefinition]] = {}
for gate_set in gate_sets:
for gate_type in gate_set.supported_gate_types():
for serializer in gate_set.serializers[gate_type]:
gate_id = serializer.serialized_gate_id
if gate_id not in gate_definitions:
raise ValueError(
f'Serializer has {gate_id} which is not supported '
'by the device specification')
if gate_type not in gates_by_type:
gates_by_type[gate_type] = []
gate_def = gate_definitions[
gate_id].with_can_serialize_predicate(
serializer.can_serialize_predicate)
gates_by_type[gate_type].append(gate_def)
return SerializableDevice(
qubits=[cls._qid_from_str(q) for q in proto.valid_qubits],
gate_definitions=gates_by_type,
)
@staticmethod
def _qid_from_str(id_str: str) -> 'cirq.Qid':
try:
return v2.grid_qubit_from_proto_id(id_str)
except ValueError:
return v2.named_qubit_from_proto_id(id_str)
@classmethod
def _create_target_set(cls, ts: v2.device_pb2.TargetSet
) -> Set[Tuple['cirq.Qid', ...]]:
target_set = set()
for target in ts.targets:
qid_tuple = tuple(cls._qid_from_str(q) for q in target.ids)
target_set.add(qid_tuple)
if ts.target_ordering == v2.device_pb2.TargetSet.SYMMETRIC:
target_set.add(qid_tuple[::-1])
return target_set
def __str__(self) -> str:
# If all qubits are grid qubits, render an appropriate text diagram.
if all(isinstance(q, devices.GridQubit) for q in self.qubits):
diagram = circuits.TextDiagramDrawer()
qubits = cast(List['cirq.GridQubit'], self.qubits)
# Don't print out extras newlines if the row/col doesn't start at 0
min_col = min(q.col for q in qubits)
min_row = min(q.row for q in qubits)
for q in qubits:
diagram.write(q.col - min_col, q.row - min_row, str(q))
# Find pairs that are connected by two-qubit gates.
Pair = Tuple['cirq.GridQubit', 'cirq.GridQubit']
pairs = {
cast(Pair, pair)
for gate_defs in self.gate_definitions.values()
for gate_def in gate_defs if gate_def.number_of_qubits == 2
for pair in gate_def.target_set if len(pair) == 2
}
# Draw lines between connected pairs. Limit to horizontal/vertical
# lines since that is all the diagram drawer can handle.
for q1, q2 in sorted(pairs):
if q1.row == q2.row or q1.col == q2.col:
diagram.grid_line(q1.col - min_col, q1.row - min_row,
q2.col - min_col, q2.row - min_row)
return diagram.render(horizontal_spacing=3,
vertical_spacing=2,
use_unicode_characters=True)
return super().__str__()
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
# There should never be a cycle, but just in case use the default repr.
p.text(repr(self) if cycle else str(self))
def _find_operation_type(self,
op: 'cirq.Operation') -> Optional[_GateDefinition]:
for type_key, gate_defs in self.gate_definitions.items():
if isinstance(op.gate, type_key):
for gate_def in gate_defs:
if gate_def.can_serialize_predicate(op):
return gate_def
return None
def duration_of(self, operation: 'cirq.Operation') -> Duration:
gate_def = self._find_operation_type(operation)
if gate_def is None:
raise ValueError(
f'Operation {operation} does not have a known duration')
return gate_def.duration
def validate_operation(self, operation: 'cirq.Operation') -> None:
for q in operation.qubits:
if q not in self.qubits:
raise ValueError('Qubit not on device: {!r}'.format(q))
gate_def = self._find_operation_type(operation)
if gate_def is None:
raise ValueError(f'{operation} is not a supported gate')
req_num_qubits = gate_def.number_of_qubits
if req_num_qubits > 0:
if len(operation.qubits) != req_num_qubits:
raise ValueError(f'{operation} has {len(operation.qubits)} '
f'qubits but expected {req_num_qubits}')
if gate_def.is_permutation:
# A permutation gate can have any combination of qubits
if not gate_def.target_set:
# All qubits are valid
return
if not all(
q in gate_def.flattened_qubits for q in operation.qubits):
raise ValueError(
'Operation does not use valid qubits: {operation}.')
return
if len(operation.qubits) > 1:
# TODO: verify args.
# Github issue: https://github.com/quantumlib/Cirq/issues/2964
if not gate_def.target_set:
# All qubit combinations are valid
return
qubit_tuple = tuple(operation.qubits)
if qubit_tuple not in gate_def.target_set:
# Target is not within the target sets specified by the gate.
raise ValueError(
f'Operation does not use valid qubit target: {operation}.')
| true | true |
f7323e47ca19d683e6fb6a280cc58c5744dc8f71 | 124 | py | Python | output_test.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | output_test.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | output_test.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on 30 Aug, 2020
@author: ABD
'''
#total = 0
#for abc in range(5):
# total = total + abc
#print(total) | 13.777778 | 24 | 0.564516 | true | true | |
f7323f11dbc39df24cc3c39879fb06935d88ff17 | 2,454 | py | Python | fixtures/createJson.py | AKSHANSH47/crowdsource-platform2 | a31446d44bc10dca56a0d534cab226947a6bbb4e | [
"MIT"
] | null | null | null | fixtures/createJson.py | AKSHANSH47/crowdsource-platform2 | a31446d44bc10dca56a0d534cab226947a6bbb4e | [
"MIT"
] | null | null | null | fixtures/createJson.py | AKSHANSH47/crowdsource-platform2 | a31446d44bc10dca56a0d534cab226947a6bbb4e | [
"MIT"
] | 2 | 2020-01-27T05:35:50.000Z | 2020-02-29T12:55:39.000Z | __author__ = 'Megha'
# Script to transfer csv containing data about various models to json
# Input csv file constituting of the model data
# Output json file representing the csv data as json object
# Assumes model name to be first line
# Field names of the model on the second line
# Data seperated by __DELIM__
# Example:
# L01 ModelName: registrationmodel
# L02 FieldNames: user,activation_key,created_timestamp,last_updated
# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7  # Lines per model group in the CSV: model name, header, five data rows.
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0  # NOTE(review): unused in this file -- confirm before removing.
__KEY2__ = 0  # NOTE(review): unused in this file -- confirm before removing.
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'  # App-label prefix for each fixture's "model" field.
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
    """Append one Django-style fixture record per (pk, fields) item.

    Each record has the shape
    {"fields": <fields>, "pk": <pk>, "model": "crowdsourcing.<module>"}.
    Mutates and returns *data_collection*.
    """
    model_label = __APPEND__ + module
    for pk, fields in input_dict.items():
        data_collection.append({
            __KEY_FIELDS__: fields,
            __KEY_PK__: pk,
            __KEY_MODEL__: model_label,
        })
    return data_collection
def create_data_json(file):
    """Parse the fixture CSV at *file* and write fixture JSON to __OUTPUT_FILE__.

    The CSV is read in groups of __MODULE_NAME__ lines: model name, column
    header, then five data rows (see the module header for an example).

    Fixes over the original: files are opened in text mode ('r'/'w') with
    context managers -- the original opened 'rb'/'wb', so under Python 3
    bytes.strip('\\n') and writing the str from json.dumps both raise
    TypeError.
    """
    with open(file, 'r') as in_fp:
        file_lines = in_fp.readlines()

    data_collection = []
    # Walk the file one module-sized group at a time.
    for line_no in range(0, len(file_lines), __MODULE_NAME__):
        columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
        # The five data rows follow the header (offsets 2 .. __MODULE_NAME__-1).
        instances = [
            file_lines[line_no + offset].strip(__NEWLINE__).split(__DELIM__)
            for offset in range(2, __MODULE_NAME__)
        ]
        df = pd.DataFrame(np.array(instances), columns=columns)
        # transpose().to_dict() yields {row_index: {column: value}} which
        # create_dict turns into fixture records keyed by pk.
        create_dict(df.transpose().to_dict(),
                    file_lines[line_no].strip(__NEWLINE__), data_collection)
        del (df)

    print(data_collection)
    with open(__OUTPUT_FILE__, 'w') as out_fp:
        out_fp.write(json.dumps(data_collection, indent=2))


if __name__ == '__main__':
    create_data_json(__INPUT_FILE__)
| 37.181818 | 106 | 0.707416 | __author__ = 'Megha'
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0
__KEY2__ = 0
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
for key, value in input_dict.items():
data_dict = {}
data_dict[__KEY_FIELDS__] = value
data_dict[__KEY_PK__] = key
data_dict[__KEY_MODEL__] = __APPEND__ + module
data_collection.append(data_dict)
return data_collection
def create_data_json(file):
in_fp = open(file, 'rb')
file_lines = in_fp.readlines()
in_fp.close()
data_collection = []
for line_no in range(0, len(file_lines)):
if line_no % __MODULE_NAME__ == 0:
columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
instance1 = file_lines[line_no + 2].strip(__NEWLINE__).split(__DELIM__)
instance2 = file_lines[line_no + 3].strip(__NEWLINE__).split(__DELIM__)
instance3 = file_lines[line_no + 4].strip(__NEWLINE__).split(__DELIM__)
instance4 = file_lines[line_no + 5].strip(__NEWLINE__).split(__DELIM__)
instance5 = file_lines[line_no + 6].strip(__NEWLINE__).split(__DELIM__)
data = np.array([instance1, instance2, instance3, instance4, instance5])
df = pd.DataFrame(data, columns=columns)
create_dict(df.transpose().to_dict(), file_lines[line_no].strip(__NEWLINE__), data_collection)
del (df)
print(data_collection)
out_fp = open(__OUTPUT_FILE__, 'wb')
out_fp.write(json.dumps(data_collection, indent=2))
out_fp.close()
if __name__ == '__main__':
create_data_json(__INPUT_FILE__)
| true | true |
f73241d287661520cf1d7ff6db55cdc259ea7d50 | 1,170 | py | Python | python_modules/supersense_list.py | cltl/MFS_classifier | ef3ea52f23aebe798241057046d4b49f181328f3 | [
"Apache-2.0"
] | 2 | 2016-08-12T05:11:36.000Z | 2020-09-20T09:23:28.000Z | python_modules/supersense_list.py | cltl/MFS_classifier | ef3ea52f23aebe798241057046d4b49f181328f3 | [
"Apache-2.0"
] | null | null | null | python_modules/supersense_list.py | cltl/MFS_classifier | ef3ea52f23aebe798241057046d4b49f181328f3 | [
"Apache-2.0"
] | null | null | null | SS = {}
SS['00'] = 'adj.all'
SS['01'] = 'adj.pert'
SS['02'] = 'adv.all'
SS['03'] = 'noun.Tops'
SS['04'] = 'noun.act'
SS['05'] = 'noun.animal'
SS['06'] = 'noun.artifact'
SS['07'] = 'noun.attribute'
SS['08'] = 'noun.body'
SS['09'] = 'noun.cognition'
SS['10'] = 'noun.communication'
SS['11'] = 'noun.event'
SS['12'] = 'noun.feeling'
SS['13'] = 'noun.food'
SS['14'] = 'noun.group'
SS['15'] = 'noun.location'
SS['16'] = 'noun.motive'
SS['17'] = 'noun.object'
SS['18'] = 'noun.person'
SS['19'] = 'noun.phenomenon'
SS['20'] = 'noun.plant'
SS['21'] = 'noun.possession'
SS['22'] = 'noun.process'
SS['23'] = 'noun.quantity'
SS['24'] = 'noun.relation'
SS['25'] = 'noun.shape'
SS['26'] = 'noun.state'
SS['27'] = 'noun.substance'
SS['28'] = 'noun.time'
SS['29'] = 'verb.body'
SS['30'] = 'verb.change'
SS['31'] = 'verb.cognition'
SS['32'] = 'verb.communication'
SS['33'] = 'verb.competition'
SS['34'] = 'verb.consumption'
SS['35'] = 'verb.contact'
SS['36'] = 'verb.creation'
SS['37'] = 'verb.emotion'
SS['38'] = 'verb.motion'
SS['39'] = 'verb.perception'
SS['40'] = 'verb.possession'
SS['41'] = 'verb.social'
SS['42'] = 'verb.stative'
SS['43'] = 'verb.weather'
SS['44'] = 'adj.ppl'
| 23.877551 | 31 | 0.570085 | SS = {}
SS['00'] = 'adj.all'
SS['01'] = 'adj.pert'
SS['02'] = 'adv.all'
SS['03'] = 'noun.Tops'
SS['04'] = 'noun.act'
SS['05'] = 'noun.animal'
SS['06'] = 'noun.artifact'
SS['07'] = 'noun.attribute'
SS['08'] = 'noun.body'
SS['09'] = 'noun.cognition'
SS['10'] = 'noun.communication'
SS['11'] = 'noun.event'
SS['12'] = 'noun.feeling'
SS['13'] = 'noun.food'
SS['14'] = 'noun.group'
SS['15'] = 'noun.location'
SS['16'] = 'noun.motive'
SS['17'] = 'noun.object'
SS['18'] = 'noun.person'
SS['19'] = 'noun.phenomenon'
SS['20'] = 'noun.plant'
SS['21'] = 'noun.possession'
SS['22'] = 'noun.process'
SS['23'] = 'noun.quantity'
SS['24'] = 'noun.relation'
SS['25'] = 'noun.shape'
SS['26'] = 'noun.state'
SS['27'] = 'noun.substance'
SS['28'] = 'noun.time'
SS['29'] = 'verb.body'
SS['30'] = 'verb.change'
SS['31'] = 'verb.cognition'
SS['32'] = 'verb.communication'
SS['33'] = 'verb.competition'
SS['34'] = 'verb.consumption'
SS['35'] = 'verb.contact'
SS['36'] = 'verb.creation'
SS['37'] = 'verb.emotion'
SS['38'] = 'verb.motion'
SS['39'] = 'verb.perception'
SS['40'] = 'verb.possession'
SS['41'] = 'verb.social'
SS['42'] = 'verb.stative'
SS['43'] = 'verb.weather'
SS['44'] = 'adj.ppl'
| true | true |
f73242295fa78051db57900a4402d3561b804f9b | 3,229 | py | Python | sport/solutions/cf/1196 div3/1196B-4.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | [
"MIT"
] | null | null | null | sport/solutions/cf/1196 div3/1196B-4.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | [
"MIT"
] | 8 | 2020-04-03T15:33:54.000Z | 2022-03-02T10:24:22.000Z | sport/solutions/cf/1196 div3/1196B-4.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/python
false = False
true = True
null = None
# import math
TEST = false
try:
import sys
for arg in sys.argv:
if(arg == 'test'):
print('test mode')
TEST = True
pass
except:
pass
def AddImports(libraryNames):
for libname in libraryNames:
if (type(libname) == type(tuple())):
short = libname[1]
libname = libname[0]
else:
short = None
try:
lib = __import__(libname)
except ImportError:
pass
else:
if short:
globals()[short] = lib
else:
globals()[libname] = lib
return True
# libnames = ['fileinput', 'codecs', 'operator', 'functools', 'math',
# 'io', 'platform', 'collections', 'mmap', 'logging', 'logging.handlers']
libnames = ['functools', 'math', 'collections']
# libnames = ['math']
AddImports(libnames)
IntellisenseHint = False
if IntellisenseHint:
import functools
import math
import collections
# import mmap
# import logging
# import logging.handlers
# import defs
# class memoized(object, ):
# "Decorator. Caches a function's return value each time it is called.\n\tIf called later with the same arguments, the cached value is returned\n\t(not reevaluated).\n\t"
# def __init__(self, func):
# self.func = func
# self.cache = {}
# def __call__(self, *args):
# if (not isinstance(args, collections.Hashable)):
# return self.func(*args)
# if (args in self.cache):
# return self.cache[args]
# else:
# value = self.func(*args)
# self.cache[args] = value
# return value
# def __repr__(self):
# "Return the function's docstring."
# return self.func.__doc__
# def __get__(self, obj, objtype):
# 'Support instance methods.'
# return functools.partial(self.__call__, obj)
def it(args, *arg):
if(TEST):
print(args, *arg)
# print(args, vargs)
def floatEqual(a, b):
diff = math.fabs(a-b)
if(diff < 1e-10):
return True
else:
return diff <= 1e-8 * max(math.fabs(a), math.fabs(b))
def ria():
return list(map(int, input().strip(' ').split(' ')))
def solve():
q = ria()[0]
for i in range(q):
it('stepstepstepstepstep')
[n,k] = ria()
# it(n,k)
arr = ria()
odds = []
cand = []
ans = ''
for j in range(len(arr)):
if(arr[j] % 2 == 1):
odds.append(j)
if(len(cand)<k-1):
cand.append(j+1)
ans = ans + str(j+1) + ' '
pass
cand.append(n)
ans = ans + str(n)
if(k <= len(odds) and (len(odds)-k) % 2 == 0):
print('YES')
# print(' '.join(map(str, cand)))
print(ans)
else:
print('NO')
pass
pass
pass
solve()
| 23.918519 | 175 | 0.482502 |
false = False
true = True
null = None
TEST = false
try:
import sys
for arg in sys.argv:
if(arg == 'test'):
print('test mode')
TEST = True
pass
except:
pass
def AddImports(libraryNames):
for libname in libraryNames:
if (type(libname) == type(tuple())):
short = libname[1]
libname = libname[0]
else:
short = None
try:
lib = __import__(libname)
except ImportError:
pass
else:
if short:
globals()[short] = lib
else:
globals()[libname] = lib
return True
libnames = ['functools', 'math', 'collections']
AddImports(libnames)
IntellisenseHint = False
if IntellisenseHint:
import functools
import math
import collections
# def __init__(self, func):
# self.func = func
# self.cache = {}
# def __call__(self, *args):
# if (not isinstance(args, collections.Hashable)):
# return self.func(*args)
# if (args in self.cache):
# return self.cache[args]
# else:
# value = self.func(*args)
# self.cache[args] = value
# return value
# def __repr__(self):
# "Return the function's docstring."
def it(args, *arg):
if(TEST):
print(args, *arg)
def floatEqual(a, b):
diff = math.fabs(a-b)
if(diff < 1e-10):
return True
else:
return diff <= 1e-8 * max(math.fabs(a), math.fabs(b))
def ria():
return list(map(int, input().strip(' ').split(' ')))
def solve():
q = ria()[0]
for i in range(q):
it('stepstepstepstepstep')
[n,k] = ria()
arr = ria()
odds = []
cand = []
ans = ''
for j in range(len(arr)):
if(arr[j] % 2 == 1):
odds.append(j)
if(len(cand)<k-1):
cand.append(j+1)
ans = ans + str(j+1) + ' '
pass
cand.append(n)
ans = ans + str(n)
if(k <= len(odds) and (len(odds)-k) % 2 == 0):
print('YES')
print(ans)
else:
print('NO')
pass
pass
pass
solve()
| true | true |
f7324374f56ad89ad7d856e2040b3f5fad0425c3 | 46,804 | py | Python | venv/lib/python3.7/site-packages/ccxt/okcoinusd.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | 2 | 2020-12-17T16:07:27.000Z | 2021-02-10T18:25:41.000Z | venv/lib/python3.7/site-packages/ccxt/okcoinusd.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/ccxt/okcoinusd.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | 1 | 2020-03-29T02:05:41.000Z | 2020-03-29T02:05:41.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class okcoinusd (Exchange):
    def describe(self):
        """Return the static exchange descriptor for OKCoin USD.

        Merges this exchange's metadata (id, endpoints, timeframes, fee
        schedule, error-code mapping, default options) over the base
        Exchange descriptor via deep_extend.  This is pure configuration
        data consumed by the ccxt base class; it performs no I/O.
        """
        return self.deep_extend(super(okcoinusd, self).describe(), {
            'id': 'okcoinusd',
            'name': 'OKCoin USD',
            'countries': ['CN', 'US'],
            'version': 'v1',
            'rateLimit': 1000,  # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
            'has': {
                'CORS': False,
                'fetchOHLCV': True,
                'fetchOrder': True,
                'fetchOrders': False,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchTickers': True,
                'withdraw': True,
                'futures': False,
            },
            'extension': '.do',  # appended to endpoint URL
            'timeframes': {
                '1m': '1min',
                '3m': '3min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '1hour',
                '2h': '2hour',
                '4h': '4hour',
                '6h': '6hour',
                '12h': '12hour',
                '1d': '1day',
                '3d': '3day',
                '1w': '1week',
            },
            'api': {
                'web': {
                    'get': [
                        'futures/pc/market/marketOverview',
                        'spot/markets/index-tickers',
                        'spot/markets/currencies',
                        'spot/markets/products',
                        'spot/markets/tickers',
                        'spot/user-level',
                    ],
                    'post': [
                        'futures/pc/market/futuresCoin',
                    ],
                },
                'public': {
                    'get': [
                        'depth',
                        'exchange_rate',
                        'future_depth',
                        'future_estimated_price',
                        'future_hold_amount',
                        'future_index',
                        'future_kline',
                        'future_price_limit',
                        'future_ticker',
                        'future_trades',
                        'kline',
                        'otcs',
                        'ticker',
                        'tickers',
                        'trades',
                    ],
                },
                'private': {
                    'post': [
                        'account_records',
                        'batch_trade',
                        'borrow_money',
                        'borrow_order_info',
                        'borrows_info',
                        'cancel_borrow',
                        'cancel_order',
                        'cancel_otc_order',
                        'cancel_withdraw',
                        'funds_transfer',
                        'future_batch_trade',
                        'future_cancel',
                        'future_devolve',
                        'future_explosive',
                        'future_order_info',
                        'future_orders_info',
                        'future_position',
                        'future_position_4fix',
                        'future_trade',
                        'future_trades_history',
                        'future_userinfo',
                        'future_userinfo_4fix',
                        'lend_depth',
                        'order_fee',
                        'order_history',
                        'order_info',
                        'orders_info',
                        'otc_order_history',
                        'otc_order_info',
                        'repayment',
                        'submit_otc_order',
                        'trade',
                        'trade_history',
                        'trade_otc_order',
                        'wallet_info',
                        'withdraw',
                        'withdraw_info',
                        'unrepayments_info',
                        'userinfo',
                    ],
                },
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766791-89ffb502-5ee5-11e7-8a5b-c5950b68ac65.jpg',
                'api': {
                    'web': 'https://www.okcoin.com/v2',
                    'public': 'https://www.okcoin.com/api',
                    'private': 'https://www.okcoin.com',
                },
                'www': 'https://www.okcoin.com',
                'doc': [
                    'https://www.okcoin.com/docs/en/',
                    'https://www.npmjs.com/package/okcoin.com',
                ],
                'referral': 'https://www.okcoin.com/account/register?flag=activity&channelId=600001513',
            },
            # these are okcoin.com fees, okex fees are in okex.js
            'fees': {
                'trading': {
                    'taker': 0.001,
                    'maker': 0.0005,
                },
            },
            # maps numeric error codes from the API to ccxt exception classes,
            # used by handle_errors in the base class
            'exceptions': {
                # see https://github.com/okcoin-okex/API-docs-OKEx.com/blob/master/API-For-Spot-EN/Error%20Code%20For%20Spot.md
                '10000': ExchangeError,  # "Required field, can not be null"
                '10001': DDoSProtection,  # "Request frequency too high to exceed the limit allowed"
                '10005': AuthenticationError,  # "'SecretKey' does not exist"
                '10006': AuthenticationError,  # "'Api_key' does not exist"
                '10007': AuthenticationError,  # "Signature does not match"
                '1002': InsufficientFunds,  # "The transaction amount exceed the balance"
                '1003': InvalidOrder,  # "The transaction amount is less than the minimum requirement"
                '1004': InvalidOrder,  # "The transaction amount is less than 0"
                '1013': InvalidOrder,  # no contract type(PR-1101)
                '1027': InvalidOrder,  # createLimitBuyOrder(symbol, 0, 0): Incorrect parameter may exceeded limits
                '1050': InvalidOrder,  # returned when trying to cancel an order that was filled or canceled previously
                '1217': InvalidOrder,  # "Order was sent at ±5% of the current market price. Please resend"
                '10014': InvalidOrder,  # "Order price must be between 0 and 1,000,000"
                '1009': OrderNotFound,  # for spot markets, cancelling closed order
                '1019': OrderNotFound,  # order closed?("Undo order failed")
                '1051': OrderNotFound,  # for spot markets, cancelling "just closed" order
                '10009': OrderNotFound,  # for spot markets, "Order does not exist"
                '20015': OrderNotFound,  # for future markets
                '10008': BadRequest,  # Illegal URL parameter
                # todo: sort out below
                # 10000 Required parameter is empty
                # 10001 Request frequency too high to exceed the limit allowed
                # 10002 Authentication failure
                # 10002 System error
                # 10003 This connection has requested other user data
                # 10004 Request failed
                # 10005 api_key or sign is invalid, 'SecretKey' does not exist
                # 10006 'Api_key' does not exist
                # 10007 Signature does not match
                # 10008 Illegal parameter, Parameter erorr
                # 10009 Order does not exist
                # 10010 Insufficient funds
                # 10011 Amount too low
                # 10012 Only btc_usd ltc_usd supported
                # 10013 Only support https request
                # 10014 Order price must be between 0 and 1,000,000
                # 10015 Order price differs from current market price too much / Channel subscription temporally not available
                # 10016 Insufficient coins balance
                # 10017 API authorization error / WebSocket authorization error
                # 10018 borrow amount less than lower limit [usd:100,btc:0.1,ltc:1]
                # 10019 loan agreement not checked
                # 1002 The transaction amount exceed the balance
                # 10020 rate cannot exceed 1%
                # 10021 rate cannot less than 0.01%
                # 10023 fail to get latest ticker
                # 10024 balance not sufficient
                # 10025 quota is full, cannot borrow temporarily
                # 10026 Loan(including reserved loan) and margin cannot be withdrawn
                # 10027 Cannot withdraw within 24 hrs of authentication information modification
                # 10028 Withdrawal amount exceeds daily limit
                # 10029 Account has unpaid loan, please cancel/pay off the loan before withdraw
                # 1003 The transaction amount is less than the minimum requirement
                # 10031 Deposits can only be withdrawn after 6 confirmations
                # 10032 Please enabled phone/google authenticator
                # 10033 Fee higher than maximum network transaction fee
                # 10034 Fee lower than minimum network transaction fee
                # 10035 Insufficient BTC/LTC
                # 10036 Withdrawal amount too low
                # 10037 Trade password not set
                # 1004 The transaction amount is less than 0
                # 10040 Withdrawal cancellation fails
                # 10041 Withdrawal address not exsit or approved
                # 10042 Admin password error
                # 10043 Account equity error, withdrawal failure
                # 10044 fail to cancel borrowing order
                # 10047 self function is disabled for sub-account
                # 10048 withdrawal information does not exist
                # 10049 User can not have more than 50 unfilled small orders(amount<0.15BTC)
                # 10050 can't cancel more than once
                # 10051 order completed transaction
                # 10052 not allowed to withdraw
                # 10064 after a USD deposit, that portion of assets will not be withdrawable for the next 48 hours
                # 1007 No trading market information
                # 1008 No latest market information
                # 1009 No order
                # 1010 Different user of the cancelled order and the original order
                # 10100 User account frozen
                # 10101 order type is wrong
                # 10102 incorrect ID
                # 10103 the private otc order's key incorrect
                # 10106 API key domain not matched
                # 1011 No documented user
                # 1013 No order type
                # 1014 No login
                # 1015 No market depth information
                # 1017 Date error
                # 1018 Order failed
                # 1019 Undo order failed
                # 10216 Non-available API / non-public API
                # 1024 Currency does not exist
                # 1025 No chart type
                # 1026 No base currency quantity
                # 1027 Incorrect parameter may exceeded limits
                # 1028 Reserved decimal failed
                # 1029 Preparing
                # 1030 Account has margin and futures, transactions can not be processed
                # 1031 Insufficient Transferring Balance
                # 1032 Transferring Not Allowed
                # 1035 Password incorrect
                # 1036 Google Verification code Invalid
                # 1037 Google Verification code incorrect
                # 1038 Google Verification replicated
                # 1039 Message Verification Input exceed the limit
                # 1040 Message Verification invalid
                # 1041 Message Verification incorrect
                # 1042 Wrong Google Verification Input exceed the limit
                # 1043 Login password cannot be same as the trading password
                # 1044 Old password incorrect
                # 1045 2nd Verification Needed
                # 1046 Please input old password
                # 1048 Account Blocked
                # 1050 Orders have been withdrawn or withdrawn
                # 1051 Order completed
                # 1201 Account Deleted at 00: 00
                # 1202 Account Not Exist
                # 1203 Insufficient Balance
                # 1204 Invalid currency
                # 1205 Invalid Account
                # 1206 Cash Withdrawal Blocked
                # 1207 Transfer Not Support
                # 1208 No designated account
                # 1209 Invalid api
                # 1216 Market order temporarily suspended. Please send limit order
                # 1217 Order was sent at ±5% of the current market price. Please resend
                # 1218 Place order failed. Please try again later
                # 20001 User does not exist
                # 20002 Account frozen
                # 20003 Account frozen due to forced liquidation
                # 20004 Contract account frozen
                # 20005 User contract account does not exist
                # 20006 Required field missing
                # 20007 Illegal parameter
                # 20008 Contract account balance is too low
                # 20009 Contract status error
                # 20010 Risk rate ratio does not exist
                # 20011 Risk rate lower than 90%/80% before opening BTC position with 10x/20x leverage. or risk rate lower than 80%/60% before opening LTC position with 10x/20x leverage
                # 20012 Risk rate lower than 90%/80% after opening BTC position with 10x/20x leverage. or risk rate lower than 80%/60% after opening LTC position with 10x/20x leverage
                # 20013 Temporally no counter party price
                # 20014 System error
                # 20015 Order does not exist
                # 20016 Close amount bigger than your open positions, liquidation quantity bigger than holding
                # 20017 Not authorized/illegal operation/illegal order ID
                # 20018 Order price cannot be more than 103-105% or less than 95-97% of the previous minute price
                # 20019 IP restricted from accessing the resource
                # 20020 Secret key does not exist
                # 20021 Index information does not exist
                # 20022 Wrong API interface(Cross margin mode shall call cross margin API, fixed margin mode shall call fixed margin API)
                # 20023 Account in fixed-margin mode
                # 20024 Signature does not match
                # 20025 Leverage rate error
                # 20026 API Permission Error
                # 20027 no transaction record
                # 20028 no such contract
                # 20029 Amount is large than available funds
                # 20030 Account still has debts
                # 20038 Due to regulation, self function is not availavle in the country/region your currently reside in.
                # 20049 Request frequency too high
                # 20100 request time out
                # 20101 the format of data is error
                # 20102 invalid login
                # 20103 event type error
                # 20104 subscription type error
                # 20107 JSON format error
                # 20115 The quote is not match
                # 20116 Param not match
                # 21020 Contracts are being delivered, orders cannot be placed
                # 21021 Contracts are being settled, contracts cannot be placed
            },
            'options': {
                'marketBuyPrice': False,
                'fetchOHLCVWarning': True,
                # numeric contract 'type' from the futures endpoint -> ccxt contract type
                'contractTypes': {
                    '1': 'this_week',
                    '2': 'next_week',
                    '4': 'quarter',
                },
                'fetchTickersMethod': 'fetch_tickers_from_api',
            },
        })
    def fetch_markets(self, params={}):
        """Fetch and build the list of unified market structures.

        Requests the spot products from the web endpoint and, when
        self.has['futures'] is set, also the futures coin list, then
        converts each product (and each futures contract within a
        product) into a ccxt market dict with precision and limits.

        :param dict params: extra parameters (unused here, kept for the
            unified fetch_markets signature)
        :returns list: unified market structures
        """
        # TODO: they have a new fee schedule as of Feb 7
        # the new fees are progressive and depend on 30-day traded volume
        # the following is the worst case
        result = []
        spotResponse = self.webGetSpotMarketsProducts()
        #
        # {
        #     "code": 0,
        #     "data": [
        #         {
        #             "baseCurrency":0,
        #             "brokerId":0,
        #             "callAuctionOrCallNoCancelAuction":false,
        #             "callNoCancelSwitchTime":{},
        #             "collect":"0",
        #             "continuousSwitchTime":{},
        #             "groupId":1,
        #             "isMarginOpen":true,
        #             "listDisplay":0,
        #             "marginRiskPreRatio":1.2,
        #             "marginRiskRatio":1.1,
        #             "marketFrom":118,
        #             "maxMarginLeverage":5,
        #             "maxPriceDigit":1,
        #             "maxSizeDigit":8,
        #             "mergeTypes":"0.1,1,10",
        #             "minTradeSize":0.00100000,
        #             "online":1,
        #             "productId":20,
        #             "quoteCurrency":7,
        #             "quoteIncrement":"0.1",
        #             "quotePrecision":2,
        #             "sort":30038,
        #             "symbol":"btc_usdt",
        #             "tradingMode":3
        #         },
        #     ]
        # }
        #
        spotMarkets = self.safe_value(spotResponse, 'data', [])
        markets = spotMarkets
        if self.has['futures']:
            futuresResponse = self.webPostFuturesPcMarketFuturesCoin()
            #
            # {
            #     "msg":"success",
            #     "code":0,
            #     "detailMsg":"",
            #     "data": [
            #         {
            #             "symbolId":0,
            #             "symbol":"f_usd_btc",
            #             "iceSingleAvgMinAmount":2,
            #             "minTradeSize":1,
            #             "iceSingleAvgMaxAmount":500,
            #             "contractDepthLevel":["0.01","0.2"],
            #             "dealAllMaxAmount":999,
            #             "maxSizeDigit":4,
            #             "contracts":[
            #                 {"marketFrom":34, "id":201905240000034, "type":1, "desc":"BTC0524"},
            #                 {"marketFrom":13, "id":201905310000013, "type":2, "desc":"BTC0531"},
            #                 {"marketFrom":12, "id":201906280000012, "type":4, "desc":"BTC0628"},
            #             ],
            #             "maxPriceDigit":2,
            #             "nativeRate":1,
            #             "quote":"usd",
            #             "nativeCurrency":"usd",
            #             "nativeCurrencyMark":"$",
            #             "contractSymbol":0,
            #             "unitAmount":100.00,
            #             "symbolMark":"฿",
            #             "symbolDesc":"BTC"
            #         },
            #     ]
            # }
            #
            futuresMarkets = self.safe_value(futuresResponse, 'data', [])
            markets = self.array_concat(spotMarkets, futuresMarkets)
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'symbol')
            symbol = None
            base = None
            quote = None
            baseId = None
            quoteId = None
            baseNumericId = None
            quoteNumericId = None
            lowercaseId = None
            uppercaseBaseId = None
            precision = {
                'amount': self.safe_integer(market, 'maxSizeDigit'),
                'price': self.safe_integer(market, 'maxPriceDigit'),
            }
            minAmount = self.safe_float(market, 'minTradeSize')
            minPrice = math.pow(10, -precision['price'])
            contracts = self.safe_value(market, 'contracts')
            if contracts is None:
                # spot markets
                lowercaseId = id
                parts = id.split('_')
                baseId = parts[0]
                quoteId = parts[1]
                baseNumericId = self.safe_integer(market, 'baseCurrency')
                quoteNumericId = self.safe_integer(market, 'quoteCurrency')
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                # a single dummy "contract" so the loop below emits one spot market
                contracts = [{}]
            else:
                # futures markets
                quoteId = self.safe_string(market, 'quote')
                uppercaseBaseId = self.safe_string(market, 'symbolDesc')
                baseId = uppercaseBaseId.lower()
                lowercaseId = baseId + '_' + quoteId
                base = self.safe_currency_code(uppercaseBaseId)
                quote = self.safe_currency_code(quoteId)
            for k in range(0, len(contracts)):
                contract = contracts[k]
                type = self.safe_string(contract, 'type', 'spot')
                contractType = None
                spot = True
                future = False
                active = True
                if type == 'spot':
                    symbol = base + '/' + quote
                    active = market['online'] != 0
                else:
                    # futures contract id embeds the delivery date, e.g. 201905240000034 -> '190524'
                    contractId = self.safe_string(contract, 'id')
                    symbol = base + '-' + quote + '-' + contractId[2:8]
                    contractType = self.safe_string(self.options['contractTypes'], type)
                    type = 'future'
                    spot = False
                    future = True
                fees = self.safe_value_2(self.fees, type, 'trading', {})
                result.append(self.extend(fees, {
                    'id': id,
                    'lowercaseId': lowercaseId,
                    'contractType': contractType,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'baseId': baseId,
                    'quoteId': quoteId,
                    'baseNumericId': baseNumericId,
                    'quoteNumericId': quoteNumericId,
                    'info': market,
                    'type': type,
                    'spot': spot,
                    'future': future,
                    'active': active,
                    'precision': precision,
                    'limits': {
                        'amount': {
                            'min': minAmount,
                            'max': None,
                        },
                        'price': {
                            'min': minPrice,
                            'max': None,
                        },
                        'cost': {
                            'min': minAmount * minPrice,
                            'max': None,
                        },
                    },
                }))
        return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
def fetch_tickers_from_api(self, symbols=None, params={}):
self.load_markets()
request = {}
response = self.publicGetTickers(self.extend(request, params))
tickers = response['tickers']
timestamp = self.safe_timestamp(response, 'date')
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
ticker = self.parse_ticker(self.extend(tickers[i], {'timestamp': timestamp}))
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_tickers_from_web(self, symbols=None, params={}):
self.load_markets()
request = {}
response = self.webGetSpotMarketsTickers(self.extend(request, params))
tickers = self.safe_value(response, 'data')
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_tickers(self, symbols=None, params={}):
method = self.options['fetchTickersMethod']
return getattr(self, method)(symbols, params)
def fetch_order_book(self, symbol=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureDepth' if market['future'] else 'publicGetDepth'
request = self.create_request(market, params)
if limit is not None:
request['size'] = limit
response = getattr(self, method)(request)
return self.parse_order_book(response)
def parse_ticker(self, ticker, market=None):
#
# { buy: "48.777300",
# change: "-1.244500",
# changePercentage: "-2.47%",
# close: "49.064000",
# createdDate: 1531704852254,
# currencyId: 527,
# dayHigh: "51.012500",
# dayLow: "48.124200",
# high: "51.012500",
# inflows: "0",
# last: "49.064000",
# low: "48.124200",
# marketFrom: 627,
# name: {},
# open: "50.308500",
# outflows: "0",
# productId: 527,
# sell: "49.064000",
# symbol: "zec_okb",
# volume: "1049.092535" }
#
timestamp = self.safe_integer_2(ticker, 'timestamp', 'createdDate')
symbol = None
if market is None:
if 'symbol' in ticker:
marketId = ticker['symbol']
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = ticker['symbol'].split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open')
change = self.safe_float(ticker, 'change')
percentage = self.safe_float(ticker, 'changePercentage')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float_2(ticker, 'vol', 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureTicker' if market['future'] else 'publicGetTicker'
request = self.create_request(market, params)
response = getattr(self, method)(request)
ticker = self.safe_value(response, 'ticker')
if ticker is None:
raise ExchangeError(self.id + ' fetchTicker returned an empty response: ' + self.json(response))
timestamp = self.safe_timestamp(response, 'date')
if timestamp is not None:
ticker = self.extend(ticker, {'timestamp': timestamp})
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
timestamp = self.safe_integer(trade, 'date_ms')
id = self.safe_string(trade, 'tid')
type = None
side = self.safe_string(trade, 'type')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureTrades' if market['future'] else 'publicGetTrades'
request = self.create_request(market, params)
response = getattr(self, method)(request)
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
return [
ohlcv[0], # timestamp
float(ohlcv[1]), # Open
float(ohlcv[2]), # High
float(ohlcv[3]), # Low
float(ohlcv[4]), # Close
# float(ohlcv[5]), # quote volume
# float(ohlcv[6]), # base volume
float(ohlcv[volumeIndex]), # okex will return base volume in the 7th element for future markets
]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a market.

        Futures markets use the future_kline endpoint, spot markets use
        kline.  Passing a limit raises ExchangeError unless
        self.options['fetchOHLCVWarning'] is disabled, because this
        exchange counts 'limit' candles backwards from now.

        NOTE(review): when 'since' is given, its value is discarded and
        the request is pinned to the last 24 hours instead — presumably
        intentional for this legacy API, but confirm before relying on
        'since' semantics here.
        """
        self.load_markets()
        market = self.market(symbol)
        method = 'publicGetFutureKline' if market['future'] else 'publicGetKline'
        request = self.create_request(market, {
            'type': self.timeframes[timeframe],
        })
        if since is not None:
            request['since'] = int((self.milliseconds() - 86400000) / 1000)  # default last 24h
        if limit is not None:
            if self.options['fetchOHLCVWarning']:
                raise ExchangeError(self.id + ' fetchOHLCV counts "limit" candles backwards in chronological ascending order, therefore the "limit" argument for ' + self.id + ' is disabled. Set ' + self.id + '.options["fetchOHLCVWarning"] = False to suppress self warning message.')
            request['size'] = int(limit)  # max is 1440 candles
        response = getattr(self, method)(self.extend(request, params))
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserinfo(params)
info = self.safe_value(response, 'info', {})
balances = self.safe_value(info, 'funds', {})
result = {'info': response}
ids = list(balances['free'].keys())
usedField = 'freezed'
# wtf, okex?
# https://github.com/okcoin-okex/API-docs-OKEx.com/commit/01cf9dd57b1f984a8737ef76a037d4d3795d2ac7
if not(usedField in list(balances.keys())):
usedField = 'holds'
usedKeys = list(balances[usedField].keys())
ids = self.array_concat(ids, usedKeys)
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
account = self.account()
account['free'] = self.safe_float(balances['free'], id)
account['used'] = self.safe_float(balances[usedField], id)
result[code] = account
return self.parse_balance(result)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a spot or futures order and return a unified order structure.

        Futures orders use a fixed 10x leverage and encode the side as a
        numeric type ('1' buy / '2' sell).  Spot market buys expect the cost
        (amount of quote currency to spend) rather than a base amount; see
        self.options['marketBuyPrice'] for how the price argument is treated.
        """
        self.load_markets()
        market = self.market(symbol)
        method = 'privatePostFutureTrade' if market['future'] else 'privatePostTrade'
        # market orders are expressed via a '<side>_market' type string
        orderSide = (side + '_market') if (type == 'market') else side
        isMarketBuy = ((market['spot']) and (type == 'market') and (side == 'buy') and (not self.options['marketBuyPrice']))
        # in the cost-based mode the 'cost' param replaces the price argument
        orderPrice = self.safe_float(params, 'cost') if isMarketBuy else price
        request = self.create_request(market, {
            'type': orderSide,
        })
        if market['future']:
            request['match_price'] = 1 if (type == 'market') else 0  # match best counter party price? 0 or 1, ignores price if 1
            request['lever_rate'] = 10  # leverage rate value: 10 or 20(10 by default)
            request['type'] = '1' if (side == 'buy') else '2'
        elif type == 'market':
            if side == 'buy':
                if not orderPrice:
                    if self.options['marketBuyPrice']:
                        # eslint-disable-next-line quotes
                        raise ExchangeError(self.id + " market buy orders require a price argument(the amount you want to spend or the cost of the order) when self.options['marketBuyPrice'] is True.")
                    else:
                        # eslint-disable-next-line quotes
                        raise ExchangeError(self.id + " market buy orders require an additional cost parameter, cost = price * amount. If you want to pass the cost of the market order(the amount you want to spend) in the price argument(the default " + self.id + " behaviour), set self.options['marketBuyPrice'] = True. It will effectively suppress self warning exception as well.")
                else:
                    request['price'] = orderPrice
            else:
                request['amount'] = amount
        if type != 'market':
            request['price'] = orderPrice
            request['amount'] = amount
        params = self.omit(params, 'cost')  # 'cost' was consumed above, do not forward it
        response = getattr(self, method)(self.extend(request, params))
        timestamp = self.milliseconds()
        # the exchange only returns an order id, so most fields are unknown here
        return {
            'info': response,
            'id': self.safe_string(response, 'order_id'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'filled': None,
            'remaining': None,
            'cost': None,
            'trades': None,
            'fee': None,
        }
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureCancel' if market['future'] else 'privatePostCancelOrder'
request = self.create_request(market, {
'order_id': id,
})
response = getattr(self, method)(self.extend(request, params))
return response
def parse_order_status(self, status):
statuses = {
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_value(statuses, status, status)
def parse_order_side(self, side):
if side == 1:
return 'buy' # open long position
elif side == 2:
return 'sell' # open short position
elif side == 3:
return 'sell' # liquidate long position
elif side == 4:
return 'buy' # liquidate short position
return side
    def parse_order(self, order, market=None):
        """Convert a raw exchange order into the unified order structure."""
        side = None
        type = None
        if 'type' in order:
            if (order['type'] == 'buy') or (order['type'] == 'sell'):
                side = order['type']
                type = 'limit'
            elif order['type'] == 'buy_market':
                side = 'buy'
                type = 'market'
            elif order['type'] == 'sell_market':
                side = 'sell'
                type = 'market'
            else:
                # futures orders encode the side as a numeric trade type
                side = self.parse_order_side(order['type'])
                if ('contract_name' in list(order.keys())) or ('lever_rate' in list(order.keys())):
                    type = 'margin'
        status = self.parse_order_status(self.safe_string(order, 'status'))
        symbol = None
        if market is None:
            marketId = self.safe_string(order, 'symbol')
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
        if market:
            symbol = market['symbol']
        createDateField = self.get_create_date_field()
        timestamp = self.safe_integer(order, createDateField)
        amount = self.safe_float(order, 'amount')
        filled = self.safe_float(order, 'deal_amount')
        # NOTE(review): max()/subtraction below assume both 'amount' and
        # 'deal_amount' are always present; a missing field would raise
        # a TypeError here -- confirm against live payloads.
        amount = max(amount, filled)
        remaining = max(0, amount - filled)
        if type == 'market':
            remaining = 0
        average = self.safe_float(order, 'avg_price')
        # 'price_avg' takes precedence when present:
        # https://github.com/ccxt/ccxt/issues/2452
        average = self.safe_float(order, 'price_avg', average)
        cost = average * filled
        return {
            'info': order,
            'id': self.safe_string(order, 'order_id'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': self.safe_float(order, 'price'),
            'average': average,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
        }
def get_create_date_field(self):
# needed for derived exchanges
# allcoin typo create_data instead of create_date
return 'create_date'
def get_orders_field(self):
# needed for derived exchanges
# allcoin typo order instead of orders(expected based on their API docs)
return 'orders'
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureOrderInfo' if market['future'] else 'privatePostOrderInfo'
request = self.create_request(market, {
'order_id': id,
# 'status': 0, # 0 for unfilled orders, 1 for filled orders
# 'current_page': 1, # current page number
# 'page_length': 200, # number of orders returned per page, maximum 200
})
response = getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
numOrders = len(response[ordersField])
if numOrders > 0:
return self.parse_order(response[ordersField][0])
raise OrderNotFound(self.id + ' order ' + id + ' not found')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureOrdersInfo' if market['future'] else 'privatePost'
request = self.create_request(market)
order_id_in_params = ('order_id' in list(params.keys()))
if market['future']:
if not order_id_in_params:
raise ExchangeError(self.id + ' fetchOrders() requires order_id param for futures market ' + symbol + '(a string of one or more order ids, comma-separated)')
else:
status = params['type'] if ('type' in list(params.keys())) else params['status']
if status is None:
name = 'type' if order_id_in_params else 'status'
raise ExchangeError(self.id + ' fetchOrders() requires ' + name + ' param for spot market ' + symbol + '(0 - for unfilled orders, 1 - for filled/canceled orders)')
if order_id_in_params:
method += 'OrdersInfo'
request = self.extend(request, {
'type': status,
'order_id': params['order_id'],
})
else:
method += 'OrderHistory'
request = self.extend(request, {
'status': status,
'current_page': 1, # current page number
'page_length': 200, # number of orders returned per page, maximum 200
})
params = self.omit(params, ['type', 'status'])
response = getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
return self.parse_orders(response[ordersField], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 0, # 0 for unfilled orders, 1 for filled orders
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 1, # 0 for unfilled orders, 1 for filled orders
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Request a withdrawal of *amount* of currency *code* to *address*.

        Requires a 'chargefee' entry in params and a trade password supplied
        either as self.password or via a 'password'/'trade_pwd' param.
        """
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        # if amount < 0.01:
        #     raise ExchangeError(self.id + ' withdraw() requires amount > 0.01')
        # for some reason they require to supply a pair of currencies for withdrawing one currency
        currencyId = currency['id'] + '_usd'
        if tag:
            # the destination tag/memo is appended to the address string
            address = address + ':' + tag
        request = {
            'symbol': currencyId,
            'withdraw_address': address,
            'withdraw_amount': amount,
            'target': 'address',  # or 'okcn', 'okcom', 'okex'
        }
        query = params
        if 'chargefee' in query:
            request['chargefee'] = query['chargefee']
            query = self.omit(query, 'chargefee')
        else:
            raise ExchangeError(self.id + ' withdraw() requires a `chargefee` parameter')
        # the trade password may come from three places, in this priority order
        if self.password:
            request['trade_pwd'] = self.password
        elif 'password' in query:
            request['trade_pwd'] = query['password']
            query = self.omit(query, 'password')
        elif 'trade_pwd' in query:
            request['trade_pwd'] = query['trade_pwd']
            query = self.omit(query, 'trade_pwd')
        passwordInRequest = ('trade_pwd' in list(request.keys()))
        if not passwordInRequest:
            raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
        response = self.privatePostWithdraw(self.extend(request, query))
        return {
            'info': response,
            'id': self.safe_string(response, 'withdraw_id'),
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Assemble url/method/body/headers for a request to the given *api*.

        Private endpoints are authenticated by hashing the sorted query
        string (with the secret key appended last) and sending the
        uppercased digest as the 'sign' field in a form-encoded body.
        """
        url = '/'
        if api != 'web':
            url += self.version + '/'
        url += path
        if api != 'web':
            url += self.extension  # REST endpoints carry the '.do' extension
        if api == 'private':
            self.check_required_credentials()
            # params must be sorted alphabetically before signing
            query = self.keysort(self.extend({
                'api_key': self.apiKey,
            }, params))
            # secret key must be at the end of query
            queryString = self.rawencode(query) + '&secret_key=' + self.secret
            query['sign'] = self.hash(self.encode(queryString)).upper()
            body = self.urlencode(query)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        else:
            if params:
                url += '?' + self.urlencode(params)
        url = self.urls['api'][api] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def create_request(self, market, params={}):
if market['future']:
return self.deep_extend({
'symbol': market['lowercaseId'],
'contract_type': market['contractType'],
}, params)
return self.deep_extend({
'symbol': market['id'],
}, params)
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'error_code' in response:
error = self.safe_string(response, 'error_code')
message = self.id + ' ' + self.json(response)
if error in self.exceptions:
ExceptionClass = self.exceptions[error]
raise ExceptionClass(message)
else:
raise ExchangeError(message)
if 'result' in response:
if not response['result']:
raise ExchangeError(self.id + ' ' + self.json(response))
| 45.396702 | 381 | 0.510512 |
ge import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class okcoinusd (Exchange):
def describe(self):
return self.deep_extend(super(okcoinusd, self).describe(), {
'id': 'okcoinusd',
'name': 'OKCoin USD',
'countries': ['CN', 'US'],
'version': 'v1',
'rateLimit': 1000,
'has': {
'CORS': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchTickers': True,
'withdraw': True,
'futures': False,
},
'extension': '.do',
'timeframes': {
'1m': '1min',
'3m': '3min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': '1day',
'3d': '3day',
'1w': '1week',
},
'api': {
'web': {
'get': [
'futures/pc/market/marketOverview',
'spot/markets/index-tickers',
'spot/markets/currencies',
'spot/markets/products',
'spot/markets/tickers',
'spot/user-level',
],
'post': [
'futures/pc/market/futuresCoin',
],
},
'public': {
'get': [
'depth',
'exchange_rate',
'future_depth',
'future_estimated_price',
'future_hold_amount',
'future_index',
'future_kline',
'future_price_limit',
'future_ticker',
'future_trades',
'kline',
'otcs',
'ticker',
'tickers',
'trades',
],
},
'private': {
'post': [
'account_records',
'batch_trade',
'borrow_money',
'borrow_order_info',
'borrows_info',
'cancel_borrow',
'cancel_order',
'cancel_otc_order',
'cancel_withdraw',
'funds_transfer',
'future_batch_trade',
'future_cancel',
'future_devolve',
'future_explosive',
'future_order_info',
'future_orders_info',
'future_position',
'future_position_4fix',
'future_trade',
'future_trades_history',
'future_userinfo',
'future_userinfo_4fix',
'lend_depth',
'order_fee',
'order_history',
'order_info',
'orders_info',
'otc_order_history',
'otc_order_info',
'repayment',
'submit_otc_order',
'trade',
'trade_history',
'trade_otc_order',
'wallet_info',
'withdraw',
'withdraw_info',
'unrepayments_info',
'userinfo',
],
},
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766791-89ffb502-5ee5-11e7-8a5b-c5950b68ac65.jpg',
'api': {
'web': 'https://www.okcoin.com/v2',
'public': 'https://www.okcoin.com/api',
'private': 'https://www.okcoin.com',
},
'www': 'https://www.okcoin.com',
'doc': [
'https://www.okcoin.com/docs/en/',
'https://www.npmjs.com/package/okcoin.com',
],
'referral': 'https://www.okcoin.com/account/register?flag=activity&channelId=600001513',
},
'fees': {
'trading': {
'taker': 0.001,
'maker': 0.0005,
},
},
'exceptions': {
'10000': ExchangeError,
'10001': DDoSProtection,
'10005': AuthenticationError,
'10006': AuthenticationError,
'10007': AuthenticationError,
'1002': InsufficientFunds,
'1003': InvalidOrder,
'1004': InvalidOrder,
'1013': InvalidOrder,
'1027': InvalidOrder,
'1050': InvalidOrder,
'1217': InvalidOrder,
'10014': InvalidOrder,
'1009': OrderNotFound,
'1019': OrderNotFound,
'1051': OrderNotFound,
'10009': OrderNotFound,
'20015': OrderNotFound,
'10008': BadRequest,
# 10051 order completed transaction
# 10052 not allowed to withdraw
# 10064 after a USD deposit, that portion of assets will not be withdrawable for the next 48 hours
# 1007 No trading market information
# 1008 No latest market information
# 1009 No order
# 1010 Different user of the cancelled order and the original order
# 10100 User account frozen
# 10101 order type is wrong
# 10102 incorrect ID
# 10103 the private otc order's key incorrect
},
'options': {
'marketBuyPrice': False,
'fetchOHLCVWarning': True,
'contractTypes': {
'1': 'this_week',
'2': 'next_week',
'4': 'quarter',
},
'fetchTickersMethod': 'fetch_tickers_from_api',
},
})
def fetch_markets(self, params={}):
result = []
spotResponse = self.webGetSpotMarketsProducts()
spotMarkets = self.safe_value(spotResponse, 'data', [])
markets = spotMarkets
if self.has['futures']:
futuresResponse = self.webPostFuturesPcMarketFuturesCoin()
futuresMarkets = self.safe_value(futuresResponse, 'data', [])
markets = self.array_concat(spotMarkets, futuresMarkets)
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'symbol')
symbol = None
base = None
quote = None
baseId = None
quoteId = None
baseNumericId = None
quoteNumericId = None
lowercaseId = None
uppercaseBaseId = None
precision = {
'amount': self.safe_integer(market, 'maxSizeDigit'),
'price': self.safe_integer(market, 'maxPriceDigit'),
}
minAmount = self.safe_float(market, 'minTradeSize')
minPrice = math.pow(10, -precision['price'])
contracts = self.safe_value(market, 'contracts')
if contracts is None:
lowercaseId = id
parts = id.split('_')
baseId = parts[0]
quoteId = parts[1]
baseNumericId = self.safe_integer(market, 'baseCurrency')
quoteNumericId = self.safe_integer(market, 'quoteCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
contracts = [{}]
else:
quoteId = self.safe_string(market, 'quote')
uppercaseBaseId = self.safe_string(market, 'symbolDesc')
baseId = uppercaseBaseId.lower()
lowercaseId = baseId + '_' + quoteId
base = self.safe_currency_code(uppercaseBaseId)
quote = self.safe_currency_code(quoteId)
for k in range(0, len(contracts)):
contract = contracts[k]
type = self.safe_string(contract, 'type', 'spot')
contractType = None
spot = True
future = False
active = True
if type == 'spot':
symbol = base + '/' + quote
active = market['online'] != 0
else:
contractId = self.safe_string(contract, 'id')
symbol = base + '-' + quote + '-' + contractId[2:8]
contractType = self.safe_string(self.options['contractTypes'], type)
type = 'future'
spot = False
future = True
fees = self.safe_value_2(self.fees, type, 'trading', {})
result.append(self.extend(fees, {
'id': id,
'lowercaseId': lowercaseId,
'contractType': contractType,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'baseNumericId': baseNumericId,
'quoteNumericId': quoteNumericId,
'info': market,
'type': type,
'spot': spot,
'future': future,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': None,
},
'cost': {
'min': minAmount * minPrice,
'max': None,
},
},
}))
return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
def fetch_tickers_from_api(self, symbols=None, params={}):
self.load_markets()
request = {}
response = self.publicGetTickers(self.extend(request, params))
tickers = response['tickers']
timestamp = self.safe_timestamp(response, 'date')
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
ticker = self.parse_ticker(self.extend(tickers[i], {'timestamp': timestamp}))
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_tickers_from_web(self, symbols=None, params={}):
self.load_markets()
request = {}
response = self.webGetSpotMarketsTickers(self.extend(request, params))
tickers = self.safe_value(response, 'data')
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_tickers(self, symbols=None, params={}):
method = self.options['fetchTickersMethod']
return getattr(self, method)(symbols, params)
def fetch_order_book(self, symbol=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureDepth' if market['future'] else 'publicGetDepth'
request = self.create_request(market, params)
if limit is not None:
request['size'] = limit
response = getattr(self, method)(request)
return self.parse_order_book(response)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer_2(ticker, 'timestamp', 'createdDate')
symbol = None
if market is None:
if 'symbol' in ticker:
marketId = ticker['symbol']
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = ticker['symbol'].split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open')
change = self.safe_float(ticker, 'change')
percentage = self.safe_float(ticker, 'changePercentage')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float_2(ticker, 'vol', 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureTicker' if market['future'] else 'publicGetTicker'
request = self.create_request(market, params)
response = getattr(self, method)(request)
ticker = self.safe_value(response, 'ticker')
if ticker is None:
raise ExchangeError(self.id + ' fetchTicker returned an empty response: ' + self.json(response))
timestamp = self.safe_timestamp(response, 'date')
if timestamp is not None:
ticker = self.extend(ticker, {'timestamp': timestamp})
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
timestamp = self.safe_integer(trade, 'date_ms')
id = self.safe_string(trade, 'tid')
type = None
side = self.safe_string(trade, 'type')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureTrades' if market['future'] else 'publicGetTrades'
request = self.create_request(market, params)
response = getattr(self, method)(request)
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
return [
ohlcv[0],
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
loat(ohlcv[volumeIndex]),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGetFutureKline' if market['future'] else 'publicGetKline'
request = self.create_request(market, {
'type': self.timeframes[timeframe],
})
if since is not None:
request['since'] = int((self.milliseconds() - 86400000) / 1000)
if limit is not None:
if self.options['fetchOHLCVWarning']:
raise ExchangeError(self.id + ' fetchOHLCV counts "limit" candles backwards in chronological ascending order, therefore the "limit" argument for ' + self.id + ' is disabled. Set ' + self.id + '.options["fetchOHLCVWarning"] = False to suppress self warning message.')
request['size'] = int(limit)
response = getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserinfo(params)
info = self.safe_value(response, 'info', {})
balances = self.safe_value(info, 'funds', {})
result = {'info': response}
ids = list(balances['free'].keys())
usedField = 'freezed'
if not(usedField in list(balances.keys())):
usedField = 'holds'
usedKeys = list(balances[usedField].keys())
ids = self.array_concat(ids, usedKeys)
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
account = self.account()
account['free'] = self.safe_float(balances['free'], id)
account['used'] = self.safe_float(balances[usedField], id)
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureTrade' if market['future'] else 'privatePostTrade'
orderSide = (side + '_market') if (type == 'market') else side
isMarketBuy = ((market['spot']) and (type == 'market') and (side == 'buy') and (not self.options['marketBuyPrice']))
orderPrice = self.safe_float(params, 'cost') if isMarketBuy else price
request = self.create_request(market, {
'type': orderSide,
})
if market['future']:
request['match_price'] = 1 if (type == 'market') else 0
request['lever_rate'] = 10
request['type'] = '1' if (side == 'buy') else '2'
elif type == 'market':
if side == 'buy':
if not orderPrice:
if self.options['marketBuyPrice']:
raise ExchangeError(self.id + " market buy orders require a price argument(the amount you want to spend or the cost of the order) when self.options['marketBuyPrice'] is True.")
else:
raise ExchangeError(self.id + " market buy orders require an additional cost parameter, cost = price * amount. If you want to pass the cost of the market order(the amount you want to spend) in the price argument(the default " + self.id + " behaviour), set self.options['marketBuyPrice'] = True. It will effectively suppress self warning exception as well.")
else:
request['price'] = orderPrice
else:
request['amount'] = amount
if type != 'market':
request['price'] = orderPrice
request['amount'] = amount
params = self.omit(params, 'cost')
response = getattr(self, method)(self.extend(request, params))
timestamp = self.milliseconds()
return {
'info': response,
'id': self.safe_string(response, 'order_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureCancel' if market['future'] else 'privatePostCancelOrder'
request = self.create_request(market, {
'order_id': id,
})
response = getattr(self, method)(self.extend(request, params))
return response
def parse_order_status(self, status):
statuses = {
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_value(statuses, status, status)
def parse_order_side(self, side):
if side == 1:
return 'buy'
elif side == 2:
return 'sell'
elif side == 3:
return 'sell'
elif side == 4:
return 'buy'
return side
def parse_order(self, order, market=None):
side = None
type = None
if 'type' in order:
if (order['type'] == 'buy') or (order['type'] == 'sell'):
side = order['type']
type = 'limit'
elif order['type'] == 'buy_market':
side = 'buy'
type = 'market'
elif order['type'] == 'sell_market':
side = 'sell'
type = 'market'
else:
side = self.parse_order_side(order['type'])
if ('contract_name' in list(order.keys())) or ('lever_rate' in list(order.keys())):
type = 'margin'
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market:
symbol = market['symbol']
createDateField = self.get_create_date_field()
timestamp = self.safe_integer(order, createDateField)
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
average = self.safe_float(order, 'avg_price')
average = self.safe_float(order, 'price_avg', average)
cost = average * filled
return {
'info': order,
'id': self.safe_string(order, 'order_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': self.safe_float(order, 'price'),
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
def get_create_date_field(self):
return 'create_date'
def get_orders_field(self):
return 'orders'
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureOrderInfo' if market['future'] else 'privatePostOrderInfo'
request = self.create_request(market, {
'order_id': id,
ordersField = self.get_orders_field()
numOrders = len(response[ordersField])
if numOrders > 0:
return self.parse_order(response[ordersField][0])
raise OrderNotFound(self.id + ' order ' + id + ' not found')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privatePostFutureOrdersInfo' if market['future'] else 'privatePost'
request = self.create_request(market)
order_id_in_params = ('order_id' in list(params.keys()))
if market['future']:
if not order_id_in_params:
raise ExchangeError(self.id + ' fetchOrders() requires order_id param for futures market ' + symbol + '(a string of one or more order ids, comma-separated)')
else:
status = params['type'] if ('type' in list(params.keys())) else params['status']
if status is None:
name = 'type' if order_id_in_params else 'status'
raise ExchangeError(self.id + ' fetchOrders() requires ' + name + ' param for spot market ' + symbol + '(0 - for unfilled orders, 1 - for filled/canceled orders)')
if order_id_in_params:
method += 'OrdersInfo'
request = self.extend(request, {
'type': status,
'order_id': params['order_id'],
})
else:
method += 'OrderHistory'
request = self.extend(request, {
'status': status,
'current_page': 1,
'page_length': 200,
})
params = self.omit(params, ['type', 'status'])
response = getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
return self.parse_orders(response[ordersField], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 0,
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 1,
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
currencyId = currency['id'] + '_usd'
if tag:
address = address + ':' + tag
request = {
'symbol': currencyId,
'withdraw_address': address,
'withdraw_amount': amount,
'target': 'address',
}
query = params
if 'chargefee' in query:
request['chargefee'] = query['chargefee']
query = self.omit(query, 'chargefee')
else:
raise ExchangeError(self.id + ' withdraw() requires a `chargefee` parameter')
if self.password:
request['trade_pwd'] = self.password
elif 'password' in query:
request['trade_pwd'] = query['password']
query = self.omit(query, 'password')
elif 'trade_pwd' in query:
request['trade_pwd'] = query['trade_pwd']
query = self.omit(query, 'trade_pwd')
passwordInRequest = ('trade_pwd' in list(request.keys()))
if not passwordInRequest:
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.privatePostWithdraw(self.extend(request, query))
return {
'info': response,
'id': self.safe_string(response, 'withdraw_id'),
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api != 'web':
url += self.version + '/'
url += path
if api != 'web':
url += self.extension
if api == 'private':
self.check_required_credentials()
query = self.keysort(self.extend({
'api_key': self.apiKey,
}, params))
queryString = self.rawencode(query) + '&secret_key=' + self.secret
query['sign'] = self.hash(self.encode(queryString)).upper()
body = self.urlencode(query)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
else:
if params:
url += '?' + self.urlencode(params)
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def create_request(self, market, params={}):
if market['future']:
return self.deep_extend({
'symbol': market['lowercaseId'],
'contract_type': market['contractType'],
}, params)
return self.deep_extend({
'symbol': market['id'],
}, params)
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if 'error_code' in response:
error = self.safe_string(response, 'error_code')
message = self.id + ' ' + self.json(response)
if error in self.exceptions:
ExceptionClass = self.exceptions[error]
raise ExceptionClass(message)
else:
raise ExchangeError(message)
if 'result' in response:
if not response['result']:
raise ExchangeError(self.id + ' ' + self.json(response))
| true | true |
f73244177849f7ecbf3d99cbfddf1b55449d8ed5 | 1,805 | py | Python | days/day8.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | days/day8.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | days/day8.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | import itertools
from aocd import get_data, submit
DAY = 8
YEAR = 2021
def part1(data: str) -> str:
    """Count output digits that use a unique number of segments.

    Digits 1, 7, 4 and 8 light 2, 3, 4 and 7 segments respectively, so any
    output word of one of those lengths identifies its digit unambiguously.

    :param data: puzzle input, one "signals | outputs" entry per line
    :return: the count, as a string
    """
    unique_lengths = {2, 3, 4, 7}
    ans = 0
    for line in data.splitlines():
        # Only the four output words right of '|' matter for part 1.
        _, right = line.split('|')
        # split() (no arg) also drops the empty tokens that split(' ')
        # produced around the '|' separator.
        ans += sum(1 for item in right.split() if len(item) in unique_lengths)
    return str(ans)
def part2(data: str) -> str:
    """Decode every entry's four-digit output and return the total.

    For each entry, brute-force the 7! wirings of segments "abcdefg" until
    one maps all ten signal patterns onto valid digit segment sets, then use
    that wiring to decode the output value.

    :param data: puzzle input, one "signals | outputs" entry per line
    :return: the sum of all decoded outputs, as a string
    """
    # Canonical segment sets for digits 0-9; frozensets so they can key a
    # dict (O(1) digit lookup instead of a linear list.index scan).
    digit_segments = [
        frozenset("abcefg"), frozenset("cf"), frozenset("acdeg"),
        frozenset("acdfg"), frozenset("bcdf"), frozenset("abdfg"),
        frozenset("abdefg"), frozenset("acf"), frozenset("abcdefg"),
        frozenset("abcdfg"),
    ]
    segments_to_digit = {segs: digit for digit, segs in enumerate(digit_segments)}
    valid = set(digit_segments)
    total = 0
    for line in data.splitlines():
        left, right = line.split('|')
        patterns = left.strip().split(' ')
        code = right.strip().split(' ')
        for perm in itertools.permutations("abcdefg"):
            mapping = {"abcdefg"[i]: perm[i] for i in range(7)}
            # A wiring is correct iff every signal pattern maps to a digit.
            if all(frozenset(mapping[c] for c in pattern) in valid for pattern in patterns):
                decoded = 0
                for word in code:
                    decoded = decoded * 10 + segments_to_digit[frozenset(mapping[c] for c in word)]
                total += decoded
                break
    return str(total)
if __name__ == '__main__':
    # Fetch this user's puzzle input via the advent-of-code-data client.
    input_data = get_data(day=DAY, year=YEAR)
    ans1 = part1(input_data)
    # NOTE(review): the part-1 submission is commented out -- presumably it
    # was already submitted; confirm before re-enabling.
    #submit(answer=ans1, day=DAY, year=YEAR, part=1)
    ans2 = part2(input_data)
    submit(answer=ans2, day=DAY, year=YEAR, part=2)
| 30.59322 | 152 | 0.491967 | import itertools
from aocd import get_data, submit
DAY = 8
YEAR = 2021
def part1(data: str) -> str:
    """Count how many output words encode digit 1, 7, 4 or 8.

    Those digits use 2, 3, 4 and 7 lit segments respectively -- the only
    segment counts that identify a digit uniquely.
    """
    total = 0
    for entry in data.splitlines():
        # The four output words sit to the right of the '|' separator.
        _, _, outputs = entry.partition('|')
        for word in outputs.split(' '):
            if len(word) in (2, 3, 4, 7):
                total += 1
    return str(total)
def part2(data: str) -> str:
    """Sum the decoded four-digit outputs of every entry.

    Tries all 7! reassignments of the segment wires until every signal
    pattern on the left of '|' maps onto a legal digit's segment set, then
    decodes the right side with that wiring.
    """
    digit_sets = [set("abcefg"), set("cf"), set("acdeg"), set("acdfg"),
                  set("bcdf"), set("abdfg"), set("abdefg"), set("acf"),
                  set("abcdefg"), set("abcdfg")]
    total = 0
    for entry in data.splitlines():
        left, right = entry.split('|')
        patterns = left.strip().split(' ')
        outputs = right.strip().split(' ')
        for perm in itertools.permutations("abcdefg"):
            rewire = dict(zip("abcdefg", perm))
            # Reject this wiring as soon as any pattern maps to no digit.
            if not all({rewire[ch] for ch in pat} in digit_sets for pat in patterns):
                continue
            value = 0
            for word in outputs:
                value = value * 10 + digit_sets.index({rewire[ch] for ch in word})
            total += value
            break
    return str(total)
if __name__ == '__main__':
    # Fetch this user's puzzle input via the advent-of-code-data client.
    input_data = get_data(day=DAY, year=YEAR)
    ans1 = part1(input_data)  # computed but never submitted in this variant
    ans2 = part2(input_data)
    submit(answer=ans2, day=DAY, year=YEAR, part=2)
| true | true |
f7324580edafa22a20c02c94b053cf9a702a9918 | 6,782 | py | Python | src/network-manager/azext_network_manager/vendored_sdks/operations/_network_manager_deployment_status_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | null | null | null | src/network-manager/azext_network_manager/vendored_sdks/operations/_network_manager_deployment_status_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/network-manager/azext_network_manager/vendored_sdks/operations/_network_manager_deployment_status_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    network_manager_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Construct the POST request that lists a network manager's deployment status."""
    api_version = kwargs.pop('api_version', "2022-02-01-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"
    # Fill the ARM resource-id template with the caller's identifiers.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/listDeploymentStatus")  # pylint: disable=line-too-long
    _url = _format_url_section(_url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "networkManagerName": _SERIALIZER.url("network_manager_name", network_manager_name, 'str'),
    })
    # The query string carries only the service API version.
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Advertise the accepted payload type, plus the body type when present.
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        json=json,
        content=content,
        **kwargs
    )
class NetworkManagerDeploymentStatusOperations(object):
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.network.v2022_02_01_preview.NetworkManagementClient`'s
        :attr:`network_manager_deployment_status` attribute.
    """
    # Exposed so callers can reach the generated model types via this class.
    models = _models
    def __init__(self, *args, **kwargs):
        # The generated service client wires in its plumbing either
        # positionally (client, config, serializer, deserializer) or by
        # keyword; support both calling conventions.
        args = list(args)
        self._client = args.pop(0) if args else kwargs.pop("client")
        self._config = args.pop(0) if args else kwargs.pop("config")
        self._serialize = args.pop(0) if args else kwargs.pop("serializer")
        self._deserialize = args.pop(0) if args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        network_manager_name: str,
        parameters: "_models.NetworkManagerDeploymentStatusParameter",
        **kwargs: Any
    ) -> "_models.NetworkManagerDeploymentStatusListResult":
        """Post to List of Network Manager Deployment Status.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_manager_name: The name of the network manager.
        :type network_manager_name: str
        :param parameters: Parameters supplied to specify which Managed Network deployment status is.
        :type parameters:
         ~azure.mgmt.network.v2022_02_01_preview.models.NetworkManagerDeploymentStatusParameter
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkManagerDeploymentStatusListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2022_02_01_preview.models.NetworkManagerDeploymentStatusListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkManagerDeploymentStatusListResult"]
        # Map auth/not-found/conflict status codes to their typed exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-02-01-preview")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the status filter into the POST body.
        _json = self._serialize.body(parameters, 'NetworkManagerDeploymentStatusParameter')
        request = build_list_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            network_manager_name=network_manager_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Synchronous send through the client's policy pipeline.
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkManagerDeploymentStatusListResult', pipeline_response)
        if cls:
            # Caller-supplied hook also receives the raw pipeline response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/listDeploymentStatus"}  # type: ignore
| 43.197452 | 226 | 0.69537 |
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    network_manager_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that lists a network manager's deployment status."""
    api_version = kwargs.pop('api_version', "2022-02-01-preview")
    content_type = kwargs.pop('content_type', None)
    accept = "application/json"
    # Fill the ARM resource-id template with the caller's identifiers.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/listDeploymentStatus")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "networkManagerName": _SERIALIZER.url("network_manager_name", network_manager_name, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # The query string carries only the service API version.
    _query_parameters = kwargs.pop("params", {})
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Advertise the accepted payload type, plus the body type when present.
    _header_parameters = kwargs.pop("headers", {})
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        json=json,
        content=content,
        **kwargs
    )
class NetworkManagerDeploymentStatusOperations(object):
    """Operations for listing network manager deployment status.
    Access through the generated NetworkManagementClient's
    ``network_manager_deployment_status`` attribute rather than
    instantiating this class directly.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # The generated service client wires in its plumbing either
        # positionally (client, config, serializer, deserializer) or by
        # keyword; support both calling conventions.
        args = list(args)
        self._client = args.pop(0) if args else kwargs.pop("client")
        self._config = args.pop(0) if args else kwargs.pop("config")
        self._serialize = args.pop(0) if args else kwargs.pop("serializer")
        self._deserialize = args.pop(0) if args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        network_manager_name: str,
        parameters: "_models.NetworkManagerDeploymentStatusParameter",
        **kwargs: Any
    ) -> "_models.NetworkManagerDeploymentStatusListResult":
        """POST the deployment-status query and return the parsed result.
        :param resource_group_name: name of the resource group
        :param network_manager_name: name of the network manager
        :param parameters: filter specifying which deployment statuses to list
        :keyword callable cls: custom type/function given the direct response
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 replies
        """
        cls = kwargs.pop('cls', None)
        # Map auth/not-found/conflict status codes to their typed exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-02-01-preview")
        content_type = kwargs.pop('content_type', "application/json")
        # Serialize the status filter into the POST body.
        _json = self._serialize.body(parameters, 'NetworkManagerDeploymentStatusParameter')
        request = build_list_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            network_manager_name=network_manager_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Synchronous send through the client's policy pipeline.
        pipeline_response = self._client._pipeline.run(
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkManagerDeploymentStatusListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/listDeploymentStatus"}
f732469f36d304c9834025f63e03b387b2c8a05a | 4,386 | py | Python | src/train.py | convergence-lab/covid19-detection | 6a57e87ec1d8688712e6170a4c3aafb6e113ca73 | [
"Apache-2.0"
] | 3 | 2020-04-24T12:55:58.000Z | 2020-07-05T22:02:36.000Z | src/train.py | convergence-lab/covid19-detection | 6a57e87ec1d8688712e6170a4c3aafb6e113ca73 | [
"Apache-2.0"
] | null | null | null | src/train.py | convergence-lab/covid19-detection | 6a57e87ec1d8688712e6170a4c3aafb6e113ca73 | [
"Apache-2.0"
] | null | null | null | import toml
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from logzero import logger
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model import Model
from data import load_data, CovidChestxrayDataset
def check_grad(parameters):
    """Return the mean gradient norm over an iterable of parameters.

    Parameters whose ``.grad`` is ``None`` (frozen parameters, or parameters
    not yet touched by a backward pass) are skipped instead of raising
    AttributeError, and an empty/grad-less input yields 0.0 instead of
    ZeroDivisionError.

    :param parameters: iterable of tensors/Parameters, e.g. ``net.parameters()``
    :return: average L2 norm of the available gradients, or 0.0 if none
    """
    total = 0
    count = 0
    for p in parameters:
        if p.grad is None:
            continue
        total += p.grad.norm()
        count += 1
    return total / count if count else 0.0
def train():
    """Train the covid chest-xray classifier end to end.

    Reads hyperparameters from ``config.toml``, builds augmented train and
    plain test loaders, runs the training loop with gradient clipping, logs
    loss/accuracy/F1/AUC per epoch, and saves the model weights to ``net.pt``
    after each epoch.
    """
    with open("config.toml") as f:
        config = toml.load(f)
    base_dir = config["data"]["base_dir"]
    epochs = config["train"]["epochs"]
    batch_size = config["train"]["batch_size"]
    lr = config["train"]["lr"]
    betas = config["train"]["betas"]
    in_filters = config["model"]["in_filters"]
    image_size = config["model"]["image_size"]
    filters = config["model"]["filters"]
    num_classes = config["model"]["num_classes"]
    kernel_size = config["model"]["kernel_size"]
    padding = config["model"]["padding"]
    num_resblocks = config["model"]["num_resblocks"]
    device = "cuda" if torch.cuda.is_available() else "cpu"
    records = load_data(base_dir)
    train_records, test_records = train_test_split(records, test_size=0.2)
    # Augment only the training split; both splits are resized and normalized.
    train_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.RandomAffine(10, translate=[0.1, 0.1], shear=0.1),
        transforms.ColorJitter(brightness=0.7, contrast=0.7),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    test_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    trainset = CovidChestxrayDataset(train_records, base_dir, train_transform)
    testset = CovidChestxrayDataset(test_records, base_dir, test_transform)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
    testloader = DataLoader(testset, batch_size=1, shuffle=False)
    net = Model(in_filters, image_size, filters, kernel_size, padding, num_resblocks, num_classes)
    net.to(device)
    # NLLLoss pairs with a model that outputs log-probabilities.
    criterion = nn.NLLLoss()
    optimizer = optim.AdamW(net.parameters(), lr=lr, betas=betas, weight_decay=1e-2)
    for epoch in range(epochs):
        net.train()
        train_loss = 0
        train_targets = []
        train_probs = []
        train_preds = []
        grad = 0
        for batch in trainloader:
            img, label = batch
            train_targets += label.numpy().tolist()
            img, label = img.to(device), label.to(device)
            optimizer.zero_grad()
            pred = net(img)
            loss = criterion(pred, label)
            loss.backward()
            # Track the average gradient norm before clipping, for logging.
            grad += check_grad(net.parameters())
            torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
            optimizer.step()
            train_loss += loss.item()
            train_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
            # Column 1 is the positive-class score used for ROC-AUC.
            train_probs += pred.cpu().detach().numpy()[:, 1].tolist()
        acc = accuracy_score(train_targets, train_preds)
        f1 = f1_score(train_targets, train_preds, average="macro")
        auc = roc_auc_score(train_targets, train_probs)
        logger.info(f"Epoch {epoch+1} Train loss {train_loss/len(trainloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%, grad {grad/len(trainloader)}")
        net.eval()
        test_loss = 0
        test_targets = []
        test_preds = []
        test_probs = []
        for batch in testloader:
            img, label = batch
            test_targets += label.numpy().tolist()
            img, label = img.to(device), label.to(device)
            with torch.no_grad():
                pred = net(img)
                loss = criterion(pred, label)
            test_loss += loss.item()
            test_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
            test_probs += pred.cpu().detach().numpy()[:, 1].tolist()
        acc = accuracy_score(test_targets, test_preds)
        f1 = f1_score(test_targets, test_preds, average="macro")
        auc = roc_auc_score(test_targets, test_probs)
        logger.info(f"Epoch {epoch+1} Test loss {test_loss/len(testloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%")
        # Fix: call state_dict() -- the original saved the bound method object
        # (net.state_dict) instead of the parameter dict, producing an
        # unloadable checkpoint.
        torch.save(net.state_dict(), "net.pt")
if __name__ == "__main__":
train() | 37.810345 | 168 | 0.628363 | import toml
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from logzero import logger
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model import Model
from data import load_data, CovidChestxrayDataset
def check_grad(parameters):
    """Average L2 gradient norm across *parameters*.

    Assumes every parameter already has a ``.grad`` tensor (i.e. it is
    called right after ``loss.backward()``).
    """
    norms = [p.grad.norm() for p in parameters]
    return sum(norms) / len(norms)
def train():
    """Train the covid chest-xray classifier end to end.

    Reads hyperparameters from ``config.toml``, builds augmented train and
    plain test loaders, runs the training loop with gradient clipping, logs
    loss/accuracy/F1/AUC per epoch, and writes ``net.pt`` after each epoch.
    """
    with open("config.toml") as f:
        config = toml.load(f)
    base_dir = config["data"]["base_dir"]
    epochs = config["train"]["epochs"]
    batch_size = config["train"]["batch_size"]
    lr = config["train"]["lr"]
    betas = config["train"]["betas"]
    in_filters = config["model"]["in_filters"]
    image_size = config["model"]["image_size"]
    filters = config["model"]["filters"]
    num_classes = config["model"]["num_classes"]
    kernel_size = config["model"]["kernel_size"]
    padding = config["model"]["padding"]
    num_resblocks = config["model"]["num_resblocks"]
    device = "cuda" if torch.cuda.is_available() else "cpu"
    records = load_data(base_dir)
    train_records, test_records = train_test_split(records, test_size=0.2)
    # Augment only the training split; both splits are resized and normalized.
    train_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.RandomAffine(10, translate=[0.1, 0.1], shear=0.1),
        transforms.ColorJitter(brightness=0.7, contrast=0.7),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    test_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    trainset = CovidChestxrayDataset(train_records, base_dir, train_transform)
    testset = CovidChestxrayDataset(test_records, base_dir, test_transform)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
    testloader = DataLoader(testset, batch_size=1, shuffle=False)
    net = Model(in_filters, image_size, filters, kernel_size, padding, num_resblocks, num_classes)
    net.to(device)
    # NLLLoss pairs with a model that outputs log-probabilities.
    criterion = nn.NLLLoss()
    optimizer = optim.AdamW(net.parameters(), lr=lr, betas=betas, weight_decay=1e-2)
    for epoch in range(epochs):
        net.train()
        train_loss = 0
        train_targets = []
        train_probs = []
        train_preds = []
        grad = 0
        for batch in trainloader:
            img, label = batch
            train_targets += label.numpy().tolist()
            img, label = img.to(device), label.to(device)
            optimizer.zero_grad()
            pred = net(img)
            loss = criterion(pred, label)
            loss.backward()
            # Average gradient norm before clipping, for logging.
            grad += check_grad(net.parameters())
            torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
            optimizer.step()
            train_loss += loss.item()
            train_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
            # Column 1 is the positive-class score used for ROC-AUC.
            train_probs += pred.cpu().detach().numpy()[:, 1].tolist()
        acc = accuracy_score(train_targets, train_preds)
        f1 = f1_score(train_targets, train_preds, average="macro")
        auc = roc_auc_score(train_targets, train_probs)
        logger.info(f"Epoch {epoch+1} Train loss {train_loss/len(trainloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%, grad {grad/len(trainloader)}")
        net.eval()
        test_loss = 0
        test_targets = []
        test_preds = []
        test_probs = []
        for batch in testloader:
            img, label = batch
            test_targets += label.numpy().tolist()
            img, label = img.to(device), label.to(device)
            with torch.no_grad():
                pred = net(img)
                loss = criterion(pred, label)
            test_loss += loss.item()
            test_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
            test_probs += pred.cpu().detach().numpy()[:, 1].tolist()
        acc = accuracy_score(test_targets, test_preds)
        f1 = f1_score(test_targets, test_preds, average="macro")
        auc = roc_auc_score(test_targets, test_probs)
        logger.info(f"Epoch {epoch+1} Test loss {test_loss/len(testloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%")
        # NOTE(review): net.state_dict is not called here -- this pickles the
        # bound method rather than the weights; it should probably be
        # torch.save(net.state_dict(), "net.pt"). Confirm before relying on net.pt.
        torch.save(net.state_dict, "net.pt")
torch.save(net.state_dict, "net.pt")
if __name__ == "__main__":
train() | true | true |
f73246b8b79ad99643dd8a62e8dbfdb9e865ba77 | 2,837 | py | Python | tests/test_cli.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | 1 | 2020-08-21T02:32:57.000Z | 2020-08-21T02:32:57.000Z | from csimpy import __main__
import csimpy
import os
import shutil
import tempfile
import unittest
class CliTestCase(unittest.TestCase):
    """End-to-end tests driving the csimpy command-line application."""
    # SED-ML fixture executed by the simulation tests.
    EXAMPLE_SEDML_FILENAME = 'tests/fixtures/sine_imports.xml'
    def setUp(self):
        # Fresh scratch directory for each test's outputs.
        self.dirname = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.dirname)
    def test_help(self):
        """--help prints usage and exits via SystemExit."""
        with self.assertRaises(SystemExit):
            with __main__.App(argv=['--help']) as app:
                app.run()
    def test_version(self):
        """-v / --version run without raising."""
        with __main__.App(argv=['-v']) as app:
            # need to work out how to do this on Windows...
            # with capturer.CaptureOutput(merged=False, relay=False) as captured:
            #     with self.assertRaises(SystemExit):
            #         app.run()
            #     self.assertIn(csimpy.__version__, captured.stdout.get_text())
            #     self.assertEqual(captured.stderr.get_text(), '')
            app.run()
        with __main__.App(argv=['--version']) as app:
            # need to work out how to do this on Windows...
            # with capturer.CaptureOutput(merged=False, relay=False) as captured:
            #     with self.assertRaises(SystemExit):
            #         app.run()
            #     self.assertIn(csimpy.__version__, captured.stdout.get_text())
            #     self.assertEqual(captured.stderr.get_text(), '')
            app.run()
        # Fix: removed `self.assertFalse(expr=True, msg="Testing")` -- a
        # leftover debug statement that made this test fail unconditionally.
    def test_sim_short_arg_names(self):
        with __main__.App(argv=['-i', self.EXAMPLE_SEDML_FILENAME, '-o', self.dirname]) as app:
            app.run()
        self.assert_outputs_created(self.dirname)
    def test_sim_long_arg_names(self):
        # NOTE(review): '--outout-directory' looks like a typo for
        # '--output-directory'; kept as-is because it must match whatever flag
        # the CLI actually declares -- confirm against csimpy's argument parser.
        with __main__.App(argv=['--sedml', self.EXAMPLE_SEDML_FILENAME, '--outout-directory', self.dirname]) as app:
            app.run()
        self.assert_outputs_created(self.dirname)
    def assert_outputs_created(self, dirname):
        """Assert the expected per-experiment output tree and PDF plots exist."""
        self.assertEqual(set(os.listdir(dirname)), set(['ex1', 'ex2']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex1'))), set(['BIOMD0000000297']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex2'))), set(['BIOMD0000000297']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex1', 'BIOMD0000000297'))), set(['plot_1_task1.pdf', 'plot_3_task1.pdf']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex2', 'BIOMD0000000297'))), set(['plot_1_task1.pdf', 'plot_3_task1.pdf']))
        # NOTE(review): this list is built but never asserted against in the
        # visible source -- confirm whether existence checks were intended.
        files = [
            os.path.join(dirname, 'ex1', 'BIOMD0000000297', 'plot_1_task1.pdf'),
            os.path.join(dirname, 'ex1', 'BIOMD0000000297', 'plot_3_task1.pdf'),
            os.path.join(dirname, 'ex2', 'BIOMD0000000297', 'plot_1_task1.pdf'),
            os.path.join(dirname, 'ex2', 'BIOMD0000000297', 'plot_3_task1.pdf'),
        ]
| 42.343284 | 137 | 0.621431 | from csimpy import __main__
import csimpy
import os
import shutil
import tempfile
import unittest
class CliTestCase(unittest.TestCase):
    """End-to-end tests driving the csimpy command-line application."""
    # SED-ML fixture executed by the simulation tests.
    EXAMPLE_SEDML_FILENAME = 'tests/fixtures/sine_imports.xml'
    def setUp(self):
        # Fresh scratch directory for each test's outputs.
        self.dirname = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.dirname)
    def test_help(self):
        # --help prints usage and exits via SystemExit.
        with self.assertRaises(SystemExit):
            with __main__.App(argv=['--help']) as app:
                app.run()
    def test_version(self):
        with __main__.App(argv=['-v']) as app:
            app.run()
        with __main__.App(argv=['--version']) as app:
            app.run()
        # NOTE(review): this assertion always fails (expr=True) -- it looks
        # like leftover debug code; confirm and remove.
        self.assertFalse(expr=True, msg="Testing")
    def test_sim_short_arg_names(self):
        with __main__.App(argv=['-i', self.EXAMPLE_SEDML_FILENAME, '-o', self.dirname]) as app:
            app.run()
        self.assert_outputs_created(self.dirname)
    def test_sim_long_arg_names(self):
        # NOTE(review): '--outout-directory' may be a typo for
        # '--output-directory'; it must match the flag the CLI declares.
        with __main__.App(argv=['--sedml', self.EXAMPLE_SEDML_FILENAME, '--outout-directory', self.dirname]) as app:
            app.run()
        self.assert_outputs_created(self.dirname)
    def assert_outputs_created(self, dirname):
        # Expected per-experiment output tree with PDF plots.
        self.assertEqual(set(os.listdir(dirname)), set(['ex1', 'ex2']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex1'))), set(['BIOMD0000000297']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex2'))), set(['BIOMD0000000297']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex1', 'BIOMD0000000297'))), set(['plot_1_task1.pdf', 'plot_3_task1.pdf']))
        self.assertEqual(set(os.listdir(os.path.join(dirname, 'ex2', 'BIOMD0000000297'))), set(['plot_1_task1.pdf', 'plot_3_task1.pdf']))
        # NOTE(review): list built but never asserted against in the visible
        # source -- confirm whether existence checks were intended.
        files = [
            os.path.join(dirname, 'ex1', 'BIOMD0000000297', 'plot_1_task1.pdf'),
            os.path.join(dirname, 'ex1', 'BIOMD0000000297', 'plot_3_task1.pdf'),
            os.path.join(dirname, 'ex2', 'BIOMD0000000297', 'plot_1_task1.pdf'),
            os.path.join(dirname, 'ex2', 'BIOMD0000000297', 'plot_3_task1.pdf'),
        ]
f73247e50e5c0c5c07af1f4e8737b690accc78c9 | 8,242 | py | Python | contrib/devtools/security-check.py | PaulieD/dash | 5be0935889a4faadf12793bb81f4da05eee9818f | [
"MIT"
] | null | null | null | contrib/devtools/security-check.py | PaulieD/dash | 5be0935889a4faadf12793bb81f4da05eee9818f | [
"MIT"
] | 14 | 2021-07-16T00:54:07.000Z | 2022-01-04T20:56:20.000Z | contrib/devtools/security-check.py | PaulieD/dash | 5be0935889a4faadf12793bb81f4da05eee9818f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
    '''
    Check for position independent executable (PIE), allowing for address space randomization.
    '''
    proc = subprocess.Popen([READELF_CMD, '-h', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE, universal_newlines=True)
    stdout, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # A PIE binary reports its ELF header type as DYN (shared object).
    return any(
        len(fields) >= 2 and fields[0] == 'Type:' and fields[1] == 'DYN'
        for fields in (line.split() for line in stdout.splitlines())
    )
def get_ELF_program_headers(executable):
    '''Return type and flags for ELF program headers'''
    p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    in_headers = False
    count = 0
    headers = []
    for line in stdout.splitlines():
        if line.startswith('Program Headers:'):
            in_headers = True
        if line == '':  # a blank line terminates the program-header table
            in_headers = False
        if in_headers:
            if count == 1: # header line
                # Record the column offsets from the table's header row; data
                # rows are sliced by position rather than split on whitespace
                # because individual columns can themselves contain spaces.
                ofs_typ = line.find('Type')
                ofs_offset = line.find('Offset')
                ofs_flags = line.find('Flg')
                ofs_align = line.find('Align')
                if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse elfread -lW output')
            elif count > 1:
                typ = line[ofs_typ:ofs_offset].rstrip()
                flags = line[ofs_flags:ofs_align].rstrip()
                headers.append((typ, flags))
            count += 1
    return headers
def check_ELF_NX(executable):
    '''
    Check that no sections are writable and executable (including the stack)
    '''
    program_headers = get_ELF_program_headers(executable)
    # The GNU_STACK header must be present (non-executable stack marker)...
    has_gnu_stack = any(typ == 'GNU_STACK' for typ, _ in program_headers)
    # ...and no segment may be mapped both writable and executable.
    has_wx_segment = any('W' in flags and 'E' in flags for _, flags in program_headers)
    return has_gnu_stack and not has_wx_segment
def check_ELF_RELRO(executable):
    '''
    Check for read-only relocations.
    GNU_RELRO program header must exist
    Dynamic section must have BIND_NOW flag
    '''
    have_gnu_relro = False
    for (typ, flags) in get_ELF_program_headers(executable):
        # Note: not checking flags == 'R': here as linkers set the permission differently
        # This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker need to write to this area so these are RW.
        # Glibc itself takes care of mprotecting this area R after relocations are finished.
        # See also https://marc.info/?l=binutils&m=1498883354122353
        if typ == 'GNU_RELRO':
            have_gnu_relro = True
    have_bindnow = False
    p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    for line in stdout.splitlines():
        tokens = line.split()
        # BIND_NOW may appear as its own dynamic entry or inside the FLAGS entry.
        if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
            have_bindnow = True
    return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
    '''
    Check for use of stack canary
    '''
    proc = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE, universal_newlines=True)
    stdout, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # A canary-protected binary references __stack_chk_fail in its dynamic symbols.
    return any('__stack_chk_fail' in line for line in stdout.splitlines())
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits.
    Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
    and bits is the DllCharacteristics value.
    '''
    proc = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    stdout, _stderr = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    arch = ''
    bits = 0
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens) < 2:
            continue
        if tokens[0] == 'architecture:':
            arch = tokens[1].rstrip(',')
        elif tokens[0] == 'DllCharacteristics':
            bits = int(tokens[1], 16)  # objdump prints the value in hex
    return (arch, bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    _arch, bits = get_PE_dll_characteristics(executable)
    return bool(bits & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE)
# On 64 bit, must support high-entropy 64-bit address space layout randomization
# in addition to DYNAMIC_BASE to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
    '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
    arch, bits = get_PE_dll_characteristics(executable)
    if arch != 'i386:x86-64':
        # High-entropy VA is unnecessary on 32-bit, so it trivially passes.
        assert(arch == 'i386')
        return True
    return (bits & IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA) == IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    _arch, bits = get_PE_dll_characteristics(executable)
    return bool(bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT)
# Security checks to run for each executable format:
# maps format name -> list of (check name, check function) pairs.
CHECKS = {
    'ELF': [
        ('PIE', check_ELF_PIE),
        ('NX', check_ELF_NX),
        ('RELRO', check_ELF_RELRO),
        ('Canary', check_ELF_Canary)
    ],
    'PE': [
        ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
        ('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
        ('NX', check_PE_NX)
    ]
}
def identify_executable(executable):
    '''
    Identify the executable format from its magic bytes.

    Returns 'PE' for Windows PE files, 'ELF' for ELF binaries,
    or None if the format is not recognized.
    Raises IOError/OSError if the file cannot be opened.
    '''
    # Bug fix: the original opened the global `filename` instead of the
    # `executable` argument, so the function only worked when called from
    # the __main__ loop that happened to set that global.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    if magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
    # Run every applicable security check on each file given on the command
    # line. Exit status is 0 only when all files pass all fatal checks.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('%s: unknown format' % filename)
                retval = 1
                continue
            failed = []
            warning = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    # checks listed in NONFATAL only produce a warning;
                    # any other failed check fails the whole run
                    if name in NONFATAL:
                        warning.append(name)
                    else:
                        failed.append(name)
            if failed:
                print('%s: failed %s' % (filename, ' '.join(failed)))
                retval = 1
            if warning:
                print('%s: warning %s' % (filename, ' '.join(warning)))
        except IOError:
            print('%s: cannot open' % filename)
            retval = 1
    sys.exit(retval)
| 38.157407 | 167 | 0.639044 |
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {}
def check_ELF_PIE(executable):
    '''
    Check for position independent executable (PIE),
    indicated by an ELF header Type of DYN.
    '''
    p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    ok = False
    for line in stdout.splitlines():
        line = line.split()
        if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
            ok = True
    return ok
def get_ELF_program_headers(executable):
    '''
    Parse `readelf -l -W` output and return the program headers
    as a list of (type, flags) string tuples.
    '''
    p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    in_headers = False
    count = 0
    headers = []
    for line in stdout.splitlines():
        if line.startswith('Program Headers:'):
            in_headers = True
        if line == '':
            # a blank line terminates the Program Headers section
            in_headers = False
        if in_headers:
            if count == 1:
                # second line of the section is the column-title row;
                # record the column offsets so fields can be sliced by position
                ofs_typ = line.find('Type')
                ofs_offset = line.find('Offset')
                ofs_flags = line.find('Flg')
                ofs_align = line.find('Align')
                if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse elfread -lW output')
            elif count > 1:
                # data rows: slice out the Type and Flags columns
                typ = line[ofs_typ:ofs_offset].rstrip()
                flags = line[ofs_flags:ofs_align].rstrip()
                headers.append((typ, flags))
            count += 1
    return headers
def check_ELF_NX(executable):
    '''
    Check that no sections are writable and executable (including the stack).
    '''
    have_gnu_stack = False
    have_wx = False
    for typ, flags in get_ELF_program_headers(executable):
        if typ == 'GNU_STACK':
            have_gnu_stack = True
        # a segment that is both writable and executable defeats NX
        have_wx = have_wx or ('W' in flags and 'E' in flags)
    return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
    '''
    Check for read-only relocations:
    a GNU_RELRO program header must exist and the dynamic section
    must carry the BIND_NOW flag.
    '''
    have_gnu_relro = False
    for (typ, flags) in get_ELF_program_headers(executable):
        # permission flags of GNU_RELRO are ignored on purpose; the PT_LOAD
        # header determines the effective permissions
        if typ == 'GNU_RELRO':
            have_gnu_relro = True
    have_bindnow = False
    p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    for line in stdout.splitlines():
        tokens = line.split()
        # BIND_NOW can appear as its own tag or inside the FLAGS tag
        if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
            have_bindnow = True
    return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
    '''
    Check for use of a stack canary: __stack_chk_fail must appear
    in the dynamic symbol table.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    ok = False
    for line in stdout.splitlines():
        if '__stack_chk_fail' in line:
            ok = True
    return ok
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits via `objdump -x`.
    Returns a tuple (arch, bits) where arch is e.g. 'i386:x86-64' or 'i386'
    and bits is the DllCharacteristics value.
    '''
    p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    arch = ''
    bits = 0
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens)>=2 and tokens[0] == 'architecture:':
            arch = tokens[1].rstrip(',')
        if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
            # objdump prints the value in hexadecimal
            bits = int(tokens[1],16)
    return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
    return (bits & reqbits) == reqbits
def check_PE_HIGH_ENTROPY_VA(executable):
    '''PIE: DllCharacteristics bit 0x20 signifies high-entropy 64-bit ASLR'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    if arch == 'i386:x86-64':
        reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
    else:
        # unnecessary on 32-bit: require nothing, so the check always passes
        assert(arch == 'i386')
        reqbits = 0
    return (bits & reqbits) == reqbits
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
# Security checks to run for each executable format:
# maps format name -> list of (check name, check function) pairs.
CHECKS = {
    'ELF': [
        ('PIE', check_ELF_PIE),
        ('NX', check_ELF_NX),
        ('RELRO', check_ELF_RELRO),
        ('Canary', check_ELF_Canary)
    ],
    'PE': [
        ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
        ('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
        ('NX', check_PE_NX)
    ]
}
def identify_executable(executable):
    '''
    Identify the executable format from its magic bytes.

    Returns 'PE' for Windows PE files, 'ELF' for ELF binaries,
    or None if the format is not recognized.
    Raises IOError/OSError if the file cannot be opened.
    '''
    # Bug fix: the original opened the global `filename` instead of the
    # `executable` argument, so the function only worked when called from
    # the __main__ loop that happened to set that global.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    if magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
    # Run every applicable security check on each file given on the command
    # line. Exit status is 0 only when all files pass all fatal checks.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('%s: unknown format' % filename)
                retval = 1
                continue
            failed = []
            warning = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    # checks listed in NONFATAL only produce a warning;
                    # any other failed check fails the whole run
                    if name in NONFATAL:
                        warning.append(name)
                    else:
                        failed.append(name)
            if failed:
                print('%s: failed %s' % (filename, ' '.join(failed)))
                retval = 1
            if warning:
                print('%s: warning %s' % (filename, ' '.join(warning)))
        except IOError:
            print('%s: cannot open' % filename)
            retval = 1
    sys.exit(retval)
| true | true |
f7324802c626b3c05b5f37a799a82b7a683fbb30 | 919 | py | Python | soupy/utils/vector2function.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | 1 | 2021-12-07T15:22:23.000Z | 2021-12-07T15:22:23.000Z | soupy/utils/vector2function.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | null | null | null | soupy/utils/vector2function.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | null | null | null | # Copyright (c) 2016, The University of Texas at Austin & University of
# California, Merced.
#
# All Rights reserved.
# See file COPYRIGHT for details.
#
# This file is part of the hIPPYlib library. For more information and source code
# availability see https://hippylib.github.io.
#
# hIPPYlib is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License (as published by the Free
# Software Foundation) version 2.0 dated June 1991.
from __future__ import absolute_import, division, print_function
from dolfin import Function
def vector2Function(x,Vh, **kwargs):
    """
    Wrap a finite element vector x into a finite element function in the space Vh.
    kwargs is optional keywords arguments to be passed to the construction of a dolfin Function
    """
    result = Function(Vh, **kwargs)
    vec = result.vector()
    vec.zero()          # clear any initial values
    vec.axpy(1., x)     # vec <- vec + 1.0 * x, i.e. copy x into the function
    return result
| 32.821429 | 95 | 0.733406 |
from __future__ import absolute_import, division, print_function
from dolfin import Function
def vector2Function(x,Vh, **kwargs):
    """
    Wrap a finite element vector x into a finite element function in the space Vh.
    kwargs: optional keyword arguments forwarded to the dolfin Function constructor.
    """
    fun = Function(Vh,**kwargs)
    fun.vector().zero()
    # axpy(1., x): add 1.0 * x into the zeroed vector, i.e. copy x
    fun.vector().axpy(1., x)
    return fun
| true | true |
f732485708d463cf7bbae78662a8c0acf679efce | 1,010 | py | Python | testing/CinemaRenderTest.py | JonasLukasczyk/workbench | 4a59faeb181185bc57d8bce605bdd1f0bfe4fe42 | [
"BSD-3-Clause"
] | null | null | null | testing/CinemaRenderTest.py | JonasLukasczyk/workbench | 4a59faeb181185bc57d8bce605bdd1f0bfe4fe42 | [
"BSD-3-Clause"
] | null | null | null | testing/CinemaRenderTest.py | JonasLukasczyk/workbench | 4a59faeb181185bc57d8bce605bdd1f0bfe4fe42 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import pytest_xvfb
import os
import cinemasci
@pytest.fixture(autouse=True, scope='session')
def ensure_xvfb():
    """Session-wide autouse fixture: abort the test session when Xvfb is unavailable."""
    if not pytest_xvfb.xvfb_available():
        raise Exception("Tests need Xvfb to run.")
def test_render():
    """Smoke test: create a Cinema database, query it, read and render its images."""
    # create a test database
    os.system("./bin/create-database --database scratch/cinema.cdb")
    # open a cinema database
    cdb = cinemasci.DatabaseReader();
    cdb.inputs["Path"].setValue( 'scratch/cinema.cdb' );
    # select some data products
    query = cinemasci.DatabaseQuery();
    query.inputs["Table"].setValue(cdb.outputs['Table']);
    query.inputs["Query"].setValue('SELECT * FROM input LIMIT 5 OFFSET 0');
    # read data products
    imageReader = cinemasci.ImageReader();
    imageReader.inputs["Table"].setValue(query.outputs['Table'])
    # render the data products (comment fixed: was a copy/paste duplicate)
    imageRenderer = cinemasci.ImageRenderer();
    imageRenderer.inputs["Image"].setValue( imageReader.outputs["Images"] );
    # print images
    images = imageRenderer.outputs["Image"].getValue();
    print(images)
| 28.055556 | 74 | 0.723762 | import pytest
import pytest_xvfb
import os
import cinemasci
@pytest.fixture(autouse=True, scope='session')
def ensure_xvfb():
    """Session-wide autouse fixture: abort the test session when Xvfb is unavailable."""
    if not pytest_xvfb.xvfb_available():
        raise Exception("Tests need Xvfb to run.")
def test_render():
    """Smoke test: create a Cinema database, query it, read and render its images."""
    # create a test database on disk
    os.system("./bin/create-database --database scratch/cinema.cdb")
    # open the cinema database
    cdb = cinemasci.DatabaseReader();
    cdb.inputs["Path"].setValue( 'scratch/cinema.cdb' );
    # select a few data products
    query = cinemasci.DatabaseQuery();
    query.inputs["Table"].setValue(cdb.outputs['Table']);
    query.inputs["Query"].setValue('SELECT * FROM input LIMIT 5 OFFSET 0');
    # Read Data Products
    imageReader = cinemasci.ImageReader();
    imageReader.inputs["Table"].setValue(query.outputs['Table'])
    # Render the data products
    imageRenderer = cinemasci.ImageRenderer();
    imageRenderer.inputs["Image"].setValue( imageReader.outputs["Images"] );
    # print images
    images = imageRenderer.outputs["Image"].getValue();
    print(images)
| true | true |
f73248e41a1991164ec0c0c4fe0a9e0935251679 | 2,181 | py | Python | tests/test_local_storage.py | parikshitsaikia1619/pqai-db | bf550448fee3c2ca766cbdaf6ef8a1ccb613004e | [
"MIT"
] | 3 | 2022-01-18T04:44:13.000Z | 2022-03-29T19:10:48.000Z | tests/test_local_storage.py | parikshitsaikia1619/pqai-db | bf550448fee3c2ca766cbdaf6ef8a1ccb613004e | [
"MIT"
] | 4 | 2022-01-14T19:32:24.000Z | 2022-03-28T15:15:32.000Z | tests/test_local_storage.py | parikshitsaikia1619/pqai-db | bf550448fee3c2ca766cbdaf6ef8a1ccb613004e | [
"MIT"
] | 2 | 2022-01-31T18:54:37.000Z | 2022-03-25T14:42:43.000Z | """
Unit test for custom wrapper around local storage
"""
import unittest
import sys
import json
from pathlib import Path
BASE_DIR = Path(__file__).parent.parent
sys.path.append(str(BASE_DIR.resolve()))
#pylint: disable=wrong-import-position
from core.local_storage_wrapper import LocalStorage
import testutil
class TestLocalStorage(unittest.TestCase):
    """Exercise get, put, delete and list operations of the local storage wrapper."""

    def setUp(self):
        """Create a storage wrapper rooted at a fresh test directory."""
        test_root = testutil.set_up_test_local_directory()
        self.storage = LocalStorage(test_root)

    def test_get_file(self):
        """Can read contents of a file given its key?"""
        contents = self.storage.get('patents/US7654321B2.json')
        self.assertIsInstance(contents, bytes)
        self.assertGreater(len(contents), 0)
        self.assertEqual(json.loads(contents)['publicationNumber'], 'US7654321B2')

    def test_error_when_reading_non_existing_file(self):
        """Raises exception when reading a non existing file?"""
        with self.assertRaises(FileNotFoundError):
            self.storage.get('patents/arbitrary.json')

    def test_put_and_delete_file(self):
        """Can create new files, read them back, and delete them?"""
        contents = self.storage.get('patents/US7654321B2.json')
        new_key = 'patents/new.json'
        self.storage.put(new_key, contents)
        self.assertEqual(self.storage.get(new_key), contents)
        self.storage.delete(new_key)
        with self.assertRaises(FileNotFoundError):
            self.storage.get(new_key)

    def test_list_files(self):
        """Can list files?"""
        matches = self.storage.list('patents/US')
        self.assertIs(type(matches), list)
        self.assertGreater(len(matches), 0)
        # a non-matching prefix yields an empty listing
        self.assertEqual(len(self.storage.list('patents/notexist')), 0)
if __name__ == "__main__":
unittest.main()
| 27.961538 | 79 | 0.657038 | import unittest
import sys
import json
from pathlib import Path
BASE_DIR = Path(__file__).parent.parent
sys.path.append(str(BASE_DIR.resolve()))
from core.local_storage_wrapper import LocalStorage
import testutil
class TestLocalStorage(unittest.TestCase):
def setUp(self):
root = testutil.set_up_test_local_directory()
self.storage = LocalStorage(root)
def test_get_file(self):
key = 'patents/US7654321B2.json'
contents = self.storage.get(key)
self.assertIsInstance(contents, bytes)
self.assertGreater(len(contents), 0)
data = json.loads(contents)
self.assertEqual(data['publicationNumber'], 'US7654321B2')
def test_error_when_reading_non_existing_file(self):
invalid_key = 'patents/arbitrary.json'
attempt = lambda: self.storage.get(invalid_key)
self.assertRaises(FileNotFoundError, attempt)
def test_put_and_delete_file(self):
key = 'patents/US7654321B2.json'
contents = self.storage.get(key)
new_key = 'patents/new.json'
self.storage.put(new_key, contents)
retrieved = self.storage.get(new_key)
self.assertEqual(retrieved, contents)
self.storage.delete(new_key)
attempt = lambda: self.storage.get(new_key)
self.assertRaises(FileNotFoundError, attempt)
def test_list_files(self):
prefix = 'patents/US'
matches = self.storage.list(prefix)
self.assertIs(type(matches), list)
self.assertGreater(len(matches), 0)
key = 'patents/notexist'
output = self.storage.list(key)
self.assertEqual(len(output), 0)
if __name__ == "__main__":
unittest.main()
| true | true |
f7324aefd9332ce661a8bd4ef6f4f2249ee510e7 | 13,225 | py | Python | trains/backend_config/config.py | MatthewYee92/trains | f5f13658c335250165d9c57c0ba30abffdda4171 | [
"Apache-2.0"
] | 1 | 2021-05-06T13:33:36.000Z | 2021-05-06T13:33:36.000Z | trains/backend_config/config.py | iridiumblue/trains | 101e5393d1ba73462a6a85df55a2dfb4b629cb0d | [
"Apache-2.0"
] | 4 | 2020-09-26T00:55:57.000Z | 2022-02-10T01:18:20.000Z | trains/backend_config/config.py | iridiumblue/trains | 101e5393d1ba73462a6a85df55a2dfb4b629cb0d | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import functools
import json
import os
import sys
import warnings
from fnmatch import fnmatch
from os.path import expanduser
from typing import Any
import six
from pathlib2 import Path
from ..utilities.pyhocon import ConfigTree, ConfigFactory
from pyparsing import (
ParseFatalException,
ParseException,
RecursiveGrammarException,
ParseSyntaxException,
)
from six.moves.urllib.parse import urlparse
from .bucket_config import S3BucketConfig
from .defs import (
Environment,
DEFAULT_CONFIG_FOLDER,
LOCAL_CONFIG_PATHS,
ENV_CONFIG_PATHS,
LOCAL_CONFIG_FILES,
LOCAL_CONFIG_FILE_OVERRIDE_VAR,
ENV_CONFIG_PATH_OVERRIDE_VAR,
)
from .defs import is_config_file
from .entry import Entry, NotSet
from .errors import ConfigurationError
from .log import initialize as initialize_log, logger
from .utils import get_options
try:
from typing import Text
except ImportError:
# windows conda-less hack
Text = Any
log = logger(__file__)
class ConfigEntry(Entry):
    """Entry whose value is resolved by key lookup in a Config instance."""

    # class-level logger slot; not assigned here
    logger = None

    def __init__(self, config, *keys, **kwargs):
        # type: (Config, Text, Any) -> None
        super(ConfigEntry, self).__init__(*keys, **kwargs)
        self.config = config

    def _get(self, key):
        # type: (Text) -> Any
        """Return the configured value for `key`, or NotSet when absent."""
        return self.config.get(key, NotSet)

    def error(self, message):
        # type: (Text) -> None
        """Log a resolution error (message capitalized for display)."""
        log.error(message.capitalize())
class Config(object):
    """
    Represents a server configuration.
    If watch=True, will watch configuration folders for changes and reload itself.
    NOTE: will not watch folders that were created after initialization.
    """

    # used in place of None in Config.get as default value because None is a valid value
    _MISSING = object()

    def __init__(
        self,
        config_folder=None,
        env=None,
        verbose=True,
        relative_to=None,
        app=None,
        is_server=False,
        **_
    ):
        """Build an (initially empty) config; loads immediately when relative_to is given."""
        self._app = app
        self._verbose = verbose
        self._folder_name = config_folder or DEFAULT_CONFIG_FOLDER
        self._roots = []
        self._config = ConfigTree()
        # environment name: explicit arg, then TRAINS_ENV variable, then default
        self._env = env or os.environ.get("TRAINS_ENV", Environment.default)
        self.config_paths = set()
        self.is_server = is_server

        if self._verbose:
            print("Config env:%s" % str(self._env))

        if not self._env:
            raise ValueError(
                "Missing environment in either init of environment variable"
            )
        if self._env not in get_options(Environment):
            raise ValueError("Invalid environment %s" % env)
        if relative_to is not None:
            self.load_relative_to(relative_to)

    @property
    def root(self):
        """First configuration root folder, or None when no roots are set."""
        return self.roots[0] if self.roots else None

    @property
    def roots(self):
        """List of configuration root folders searched on reload."""
        return self._roots

    @roots.setter
    def roots(self, value):
        self._roots = value

    @property
    def env(self):
        """Active environment name (e.g. the default environment)."""
        return self._env

    def logger(self, path=None):
        """Return a module logger for `path` (delegates to the package logger factory)."""
        return logger(path)

    def load_relative_to(self, *module_paths):
        """Set config roots to the config folder sibling of each module path, then reload."""
        def normalize(p):
            # replace the file name with the configuration folder name
            return Path(os.path.abspath(str(p))).with_name(self._folder_name)

        self.roots = list(map(normalize, module_paths))
        self.reload()

    def _reload(self):
        """Re-read all configuration sources and return the merged ConfigTree.

        Merge order (later wins): current config, roots/env paths,
        local config paths, local config files.
        """
        env = self._env
        config = self._config.copy()

        if self.is_server:
            env_config_paths = ENV_CONFIG_PATHS
        else:
            env_config_paths = []

        # an override env var replaces the env config paths entirely
        env_config_path_override = os.environ.get(ENV_CONFIG_PATH_OVERRIDE_VAR)
        if env_config_path_override:
            env_config_paths = [expanduser(env_config_path_override)]

        # merge configuration from root and other environment config paths
        if self.roots or env_config_paths:
            config = functools.reduce(
                lambda cfg, path: ConfigTree.merge_configs(
                    cfg,
                    self._read_recursive_for_env(path, env, verbose=self._verbose),
                    copy_trees=True,
                ),
                self.roots + env_config_paths,
                config,
            )

        # merge configuration from local configuration paths
        if LOCAL_CONFIG_PATHS:
            config = functools.reduce(
                lambda cfg, path: ConfigTree.merge_configs(
                    cfg, self._read_recursive(path, verbose=self._verbose), copy_trees=True
                ),
                LOCAL_CONFIG_PATHS,
                config,
            )

        local_config_files = LOCAL_CONFIG_FILES
        # an override env var replaces the local config file list entirely
        local_config_override = os.environ.get(LOCAL_CONFIG_FILE_OVERRIDE_VAR)
        if local_config_override:
            local_config_files = [expanduser(local_config_override)]

        # merge configuration from local configuration files
        if local_config_files:
            config = functools.reduce(
                lambda cfg, file_path: ConfigTree.merge_configs(
                    cfg,
                    self._read_single_file(file_path, verbose=self._verbose),
                    copy_trees=True,
                ),
                local_config_files,
                config,
            )

        config["env"] = env
        return config

    def replace(self, config):
        """Swap in a new ConfigTree wholesale."""
        self._config = config

    def reload(self):
        """Reload all configuration sources and replace the active config."""
        self.replace(self._reload())

    def initialize_logging(self):
        """Apply the 'logging' config section; returns False when no section exists."""
        logging_config = self._config.get("logging", None)
        if not logging_config:
            return False

        # handle incomplete file handlers: drop handlers without a class or
        # file handlers without a filename; create missing log files/dirs
        deleted = []
        handlers = logging_config.get("handlers", {})
        for name, handler in list(handlers.items()):
            cls = handler.get("class", None)
            is_file = cls and "FileHandler" in cls
            if cls is None or (is_file and "filename" not in handler):
                deleted.append(name)
                del handlers[name]
            elif is_file:
                file = Path(handler.get("filename"))
                if not file.is_file():
                    file.parent.mkdir(parents=True, exist_ok=True)
                    file.touch()

        # remove dependency in deleted handlers
        root_logger = logging_config.get("root", None)
        loggers = list(logging_config.get("loggers", {}).values()) + (
            [root_logger] if root_logger else []
        )
        # NOTE(review): the loop variable `logger` shadows the module-level
        # `logger` factory within this method
        for logger in loggers:
            handlers = logger.get("handlers", None)
            if not handlers:
                continue
            logger["handlers"] = [h for h in handlers if h not in deleted]
        extra = None
        if self._app:
            extra = {"app": self._app}
        initialize_log(logging_config, extra=extra)
        return True

    def __getitem__(self, key):
        return self._config[key]

    def get(self, key, default=_MISSING):
        """Return the value for `key`, falling back to `default` when missing."""
        value = self._config.get(key, default)
        # NOTE(review): when `default` is omitted it equals the _MISSING
        # sentinel, and `not default` is False for that object — so this
        # KeyError appears unreachable and the sentinel leaks to the caller;
        # confirm intended behavior.
        if value is self._MISSING and not default:
            raise KeyError(
                "Unable to find value for key '{}' and default value was not provided.".format(
                    key
                )
            )
        return value

    def to_dict(self):
        """Return the configuration as a plain (ordered) dict."""
        return self._config.as_plain_ordered_dict()

    def as_json(self):
        """Return the configuration serialized as pretty-printed JSON."""
        return json.dumps(self.to_dict(), indent=2)

    def _read_recursive_for_env(self, root_path_str, env, verbose=True):
        """Read `<root>/<default env>` and overlay `<root>/<env>` when different."""
        root_path = Path(root_path_str)
        if root_path.exists():
            default_config = self._read_recursive(
                root_path / Environment.default, verbose=verbose
            )
            if (root_path / env) != (root_path / Environment.default):
                env_config = self._read_recursive(
                    root_path / env, verbose=verbose
                )  # None is ok, will return empty config
                config = ConfigTree.merge_configs(default_config, env_config, True)
            else:
                config = default_config
        else:
            config = ConfigTree()

        return config

    def _read_recursive(self, conf_root, verbose=True):
        """Walk `conf_root` and load every config file, keyed by its dotted relative path."""
        conf = ConfigTree()
        if not conf_root:
            return conf
        conf_root = Path(conf_root)

        if not conf_root.exists():
            if verbose:
                print("No config in %s" % str(conf_root))
            return conf

        if verbose:
            print("Loading config from %s" % str(conf_root))
        for root, dirs, files in os.walk(str(conf_root)):
            rel_dir = str(Path(root).relative_to(conf_root))
            if rel_dir == ".":
                rel_dir = ""
            # sub-directory path becomes a dotted key prefix
            prefix = rel_dir.replace("/", ".")

            for filename in files:
                if not is_config_file(filename):
                    continue

                if prefix != "":
                    key = prefix + "." + Path(filename).stem
                else:
                    key = Path(filename).stem

                file_path = str(Path(root) / filename)

                conf.put(key, self._read_single_file(file_path, verbose=verbose))

        return conf

    @staticmethod
    def _read_single_file(file_path, verbose=True):
        """Parse a single HOCON file; empty tree when missing, ConfigurationError on parse failure."""
        if not file_path or not Path(file_path).is_file():
            return ConfigTree()

        if verbose:
            print("Loading config from file %s" % file_path)

        try:
            return ConfigFactory.parse_file(file_path)
        except ParseSyntaxException as ex:
            msg = "Failed parsing {0} ({1.__class__.__name__}): (at char {1.loc}, line:{1.lineno}, col:{1.column})".format(
                file_path, ex
            )
            # re-raise as ConfigurationError, preserving the original traceback
            six.reraise(
                ConfigurationError,
                ConfigurationError(msg, file_path=file_path),
                sys.exc_info()[2],
            )
        except (ParseException, ParseFatalException, RecursiveGrammarException) as ex:
            msg = "Failed parsing {0} ({1.__class__.__name__}): {1}".format(
                file_path, ex
            )
            six.reraise(ConfigurationError, ConfigurationError(msg), sys.exc_info()[2])
        except Exception as ex:
            print("Failed loading %s: %s" % (file_path, ex))
            raise

    def get_config_for_bucket(self, base_url, extra_configurations=None):
        """
        Get the credentials for an AWS S3 bucket from the config
        :param base_url: URL of bucket
        :param extra_configurations:
        :return: bucket config
        :rtype: bucket config
        """
        warnings.warn(
            "Use backend_config.bucket_config.BucketList.get_config_for_uri",
            DeprecationWarning,
        )
        configs = S3BucketConfig.from_list(self.get("sdk.aws.s3.credentials", []))
        if extra_configurations:
            configs.extend(extra_configurations)

        def find_match(host=None, bucket=None):
            # return the first configured bucket matching host and/or bucket
            # (fnmatch allows wildcard patterns in the configuration)
            if not host and not bucket:
                raise ValueError("host or bucket required")
            try:
                if host:
                    res = {
                        config
                        for config in configs
                        if (config.host and fnmatch(host, config.host))
                        and (
                            not bucket
                            or not config.bucket
                            or fnmatch(bucket.lower(), config.bucket.lower())
                        )
                    }
                else:
                    res = {
                        config
                        for config in configs
                        if config.bucket
                        and fnmatch(bucket.lower(), config.bucket.lower())
                    }
                return next(iter(res))
            except StopIteration:
                pass

        parsed = urlparse(base_url)
        parts = Path(parsed.path.strip("/")).parts
        if parsed.netloc:
            # We have a netloc (either an actual hostname or an AWS bucket name).
            # First, we'll try with the netloc as host, but if we don't find anything, we'll try without a host and
            # with the netloc as the bucket name
            match = None
            if parts:
                # try host/bucket only if path parts contain any element
                match = find_match(host=parsed.netloc, bucket=parts[0])
            if not match:
                # no path parts or no config found for host/bucket, try netloc as bucket
                match = find_match(bucket=parsed.netloc)
        else:
            # No netloc, so we'll simply search by bucket
            match = find_match(bucket=parts[0])

        if match:
            return match

        # fall back to the top-level sdk.aws.s3 credentials; NOTE(review):
        # ":9000" looks like a heuristic for non-AWS (e.g. MinIO) endpoints —
        # confirm against the storage helper that consumes this config
        non_aws_s3_host_suffix = ":9000"
        if parsed.netloc.endswith(non_aws_s3_host_suffix):
            host = parsed.netloc
            bucket = parts[0] if parts else None
        else:
            host = None
            bucket = parsed.netloc

        return S3BucketConfig(
            key=self.get("sdk.aws.s3.key", None),
            secret=self.get("sdk.aws.s3.secret", None),
            region=self.get("sdk.aws.s3.region", None),
            multipart=True,
            bucket=bucket,
            host=host,
        )
| 32.256098 | 123 | 0.569603 | from __future__ import print_function
import functools
import json
import os
import sys
import warnings
from fnmatch import fnmatch
from os.path import expanduser
from typing import Any
import six
from pathlib2 import Path
from ..utilities.pyhocon import ConfigTree, ConfigFactory
from pyparsing import (
ParseFatalException,
ParseException,
RecursiveGrammarException,
ParseSyntaxException,
)
from six.moves.urllib.parse import urlparse
from .bucket_config import S3BucketConfig
from .defs import (
Environment,
DEFAULT_CONFIG_FOLDER,
LOCAL_CONFIG_PATHS,
ENV_CONFIG_PATHS,
LOCAL_CONFIG_FILES,
LOCAL_CONFIG_FILE_OVERRIDE_VAR,
ENV_CONFIG_PATH_OVERRIDE_VAR,
)
from .defs import is_config_file
from .entry import Entry, NotSet
from .errors import ConfigurationError
from .log import initialize as initialize_log, logger
from .utils import get_options
try:
from typing import Text
except ImportError:
Text = Any
log = logger(__file__)
class ConfigEntry(Entry):
    """Entry whose value is resolved by key lookup in a Config instance."""

    # class-level logger slot; not assigned here
    logger = None

    def __init__(self, config, *keys, **kwargs):
        super(ConfigEntry, self).__init__(*keys, **kwargs)
        self.config = config

    def _get(self, key):
        """Return the configured value for `key`, or NotSet when absent."""
        return self.config.get(key, NotSet)

    def error(self, message):
        """Log a resolution error (message capitalized for display)."""
        log.error(message.capitalize())
class Config(object):
_MISSING = object()
def __init__(
self,
config_folder=None,
env=None,
verbose=True,
relative_to=None,
app=None,
is_server=False,
**_
):
self._app = app
self._verbose = verbose
self._folder_name = config_folder or DEFAULT_CONFIG_FOLDER
self._roots = []
self._config = ConfigTree()
self._env = env or os.environ.get("TRAINS_ENV", Environment.default)
self.config_paths = set()
self.is_server = is_server
if self._verbose:
print("Config env:%s" % str(self._env))
if not self._env:
raise ValueError(
"Missing environment in either init of environment variable"
)
if self._env not in get_options(Environment):
raise ValueError("Invalid environment %s" % env)
if relative_to is not None:
self.load_relative_to(relative_to)
@property
def root(self):
return self.roots[0] if self.roots else None
@property
def roots(self):
return self._roots
@roots.setter
def roots(self, value):
self._roots = value
@property
def env(self):
return self._env
def logger(self, path=None):
return logger(path)
def load_relative_to(self, *module_paths):
def normalize(p):
return Path(os.path.abspath(str(p))).with_name(self._folder_name)
self.roots = list(map(normalize, module_paths))
self.reload()
def _reload(self):
env = self._env
config = self._config.copy()
if self.is_server:
env_config_paths = ENV_CONFIG_PATHS
else:
env_config_paths = []
env_config_path_override = os.environ.get(ENV_CONFIG_PATH_OVERRIDE_VAR)
if env_config_path_override:
env_config_paths = [expanduser(env_config_path_override)]
if self.roots or env_config_paths:
config = functools.reduce(
lambda cfg, path: ConfigTree.merge_configs(
cfg,
self._read_recursive_for_env(path, env, verbose=self._verbose),
copy_trees=True,
),
self.roots + env_config_paths,
config,
)
if LOCAL_CONFIG_PATHS:
config = functools.reduce(
lambda cfg, path: ConfigTree.merge_configs(
cfg, self._read_recursive(path, verbose=self._verbose), copy_trees=True
),
LOCAL_CONFIG_PATHS,
config,
)
local_config_files = LOCAL_CONFIG_FILES
local_config_override = os.environ.get(LOCAL_CONFIG_FILE_OVERRIDE_VAR)
if local_config_override:
local_config_files = [expanduser(local_config_override)]
if local_config_files:
config = functools.reduce(
lambda cfg, file_path: ConfigTree.merge_configs(
cfg,
self._read_single_file(file_path, verbose=self._verbose),
copy_trees=True,
),
local_config_files,
config,
)
config["env"] = env
return config
def replace(self, config):
self._config = config
def reload(self):
self.replace(self._reload())
def initialize_logging(self):
logging_config = self._config.get("logging", None)
if not logging_config:
return False
deleted = []
handlers = logging_config.get("handlers", {})
for name, handler in list(handlers.items()):
cls = handler.get("class", None)
is_file = cls and "FileHandler" in cls
if cls is None or (is_file and "filename" not in handler):
deleted.append(name)
del handlers[name]
elif is_file:
file = Path(handler.get("filename"))
if not file.is_file():
file.parent.mkdir(parents=True, exist_ok=True)
file.touch()
root_logger = logging_config.get("root", None)
loggers = list(logging_config.get("loggers", {}).values()) + (
[root_logger] if root_logger else []
)
for logger in loggers:
handlers = logger.get("handlers", None)
if not handlers:
continue
logger["handlers"] = [h for h in handlers if h not in deleted]
extra = None
if self._app:
extra = {"app": self._app}
initialize_log(logging_config, extra=extra)
return True
def __getitem__(self, key):
return self._config[key]
def get(self, key, default=_MISSING):
value = self._config.get(key, default)
if value is self._MISSING and not default:
raise KeyError(
"Unable to find value for key '{}' and default value was not provided.".format(
key
)
)
return value
def to_dict(self):
return self._config.as_plain_ordered_dict()
def as_json(self):
return json.dumps(self.to_dict(), indent=2)
def _read_recursive_for_env(self, root_path_str, env, verbose=True):
root_path = Path(root_path_str)
if root_path.exists():
default_config = self._read_recursive(
root_path / Environment.default, verbose=verbose
)
if (root_path / env) != (root_path / Environment.default):
env_config = self._read_recursive(
root_path / env, verbose=verbose
)
config = ConfigTree.merge_configs(default_config, env_config, True)
else:
config = default_config
else:
config = ConfigTree()
return config
def _read_recursive(self, conf_root, verbose=True):
conf = ConfigTree()
if not conf_root:
return conf
conf_root = Path(conf_root)
if not conf_root.exists():
if verbose:
print("No config in %s" % str(conf_root))
return conf
if verbose:
print("Loading config from %s" % str(conf_root))
for root, dirs, files in os.walk(str(conf_root)):
rel_dir = str(Path(root).relative_to(conf_root))
if rel_dir == ".":
rel_dir = ""
prefix = rel_dir.replace("/", ".")
for filename in files:
if not is_config_file(filename):
continue
if prefix != "":
key = prefix + "." + Path(filename).stem
else:
key = Path(filename).stem
file_path = str(Path(root) / filename)
conf.put(key, self._read_single_file(file_path, verbose=verbose))
return conf
    @staticmethod
    def _read_single_file(file_path, verbose=True):
        """Parse a single HOCON config file into a ConfigTree.

        Returns an empty ConfigTree when ``file_path`` is falsy or not a
        regular file. Parse failures are re-raised as ConfigurationError with
        the original traceback preserved (via six.reraise); any other failure
        is printed and propagated unchanged.
        """
        if not file_path or not Path(file_path).is_file():
            return ConfigTree()
        if verbose:
            print("Loading config from file %s" % file_path)
        try:
            return ConfigFactory.parse_file(file_path)
        except ParseSyntaxException as ex:
            # Syntax errors carry an exact location - surface it in the message.
            msg = "Failed parsing {0} ({1.__class__.__name__}): (at char {1.loc}, line:{1.lineno}, col:{1.column})".format(
                file_path, ex
            )
            six.reraise(
                ConfigurationError,
                ConfigurationError(msg, file_path=file_path),
                sys.exc_info()[2],
            )
        except (ParseException, ParseFatalException, RecursiveGrammarException) as ex:
            # Other parse-layer failures have no structured location info.
            msg = "Failed parsing {0} ({1.__class__.__name__}): {1}".format(
                file_path, ex
            )
            six.reraise(ConfigurationError, ConfigurationError(msg), sys.exc_info()[2])
        except Exception as ex:
            # Unexpected errors (I/O etc.): log for context, then re-raise as-is.
            print("Failed loading %s: %s" % (file_path, ex))
            raise
    def get_config_for_bucket(self, base_url, extra_configurations=None):
        """Resolve the S3 bucket configuration matching ``base_url``.

        Deprecated: use
        ``backend_config.bucket_config.BucketList.get_config_for_uri``.

        Candidates come from the ``sdk.aws.s3.credentials`` setting plus any
        ``extra_configurations``. Matching tries host+bucket first, then
        bucket alone; when nothing matches, a default S3BucketConfig is built
        from the global ``sdk.aws.s3.*`` settings.

        :param base_url: bucket URL/URI to match against configured credentials
        :param extra_configurations: optional extra bucket configs to consider
        :return: the matching (or default) S3BucketConfig
        """
        warnings.warn(
            "Use backend_config.bucket_config.BucketList.get_config_for_uri",
            DeprecationWarning,
        )
        configs = S3BucketConfig.from_list(self.get("sdk.aws.s3.credentials", []))
        if extra_configurations:
            configs.extend(extra_configurations)
        def find_match(host=None, bucket=None):
            # Returns the first config whose host/bucket patterns (fnmatch
            # wildcards) match, or None implicitly when nothing matches.
            if not host and not bucket:
                raise ValueError("host or bucket required")
            try:
                if host:
                    res = {
                        config
                        for config in configs
                        if (config.host and fnmatch(host, config.host))
                        and (
                            not bucket
                            or not config.bucket
                            or fnmatch(bucket.lower(), config.bucket.lower())
                        )
                    }
                else:
                    res = {
                        config
                        for config in configs
                        if config.bucket
                        and fnmatch(bucket.lower(), config.bucket.lower())
                    }
                return next(iter(res))
            except StopIteration:
                # No matching config; fall through to return None.
                pass
        parsed = urlparse(base_url)
        parts = Path(parsed.path.strip("/")).parts
        if parsed.netloc:
            # with the netloc as the bucket name
            match = None
            if parts:
                # try host/bucket only if path parts contain any element
                match = find_match(host=parsed.netloc, bucket=parts[0])
            if not match:
                # no path parts or no config found for host/bucket, try netloc as bucket
                match = find_match(bucket=parsed.netloc)
        else:
            # No netloc, so we'll simply search by bucket
            match = find_match(bucket=parts[0])
        if match:
            return match
        # Fallback: build a default config. Port :9000 is treated as a
        # non-AWS endpoint (presumably a self-hosted S3 such as MinIO - TODO
        # confirm), in which case the netloc is the host and the first path
        # element the bucket; otherwise the netloc itself is the bucket.
        non_aws_s3_host_suffix = ":9000"
        if parsed.netloc.endswith(non_aws_s3_host_suffix):
            host = parsed.netloc
            bucket = parts[0] if parts else None
        else:
            host = None
            bucket = parsed.netloc
        return S3BucketConfig(
            key=self.get("sdk.aws.s3.key", None),
            secret=self.get("sdk.aws.s3.secret", None),
            region=self.get("sdk.aws.s3.region", None),
            multipart=True,
            bucket=bucket,
            host=host,
        )
| true | true |
f7324e9d85c6b26f3d0e35e8b0a6ac93397d49bb | 1,086 | py | Python | alipay/aop/api/response/AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse(AlipayResponse):
    """Response wrapper for the msaas mediarecog aftscvpay transaction-initialize API."""

    def __init__(self):
        super(AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse, self).__init__()
        # Backing fields for the properties below; populated by
        # parse_response_content().
        self._result = None
        self._transaction_id = None

    @property
    def result(self):
        """Result payload returned by the gateway."""
        return self._result

    @result.setter
    def result(self, value):
        self._result = value

    @property
    def transaction_id(self):
        """Identifier of the initialized transaction."""
        return self._transaction_id

    @transaction_id.setter
    def transaction_id(self, value):
        self._transaction_id = value

    def parse_response_content(self, response_content):
        """Parse the raw payload and copy any known fields onto this object."""
        response = super(AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse, self).parse_response_content(response_content)
        # setattr routes through the property setters above, exactly as the
        # explicit per-field assignments did.
        for field in ('result', 'transaction_id'):
            if field in response:
                setattr(self, field, response[field])
| 30.166667 | 132 | 0.713628 |
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse(AlipayResponse):
    """Response wrapper for the msaas mediarecog aftscvpay transaction-initialize API."""
    def __init__(self):
        super(AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse, self).__init__()
        # Backing fields for the two properties below.
        self._result = None
        self._transaction_id = None
    @property
    def result(self):
        """Result payload returned by the gateway."""
        return self._result
    @result.setter
    def result(self, value):
        self._result = value
    @property
    def transaction_id(self):
        """Identifier of the initialized transaction."""
        return self._transaction_id
    @transaction_id.setter
    def transaction_id(self, value):
        self._transaction_id = value
    def parse_response_content(self, response_content):
        """Parse the raw payload and copy any known fields onto this object."""
        response = super(AlipayMsaasMediarecogAftscvpayTransactionInitializeResponse, self).parse_response_content(response_content)
        if 'result' in response:
            self.result = response['result']
        if 'transaction_id' in response:
            self.transaction_id = response['transaction_id']
| true | true |
f7324ff40cd974939b80147b034871e58952ecc3 | 1,329 | py | Python | dvd540crop.py | tai271828/session-video-publisher | 7c3ea170759e53b94345e655a1299ba9ab32158c | [
"0BSD"
] | null | null | null | dvd540crop.py | tai271828/session-video-publisher | 7c3ea170759e53b94345e655a1299ba9ab32158c | [
"0BSD"
] | 5 | 2020-11-15T12:45:03.000Z | 2021-12-07T08:29:40.000Z | dvd540crop.py | tai271828/session-video-publisher | 7c3ea170759e53b94345e655a1299ba9ab32158c | [
"0BSD"
] | 4 | 2018-06-23T16:48:03.000Z | 2021-04-18T09:51:29.000Z | """Crop letterbox from videos fixed by dvd540fix.
This assumes the input video is of dimension 720x540.
Example usage:
python dvd540crop.py 自製高擴充性機器學習系統 --height=480 --top=40
"""
import argparse
import os
import pathlib
import subprocess
# Input videos are read from $VIDEO_ROOT/in; cropped output goes to
# $VIDEO_ROOT/out (created on demand).
i_dir = pathlib.Path(os.environ["VIDEO_ROOT"], "in")
o_dir = pathlib.Path(os.environ["VIDEO_ROOT"], "out")
o_dir.mkdir(parents=True, exist_ok=True)

# Map each input's stem (filename without extension) to its full path so a
# video can be selected by name on the command line.
input_mapping = {i_path.stem: i_path for i_path in i_dir.glob("*.avi")}

parser = argparse.ArgumentParser()
parser.add_argument(
    "filename",
    type=str,
    choices=list(input_mapping.keys()),
    help="Input filename, not including extension",
)
parser.add_argument(
    "--top", type=int, required=True, help="Top letterbox to crop"
)
parser.add_argument(
    "--height", type=int, required=True, help="Height of cropped video"
)
parser.add_argument(
    "--threads",
    "--thread",
    type=str,
    default="auto",
    help="Threads to use (passed directly to FFmpeg)",
)
options = parser.parse_args()

i_path = input_mapping[options.filename]
o_path = o_dir.joinpath(f"{i_path.stem}.mp4")
# Pass the command as an argument list with shell=False so filenames that
# contain spaces, quotes or shell metacharacters can neither break the
# command line nor inject extra shell commands (the old shell=True f-string
# relied on fragile manual quoting).
subprocess.run(
    [
        "ffmpeg",
        "-i", str(i_path),
        "-threads", options.threads,
        "-filter:v", f"crop=720:{options.height}:0:{options.top}",
        "-codec:v", "libx264",
        "-crf", "0",
        "-preset", "veryslow",
        str(o_path),
    ],
    check=True,
)
| 23.732143 | 71 | 0.696764 |
import argparse
import os
import pathlib
import subprocess
# Input videos live in $VIDEO_ROOT/in; cropped output goes to $VIDEO_ROOT/out.
i_dir = pathlib.Path(os.environ["VIDEO_ROOT"], "in")
o_dir = pathlib.Path(os.environ["VIDEO_ROOT"], "out")
o_dir.mkdir(parents=True, exist_ok=True)
# Map each .avi file's stem to its path so inputs can be chosen by name.
input_mapping = {i_path.stem: i_path for i_path in i_dir.glob("*.avi")}
parser = argparse.ArgumentParser()
parser.add_argument(
    "filename",
    type=str,
    choices=list(input_mapping.keys()),
    help="Input filename, not including extension",
)
parser.add_argument(
    "--top", type=int, required=True, help="Top letterbox to crop"
)
parser.add_argument(
    "--height", type=int, required=True, help="Height of cropped video"
)
parser.add_argument(
    "--threads",
    "--thread",
    type=str,
    default="auto",
    help="Threads to use (passed directly to FFmpeg)",
)
options = parser.parse_args()
i_path = input_mapping[options.filename]
o_path = o_dir.joinpath(f"{i_path.stem}.mp4")
# Crop the 720-wide frame to 720x<height> starting <top> pixels down, then
# re-encode losslessly (-crf 0) with x264.
# NOTE(review): shell=True with interpolated paths breaks on filenames
# containing quotes/metacharacters - an argument list would be safer.
subprocess.run(
    f'ffmpeg -i "{i_path}" -threads {options.threads} '
    f'-filter:v "crop=720:{options.height}:0:{options.top}" '
    f'-codec:v libx264 -crf 0 -preset veryslow "{o_path}"',
    shell=True,
    check=True,
)
| true | true |
f73250a01c56694962e8ffc0fb3a96d714bf4401 | 1,598 | py | Python | mytreeview.py | xiaoyaofe/bale-backstage | 6d1955e7a6038c9588f86530cd4519e0f719f285 | [
"MIT"
] | null | null | null | mytreeview.py | xiaoyaofe/bale-backstage | 6d1955e7a6038c9588f86530cd4519e0f719f285 | [
"MIT"
] | null | null | null | mytreeview.py | xiaoyaofe/bale-backstage | 6d1955e7a6038c9588f86530cd4519e0f719f285 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
class MyTreeview(Frame):
    """A scrollable Treeview with buttons for marking/unmarking selected rows.

    Rows are recolored through their tag: marking sets the tag background to
    light green; unmarking (or clearing) resets it to white.
    """

    def __init__(self, master):
        super().__init__(master)
        self.treeview = ttk.Treeview(self)
        # attach a vertical scrollbar to the frame
        verbar = ttk.Scrollbar(self, orient='vertical')
        verbar.pack(side='right', fill='y')
        verbar.configure(command=self.treeview.yview)
        self.treeview.configure(yscrollcommand=verbar.set)
        self.treeview.pack(fill='both', expand=True)
        # Button bar. The original created ``bf`` but parented the buttons to
        # ``self``, leaving the frame empty; the buttons now live inside
        # ``bf`` as apparently intended.
        bf = Frame(self)
        bf.pack()
        Button(bf, text='    ', bg='lightgreen', command=self._on_mark).pack(side='left')
        Button(bf, text='    ', bg='white', command=self._on_unmark).pack(side='left')
        Button(bf, text='clear all marks', command=self._on_clear).pack(side='left')
        ttk.Style().theme_use('clam')

    def _recolor(self, items, color):
        """Set the tag background of every row in ``items`` to ``color``."""
        for index in items:
            tags = self.treeview.item(index, 'tags')
            self.treeview.tag_configure(tags, background=color)

    def _on_mark(self):
        """Mark the selected rows (light-green background)."""
        self._recolor(self.treeview.selection(), 'lightgreen')

    def _on_unmark(self):
        """Unmark the selected rows (white background)."""
        self._recolor(self.treeview.selection(), 'white')

    def _on_clear(self):
        """Reset every top-level row back to a white background."""
        self._recolor(self.treeview.get_children(), 'white')
def main():
    """Show a demo window containing a single MyTreeview."""
    root = Tk()
    widget = MyTreeview(root)
    widget.pack()
    root.mainloop()


if __name__ == '__main__':
    main()
| 36.318182 | 99 | 0.614518 | from tkinter import *
from tkinter import ttk
class MyTreeview(Frame):
    """A Treeview in a scrollable frame with mark/unmark/clear buttons."""
    def __init__(self, master):
        super().__init__(master)
        self.treeview = ttk.Treeview(self)
        # Vertical scrollbar wired to the treeview's y-view.
        verbar = ttk.Scrollbar(self, orient='vertical')
        verbar.pack(side = 'right', fill = 'y')
        verbar.configure(command=self.treeview.yview)
        self.treeview.configure(yscrollcommand=verbar.set)
        self.treeview.pack(fill = 'both',expand= True)
        # Button bar. NOTE(review): ``bf`` is created but the buttons are
        # parented to ``self``, so the frame stays empty - presumably
        # unintended; confirm against the UI design.
        bf =Frame(self)
        bf.pack()
        Button(self, text = '    ', bg = 'lightgreen', command = self._on_mark).pack(side = 'left')
        Button(self, text = '    ', bg = 'white', command = self._on_unmark).pack(side = 'left')
        Button(self, text = 'clear all marks', command = self._on_clear).pack(side = 'left')
        ttk.Style().theme_use('clam')
    def _on_mark(self):
        """Color the tag of each selected row light green."""
        for index in self.treeview.selection():
            text = self.treeview.item(index, 'tags')
            self.treeview.tag_configure(text, background = 'lightgreen')
    def _on_unmark(self):
        """Reset the tag background of each selected row to white."""
        for index in self.treeview.selection():
            text = self.treeview.item(index, 'tags')
            self.treeview.tag_configure(text, background = 'white')
    def _on_clear(self):
        """Reset the tag background of every top-level row to white."""
        for index in self.treeview.get_children():
            text = self.treeview.item(index, 'tags')
            self.treeview.tag_configure(text, background = 'white')
def main():
    """Open a demo window with a MyTreeview widget."""
    root = Tk()
    MyTreeview(root).pack()
    root.mainloop()
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
f73251238eb97dd177c8621e70d07e8df4721a2a | 629 | py | Python | dashboard/manage.py | hebergui/webtrade | 338fbf334b6ba173296635b380b53b088a87bb95 | [
"Apache-2.0"
] | 9 | 2019-11-13T18:05:51.000Z | 2021-05-05T16:04:35.000Z | dashboard/manage.py | hebergui/webtrade | 338fbf334b6ba173296635b380b53b088a87bb95 | [
"Apache-2.0"
] | 9 | 2019-12-04T23:50:52.000Z | 2022-02-10T12:02:50.000Z | dashboard/manage.py | hebergui/webtrade | 338fbf334b6ba173296635b380b53b088a87bb95 | [
"Apache-2.0"
] | 7 | 2020-04-19T17:34:58.000Z | 2021-12-25T22:09:33.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module and hand control to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dashboard.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.590909 | 73 | 0.683625 |
import os
import sys
def main():
    """Run administrative tasks (entry point for manage.py)."""
    # Tell Django which settings module to load unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dashboard.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Dispatch to Django's command-line handling when run as a script.
if __name__ == '__main__':
    main()
| true | true |
f73251bfb4f2a7f1536890502a96d262792a3c60 | 602 | py | Python | baseline/baseline_classifier.py | HLTCHKUST/emotion-dialogue | 0d58b339134dd9a2f386948ae474b270a77370f9 | [
"MIT"
] | 40 | 2019-04-29T09:17:48.000Z | 2021-11-19T06:32:02.000Z | baseline/baseline_classifier.py | HLTCHKUST/emotion-dialogue | 0d58b339134dd9a2f386948ae474b270a77370f9 | [
"MIT"
] | null | null | null | baseline/baseline_classifier.py | HLTCHKUST/emotion-dialogue | 0d58b339134dd9a2f386948ae474b270a77370f9 | [
"MIT"
] | 4 | 2019-09-01T10:00:56.000Z | 2020-03-08T16:00:22.000Z | from utils import constant
from sklearn import svm
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
def get_classifier(ty="LR", c=1.0, max_depth=5, n_estimators=300, gamma=0):
    """Build an unfitted classifier of the requested family.

    :param ty: classifier type: "LR" (logistic regression), "SVM"
        (linear SVC) or "XGB" (XGBoost gradient-boosted trees)
    :param c: inverse regularization strength (logistic regression only)
    :param max_depth: maximum tree depth (XGBoost only)
    :param n_estimators: number of boosting rounds (XGBoost only)
    :param gamma: minimum loss reduction to make a split (XGBoost only)
    :return: the constructed, unfitted estimator
    :raises ValueError: for an unknown ``ty`` (previously the function fell
        through and crashed with UnboundLocalError at ``return``)
    """
    if ty == "LR":
        classifier = LogisticRegression(solver='lbfgs', multi_class='multinomial', C=c)
    elif ty == "SVM":
        classifier = SVC(kernel='linear')
    elif ty == "XGB":
        classifier = XGBClassifier(max_depth=max_depth, n_estimators=n_estimators,
                                   gamma=gamma, n_jobs=4, tree_method="gpu_hist")  ## change later ##
    else:
        raise ValueError("unknown classifier type: {!r}".format(ty))
    return classifier
from sklearn import svm
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
def get_classifier(ty="LR", c=1.0, max_depth=5, n_estimators=300, gamma=0):
    """Build an unfitted classifier of the requested family.

    :param ty: classifier type: "LR", "SVM" or "XGB"
    :return: the constructed, unfitted estimator
    :raises ValueError: for an unknown ``ty``

    NOTE(review): the stored copy of this function was truncated/garbled at
    the end (missing the ``return``); restored to a valid implementation.
    """
    if ty == "LR":
        classifier = LogisticRegression(solver='lbfgs', multi_class='multinomial', C=c)
    elif ty == "SVM":
        classifier = SVC(kernel='linear')
    elif ty == "XGB":
        classifier = XGBClassifier(max_depth=max_depth, n_estimators=n_estimators,
                                   gamma=gamma, n_jobs=4, tree_method="gpu_hist")
    else:
        raise ValueError("unknown classifier type: {!r}".format(ty))
    return classifier
f732535face63b7bf467ecb42cb63a898b1c32f2 | 446 | py | Python | SideProjectsPython/quadratic.py | EthanLawr/ComputerScience | 5edb37c05023f3cd0d4160bcc480c3d2fdabc3a8 | [
"Apache-2.0"
] | 2 | 2021-08-21T17:40:07.000Z | 2021-11-08T10:37:25.000Z | SideProjectsPython/quadratic.py | EthanLawr/ComputerScience | 5edb37c05023f3cd0d4160bcc480c3d2fdabc3a8 | [
"Apache-2.0"
] | 1 | 2017-12-11T16:08:15.000Z | 2017-12-11T16:08:15.000Z | SideProjectsPython/quadratic.py | EthanLawr/ComputerScience | 5edb37c05023f3cd0d4160bcc480c3d2fdabc3a8 | [
"Apache-2.0"
] | null | null | null | import math
# Read the three coefficients of num1*x^2 + num2*x + num3 = 0.
num1 = int(input("Enter a:"))
num2 = int(input("Enter b:"))
num3 = int(input("Enter c:"))
# Quadratic formula; math.sqrt raises ValueError when the discriminant is
# negative (complex roots), exactly as before any output is printed.
root = math.sqrt(num2 ** 2 - 4 * num1 * num3)
plus_root = (-num2 + root) / (2 * num1)
print("Positive: ")
print(plus_root)
print("--------------------------------------------------------------")
print("Negative: ")
minus_root = (-num2 - root) / (2 * num1)
print(minus_root)
| 29.733333 | 71 | 0.5 | import math
num1 = int(input("Enter a:"))
num2 = int(input("Enter b:"))
num3 = int(input("Enter c:"))
result1 = (-num2 + math.sqrt(num2**2 - 4 * (num1) * (num3)))
result2 = (result1 / (2 * num1))
print("Positive: ")
print(result2)
print("-------------------------------------------------------------")
print("Negative: ")
result3 = (-num2 - math.sqrt(num2**2 - 4 * (num1) * (num3)))
result4 = (result3 / (2 * num1))
print(result4)
| true | true |
f73253d8f8ec0e7ce8cc0868d1b80b62f06e7512 | 3,697 | py | Python | apps/portalbase/system/system__usermanager/methodclass/system_usermanager.gen.py | jumpscale7/jumpscale_portal | 8c99265e48f85643f8a52bc40a23f5266fb09231 | [
"Apache-2.0"
] | 2 | 2016-04-14T14:05:01.000Z | 2016-04-21T07:20:36.000Z | apps/portalbase/system/system__usermanager/methodclass/system_usermanager.gen.py | jumpscale7/jumpscale_portal | 8c99265e48f85643f8a52bc40a23f5266fb09231 | [
"Apache-2.0"
] | 13 | 2016-03-07T12:07:15.000Z | 2018-02-28T13:11:59.000Z | apps/portalbase/system/system__usermanager/methodclass/system_usermanager.gen.py | jumpscale7/jumpscale_portal | 8c99265e48f85643f8a52bc40a23f5266fb09231 | [
"Apache-2.0"
] | 5 | 2016-03-08T07:49:51.000Z | 2018-10-19T13:57:04.000Z | from JumpScale import j
class system_usermanager(j.code.classGetBase()):
    """
    Actor interface for user and group management.
    All methods are generated stubs: a concrete implementation must
    override them, and calling a stub raises NotImplementedError.
    """
    def __init__(self):
        # NOTE: the generated template contained a stray ``pass`` and a
        # commented-out osis-init call here; both were dead code.
        self._te = {}
        self.actorname = "usermanager"
        self.appname = "system"
    def authenticate(self, name, secret, **kwargs):
        """
        authenticate and return False if not successful,
        otherwise return the secret for the api
        param:name name
        param:secret md5 or passwd
        result str
        """
        raise NotImplementedError("not implemented method authenticate")
    def create(self, username, password, groups, emails, domain, provider, **kwargs):
        """
        create a user
        param:username name of user
        param:password password, optional when provider is set
        param:groups list of groups this user belongs to
        param:emails list of email addresses
        param:domain domain of user
        param:provider provider for this user
        result str
        """
        raise NotImplementedError("not implemented method create")
    def createGroup(self, name, domain, description, **kwargs):
        """
        create a group
        param:name name of group
        param:domain domain of group
        param:description description of group
        result bool
        """
        raise NotImplementedError("not implemented method createGroup")
    def delete(self, username, **kwargs):
        """
        delete a user
        param:username name of the user
        """
        raise NotImplementedError("not implemented method delete")
    def deleteGroup(self, id, **kwargs):
        """
        delete a group
        param:id id/name of group
        result bool
        """
        raise NotImplementedError("not implemented method deleteGroup")
    def editGroup(self, name, domain, description, users, **kwargs):
        """
        edit a group
        param:name name of group
        param:domain domain of group
        param:description description of group
        param:users list or comma separated string of users
        result bool
        """
        raise NotImplementedError("not implemented method editGroup")
    def editUser(self, username, groups, password, emails, domain, **kwargs):
        """
        set groups, password, emails and domain for a user
        param:username name of user
        param:groups list of groups this user belongs to
        param:password password for user
        param:emails list of email addresses
        param:domain domain of user
        """
        raise NotImplementedError("not implemented method editUser")
    def userexists(self, name, **kwargs):
        """
        check whether a user exists
        param:name name
        result bool
        """
        raise NotImplementedError("not implemented method userexists")
    def userget(self, name, **kwargs):
        """
        get a user
        param:name name of user
        """
        raise NotImplementedError("not implemented method userget")
    def whoami(self, **kwargs):
        """
        return username
        result str
        """
        raise NotImplementedError("not implemented method whoami")
| 32.147826 | 85 | 0.621315 | from JumpScale import j
class system_usermanager(j.code.classGetBase()):
    """Generated actor stub for user/group management; every method raises NotImplementedError."""
    def __init__(self):
        pass
        self._te={}
        self.actorname="usermanager"
        self.appname="system"
    def authenticate(self, name, secret, **kwargs):
        """Authenticate ``name`` with ``secret``; abstract stub."""
        raise NotImplementedError ("not implemented method authenticate")
    def create(self, username, password, groups, emails, domain, provider, **kwargs):
        """Create a user; abstract stub."""
        raise NotImplementedError ("not implemented method create")
    def createGroup(self, name, domain, description, **kwargs):
        """Create a group; abstract stub."""
        raise NotImplementedError ("not implemented method createGroup")
    def delete(self, username, **kwargs):
        """Delete a user; abstract stub."""
        raise NotImplementedError ("not implemented method delete")
    def deleteGroup(self, id, **kwargs):
        """Delete a group by id/name; abstract stub."""
        raise NotImplementedError ("not implemented method deleteGroup")
    def editGroup(self, name, domain, description, users, **kwargs):
        """Edit a group's description/members; abstract stub."""
        raise NotImplementedError ("not implemented method editGroup")
    def editUser(self, username, groups, password, emails, domain, **kwargs):
        """Edit a user's groups/password/emails/domain; abstract stub."""
        raise NotImplementedError ("not implemented method editUser")
    def userexists(self, name, **kwargs):
        """Return whether a user exists; abstract stub."""
        raise NotImplementedError ("not implemented method userexists")
    def userget(self, name, **kwargs):
        """Fetch a user by name; abstract stub."""
        raise NotImplementedError ("not implemented method userget")
    def whoami(self, **kwargs):
        """Return the current username; abstract stub."""
        raise NotImplementedError ("not implemented method whoami")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.