| Column | Type (⌀ = nullable) | Range / Classes |
|---|---|---|
| hexsha | string | lengths 40 – 40 |
| size | int64 | 4 – 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 – 245 |
| max_stars_repo_name | string | lengths 6 – 130 |
| max_stars_repo_head_hexsha | string | lengths 40 – 40 |
| max_stars_repo_licenses | list | lengths 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24 – 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24 – 24 |
| max_issues_repo_path | string | lengths 4 – 245 |
| max_issues_repo_name | string | lengths 6 – 130 |
| max_issues_repo_head_hexsha | string | lengths 40 – 40 |
| max_issues_repo_licenses | list | lengths 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24 – 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24 – 24 |
| max_forks_repo_path | string | lengths 4 – 245 |
| max_forks_repo_name | string | lengths 6 – 130 |
| max_forks_repo_head_hexsha | string | lengths 40 – 40 |
| max_forks_repo_licenses | list | lengths 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24 – 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24 – 24 |
| content | string | lengths 4 – 996k |
| avg_line_length | float64 | 1.33 – 58.2k |
| max_line_length | int64 | 2 – 323k |
| alphanum_fraction | float64 | 0 – 0.97 |
| content_no_comment | string | lengths 0 – 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 790d1d3badf6fd562c1a03dd2abb2e15b574c694 | size: 5,753 | ext: py | lang: Python
max_stars: manager.py | DenizShabani/TelegramMassDMBot | 826198d853bbf6498e44e573a7f2d249c0b0ba60 | ["MIT"] | count: 17 | 2022-02-23T08:06:21.000Z – 2022-03-26T19:03:41.000Z
max_issues: manager.py | DenizShabani/TelegramMassDMBot | 826198d853bbf6498e44e573a7f2d249c0b0ba60 | ["MIT"] | count: 2 | 2022-03-21T22:34:23.000Z – 2022-03-25T20:55:25.000Z
max_forks: manager.py | DenizShabani/TelegramMassDMBot | 826198d853bbf6498e44e573a7f2d249c0b0ba60 | ["MIT"] | count: 3 | 2022-02-23T07:20:01.000Z – 2022-03-26T19:03:50.000Z
content:
import requests
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, pyfiglet
from colorama import init, Fore
import os, random
from time import sleep
init()
lg = Fore.LIGHTGREEN_EX
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
r = Fore.RED
n = Fore.RESET
colors = [lg, r, w, cy, ye]
def banner():
f = pyfiglet.Figlet(font='slant')
banner = f.renderText('Telegram')
print(f'{random.choice(colors)}{banner}{n}')
print(r+' Version: 1 | Author: Shabani'+n+'\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
#print(r)
banner()
#print(n)
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] List out all the accounts'+n)
print(lg+'[4] Delete specific accounts'+n)
#print(lg+'[5] Update your Genisys'+n)
print(lg+'[5] Quit')
a = int(input(f'\nEnter your choice: {r}'))
if a == 1:
with open('vars.txt', 'ab') as g:
newly_added = []
while True:
a = int(input(f'\n{lg}Enter API ID: {r}'))
b = str(input(f'{lg}Enter API Hash: {r}'))
c = str(input(f'{lg}Enter Phone Number: {r}'))
p = ''.join(c.split())
pickle.dump([a, b, p], g)
newly_added.append([a, b, p])
ab = input(f'\nDo you want to add more accounts?[y/n]: ')
if 'y' in ab:
pass
else:
print('\n'+lg+'[i] Saved all accounts in vars.txt'+n)
g.close()
sleep(3)
clr()
print(lg + '[*] Logging in from new accounts...\n')
for added in newly_added:
c = TelegramClient(f'sessions/{added[2]}', added[0], added[1])
try:
c.start()
print(f'\n{lg}[+] Logged in - {added[2]}')
c.disconnect()
except PhoneNumberBannedError:
print(f'{r}[!] {added[2]} is banned! Filter it using option 2')
continue
print('\n')
input(f'\n{lg}Press enter to goto main menu...')
break
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
api_id = int(account[0])
api_hash = str(account[1])
phone = str(account[2])
client = TelegramClient(f'sessions\\{phone}', api_id, api_hash)
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
client.sign_in(phone, input('[+] Enter the code: '))
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Id = a[0]
Hash = a[1]
Phone = a[2]
pickle.dump([Id, Hash, Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu')
elif a == 3:
display = []
j = open('vars.txt', 'rb')
while True:
try:
display.append(pickle.load(j))
except EOFError:
break
j.close()
print(f'\n{lg}')
print(f'API ID | API Hash | Phone')
print(f'==========================================================')
i = 0
for z in display:
print(f'{z[0]} | {z[1]} | {z[2]}')
i += 1
print(f'==========================================================')
input('\nPress enter to goto main menu')
elif a == 4:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[2]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][2])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'{lg}Press enter to goto main menu{n}')
f.close()
elif a == 5:
clr()
banner()
quit()
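# Note on the storage format (a sketch derived from the code above, not part of
# the original file): vars.txt is a stream of consecutive pickle records, one
# [api_id, api_hash, phone] list per account. Each menu option re-reads it by
# calling pickle.load() until EOFError signals the end of the stream, e.g.:
#   with open('vars.txt', 'rb') as fh:
#       while True:
#           try:
#               api_id, api_hash, phone = pickle.load(fh)
#           except EOFError:
#               break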
avg_line_length: 34.244048 | max_line_length: 91 | alphanum_fraction: 0.441161
content_no_comment:
import requests
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, pyfiglet
from colorama import init, Fore
import os, random
from time import sleep
init()
lg = Fore.LIGHTGREEN_EX
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
r = Fore.RED
n = Fore.RESET
colors = [lg, r, w, cy, ye]
def banner():
f = pyfiglet.Figlet(font='slant')
banner = f.renderText('Telegram')
print(f'{random.choice(colors)}{banner}{n}')
print(r+' Version: 1 | Author: Shabani'+n+'\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
banner()
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] List out all the accounts'+n)
print(lg+'[4] Delete specific accounts'+n)
print(lg+'[5] Quit')
a = int(input(f'\nEnter your choice: {r}'))
if a == 1:
with open('vars.txt', 'ab') as g:
newly_added = []
while True:
a = int(input(f'\n{lg}Enter API ID: {r}'))
b = str(input(f'{lg}Enter API Hash: {r}'))
c = str(input(f'{lg}Enter Phone Number: {r}'))
p = ''.join(c.split())
pickle.dump([a, b, p], g)
newly_added.append([a, b, p])
ab = input(f'\nDo you want to add more accounts?[y/n]: ')
if 'y' in ab:
pass
else:
print('\n'+lg+'[i] Saved all accounts in vars.txt'+n)
g.close()
sleep(3)
clr()
print(lg + '[*] Logging in from new accounts...\n')
for added in newly_added:
c = TelegramClient(f'sessions/{added[2]}', added[0], added[1])
try:
c.start()
print(f'\n{lg}[+] Logged in - {added[2]}')
c.disconnect()
except PhoneNumberBannedError:
print(f'{r}[!] {added[2]} is banned! Filter it using option 2')
continue
print('\n')
input(f'\n{lg}Press enter to goto main menu...')
break
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
api_id = int(account[0])
api_hash = str(account[1])
phone = str(account[2])
client = TelegramClient(f'sessions\\{phone}', api_id, api_hash)
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
client.sign_in(phone, input('[+] Enter the code: '))
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Id = a[0]
Hash = a[1]
Phone = a[2]
pickle.dump([Id, Hash, Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu')
elif a == 3:
display = []
j = open('vars.txt', 'rb')
while True:
try:
display.append(pickle.load(j))
except EOFError:
break
j.close()
print(f'\n{lg}')
print(f'API ID | API Hash | Phone')
print(f'==========================================================')
i = 0
for z in display:
print(f'{z[0]} | {z[1]} | {z[2]}')
i += 1
print(f'==========================================================')
input('\nPress enter to goto main menu')
elif a == 4:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[2]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][2])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'{lg}Press enter to goto main menu{n}')
f.close()
elif a == 5:
clr()
banner()
quit()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d1e7ce77cb3ec1ec127f4498e640abbee98ce | size: 8,338 | ext: py | lang: Python
max_stars: cmstack/hdfg/passes/flatten.py | he-actlab/cdstack | 38f605cfa299bf97b5875a19f9fd811a2671d56f | ["Apache-2.0"] | count: null | events: null
max_issues: cmstack/hdfg/passes/flatten.py | he-actlab/cdstack | 38f605cfa299bf97b5875a19f9fd811a2671d56f | ["Apache-2.0"] | count: null | events: null
max_forks: cmstack/hdfg/passes/flatten.py | he-actlab/cdstack | 38f605cfa299bf97b5875a19f9fd811a2671d56f | ["Apache-2.0"] | count: null | events: null
content:
from cmstack.hdfg import hdfgutils
from cmstack.hdfg.hdfg_pb2 import Component, ValueInfo
import logging
from . import is_literal, is_number
def flatten_graph(output_graph, graph, templates, context,edge_node_ids, arg_map):
components = {}
for e in graph.edge_info:
copy_edge = ValueInfo()
if is_literal(e):
uid = str(e)
copy_edge.CopyFrom(graph.edge_info[e])
copy_edge.name = uid
elif e in arg_map.keys():
uid = context + e
copy_edge.CopyFrom(arg_map[e])
copy_edge.attributes['alias'].CopyFrom(hdfgutils.make_attribute('alias', arg_map[e].name))
else:
uid = context + e
copy_edge.CopyFrom(graph.edge_info[e])
copy_edge.name = uid
if e in graph.input and e not in output_graph.input:
output_graph.input.extend([uid])
elif e in graph.state and e not in output_graph.state:
output_graph.state.extend([uid])
elif e in graph.output and e not in output_graph.output:
output_graph.output.extend([uid])
elif e in graph.parameters and e not in output_graph.parameters:
output_graph.parameters.extend([uid])
if graph.name != 'main':
ordered_args = hdfgutils.get_attribute_value(graph.attributes['ordered_args'])
else:
ordered_args = []
if 'dimensions' in list(copy_edge.attributes):
dims = hdfgutils.get_attribute_value(copy_edge.attributes['dimensions'])
new_dims = []
for d in dims:
if d in arg_map.keys():
new_dims.append(arg_map[d].name)
else:
new_dims.append(d)
copy_edge.attributes['dimensions'].CopyFrom(hdfgutils.make_attribute('dimensions', new_dims))
if uid not in edge_node_ids['edges'].keys():
edge_node_ids['edges'][uid] = str(len(edge_node_ids['edges'].keys()))
output_graph.edge_info[uid].CopyFrom(copy_edge)
if e not in arg_map.keys():
output_graph.edge_info[uid].gid = int(edge_node_ids['edges'][uid])
output_graph.edge_info[uid].attributes['component_type'].CopyFrom(hdfgutils.make_attribute('component_type', graph.op_type))
for n in graph.sub_graph:
op_cat = n.op_cat
if op_cat == 'component':
if n.op_type in components.keys():
components[n.op_type] += 1
new_context = context + n.op_type + str(components[n.op_type]) + '/'
else:
components[n.op_type] = 0
new_context = context + n.op_type + str(components[n.op_type]) + '/'
instance_args = hdfgutils.get_attribute_value(n.attributes['ordered_args'])
ctemplate = templates[n.op_type]
signature_args = hdfgutils.get_attribute_value(ctemplate.attributes['ordered_args'])
carg_map = create_map(instance_args, signature_args,graph.edge_info, ctemplate.edge_info, templates[n.op_type])
update_statement_graphs(ctemplate, output_graph, new_context)
flatten_graph(output_graph, ctemplate, templates, new_context , edge_node_ids, carg_map)
else:
new = update_node(n, context, arg_map)
if new.name not in edge_node_ids['nodes'].keys():
edge_node_ids['nodes'][new.name] = str(len(edge_node_ids['nodes'].keys()))
new.gid = int(edge_node_ids['nodes'][new.name])
output_graph.sub_graph.extend([new])
def update_statement_graphs(template, output_graph, context):
for s in template.statement_graphs:
statement_nodes = s.statement_node
new_graph = output_graph.statement_graphs.add()
nodes = []
for n in statement_nodes:
nodes.append(context + n)
new_graph.statement_node.extend(nodes)
def create_map(instance_args, signature_args, instance_edges, signature_edges, op=None):
carg_map = {}
for i in range(len(instance_args)):
iarg = instance_args[i]
sarg = signature_args[i]
if is_number(iarg):
iarg = str(iarg)
carg_map[sarg] = instance_edges[iarg]
carg_map[sarg].name = iarg
idims = hdfgutils.get_attribute_value(instance_edges[iarg].attributes['dimensions'])
iid_literal = False
if instance_edges[iarg].iid:
inst_iid = instance_edges[iarg].iid
iid_literal = is_literal(inst_iid)
sdims = hdfgutils.get_attribute_value(signature_edges[sarg].attributes['dimensions'])
if len(idims) != len(sdims) and not iid_literal:
logging.error("Error! Dimensions between edges connecting components do not match:{} versus {} for {} and {}".format(idims, sdims, iarg, sarg))
elif not iid_literal:
for d in range(len(idims)):
inst_dim = idims[d]
sig_dim = sdims[d]
if is_number(inst_dim):
inst_dim = str(inst_dim)
carg_map[sig_dim] = instance_edges[inst_dim]
carg_map[sig_dim].name = inst_dim
carg_map[sig_dim].attributes['vtype'].CopyFrom(hdfgutils.make_attribute('vtype', 'scalar'))
if len(signature_args) > len(instance_args):
start = len(instance_args)
for default in signature_args[start:]:
sig_attr = list(signature_edges[default].attributes)
if 'default' not in sig_attr:
logging.error(
"Error! No default value for unspecified arg: {}".format(default))
else:
def_val = hdfgutils.get_attribute_value(signature_edges[default].attributes['default'])
carg_map[default] = signature_edges[default]
carg_map[default].attributes['value'].CopyFrom(hdfgutils.make_attribute('value', def_val))
if is_number(def_val):
def_val = str(def_val)
carg_map[default].name = def_val
carg_map[default].attributes['vtype'].CopyFrom(hdfgutils.make_attribute('vtype', 'scalar'))
for e in op.edge_info:
vcat = hdfgutils.get_attribute_value(op.edge_info[e].attributes['vcat'])
if vcat == 'declaration':
dims = hdfgutils.get_attribute_value(op.edge_info[e].attributes['dimensions'])
sig_name = op.edge_info[e].name.rsplit("/", 1)[-1]
return carg_map
def update_node(node, context, carg_map):
new = Component(name=context + node.name)
inputs = []
outputs = []
states = []
parameters = []
for inp in node.input:
if is_number(inp):
i = str(inp)
else:
i = inp
if is_literal(i):
inputs.append(i)
elif i in carg_map.keys():
# inputs.append(carg_map[i])
inputs.append(carg_map[i].name)
else:
inputs.append(context + i)
new.input.extend(inputs)
for o in node.output:
if is_number(o):
out = str(o)
else:
out = o
if is_literal(out):
outputs.append(out)
elif out in carg_map.keys():
# outputs.append(carg_map[out])
outputs.append(carg_map[out].name)
else:
outputs.append(context + out)
new.output.extend(outputs)
for st in node.state:
if is_number(st):
s = str(st)
else:
s = st
if is_literal(s):
states.append(s)
elif s in carg_map.keys():
# states.append(carg_map[s])
states.append(carg_map[s].name)
else:
states.append(context + s)
new.state.extend(states)
for para in node.parameters:
if is_number(para):
p = str(para)
else:
p = para
if is_literal(p):
parameters.append(p)
elif p in carg_map.keys():
# parameters.append(carg_map[p])
parameters.append(carg_map[p].name)
else:
parameters.append(context + p)
new.parameters.extend(parameters)
for attr in node.attributes:
new.attributes[attr].CopyFrom(node.attributes[attr])
new.op_type = node.op_type
return new
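# Note (derived from the code above, not part of the original file): flatten_graph
# recurses into each 'component' node, extending `context` with
# "<op_type><instance index>/" at every level, so flattened edge and node names
# become unique hierarchical paths (an illustrative name: "conv0/add1/x").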
avg_line_length: 35.939655 | max_line_length: 155 | alphanum_fraction: 0.598225
content_no_comment:
from cmstack.hdfg import hdfgutils
from cmstack.hdfg.hdfg_pb2 import Component, ValueInfo
import logging
from . import is_literal, is_number
def flatten_graph(output_graph, graph, templates, context,edge_node_ids, arg_map):
components = {}
for e in graph.edge_info:
copy_edge = ValueInfo()
if is_literal(e):
uid = str(e)
copy_edge.CopyFrom(graph.edge_info[e])
copy_edge.name = uid
elif e in arg_map.keys():
uid = context + e
copy_edge.CopyFrom(arg_map[e])
copy_edge.attributes['alias'].CopyFrom(hdfgutils.make_attribute('alias', arg_map[e].name))
else:
uid = context + e
copy_edge.CopyFrom(graph.edge_info[e])
copy_edge.name = uid
if e in graph.input and e not in output_graph.input:
output_graph.input.extend([uid])
elif e in graph.state and e not in output_graph.state:
output_graph.state.extend([uid])
elif e in graph.output and e not in output_graph.output:
output_graph.output.extend([uid])
elif e in graph.parameters and e not in output_graph.parameters:
output_graph.parameters.extend([uid])
if graph.name != 'main':
ordered_args = hdfgutils.get_attribute_value(graph.attributes['ordered_args'])
else:
ordered_args = []
if 'dimensions' in list(copy_edge.attributes):
dims = hdfgutils.get_attribute_value(copy_edge.attributes['dimensions'])
new_dims = []
for d in dims:
if d in arg_map.keys():
new_dims.append(arg_map[d].name)
else:
new_dims.append(d)
copy_edge.attributes['dimensions'].CopyFrom(hdfgutils.make_attribute('dimensions', new_dims))
if uid not in edge_node_ids['edges'].keys():
edge_node_ids['edges'][uid] = str(len(edge_node_ids['edges'].keys()))
output_graph.edge_info[uid].CopyFrom(copy_edge)
if e not in arg_map.keys():
output_graph.edge_info[uid].gid = int(edge_node_ids['edges'][uid])
output_graph.edge_info[uid].attributes['component_type'].CopyFrom(hdfgutils.make_attribute('component_type', graph.op_type))
for n in graph.sub_graph:
op_cat = n.op_cat
if op_cat == 'component':
if n.op_type in components.keys():
components[n.op_type] += 1
new_context = context + n.op_type + str(components[n.op_type]) + '/'
else:
components[n.op_type] = 0
new_context = context + n.op_type + str(components[n.op_type]) + '/'
instance_args = hdfgutils.get_attribute_value(n.attributes['ordered_args'])
ctemplate = templates[n.op_type]
signature_args = hdfgutils.get_attribute_value(ctemplate.attributes['ordered_args'])
carg_map = create_map(instance_args, signature_args,graph.edge_info, ctemplate.edge_info, templates[n.op_type])
update_statement_graphs(ctemplate, output_graph, new_context)
flatten_graph(output_graph, ctemplate, templates, new_context , edge_node_ids, carg_map)
else:
new = update_node(n, context, arg_map)
if new.name not in edge_node_ids['nodes'].keys():
edge_node_ids['nodes'][new.name] = str(len(edge_node_ids['nodes'].keys()))
new.gid = int(edge_node_ids['nodes'][new.name])
output_graph.sub_graph.extend([new])
def update_statement_graphs(template, output_graph, context):
for s in template.statement_graphs:
statement_nodes = s.statement_node
new_graph = output_graph.statement_graphs.add()
nodes = []
for n in statement_nodes:
nodes.append(context + n)
new_graph.statement_node.extend(nodes)
def create_map(instance_args, signature_args, instance_edges, signature_edges, op=None):
carg_map = {}
for i in range(len(instance_args)):
iarg = instance_args[i]
sarg = signature_args[i]
if is_number(iarg):
iarg = str(iarg)
carg_map[sarg] = instance_edges[iarg]
carg_map[sarg].name = iarg
idims = hdfgutils.get_attribute_value(instance_edges[iarg].attributes['dimensions'])
iid_literal = False
if instance_edges[iarg].iid:
inst_iid = instance_edges[iarg].iid
iid_literal = is_literal(inst_iid)
sdims = hdfgutils.get_attribute_value(signature_edges[sarg].attributes['dimensions'])
if len(idims) != len(sdims) and not iid_literal:
logging.error("Error! Dimensions between edges connecting components do not match:{} versus {} for {} and {}".format(idims, sdims, iarg, sarg))
elif not iid_literal:
for d in range(len(idims)):
inst_dim = idims[d]
sig_dim = sdims[d]
if is_number(inst_dim):
inst_dim = str(inst_dim)
carg_map[sig_dim] = instance_edges[inst_dim]
carg_map[sig_dim].name = inst_dim
carg_map[sig_dim].attributes['vtype'].CopyFrom(hdfgutils.make_attribute('vtype', 'scalar'))
if len(signature_args) > len(instance_args):
start = len(instance_args)
for default in signature_args[start:]:
sig_attr = list(signature_edges[default].attributes)
if 'default' not in sig_attr:
logging.error(
"Error! No default value for unspecified arg: {}".format(default))
else:
def_val = hdfgutils.get_attribute_value(signature_edges[default].attributes['default'])
carg_map[default] = signature_edges[default]
carg_map[default].attributes['value'].CopyFrom(hdfgutils.make_attribute('value', def_val))
if is_number(def_val):
def_val = str(def_val)
carg_map[default].name = def_val
carg_map[default].attributes['vtype'].CopyFrom(hdfgutils.make_attribute('vtype', 'scalar'))
for e in op.edge_info:
vcat = hdfgutils.get_attribute_value(op.edge_info[e].attributes['vcat'])
if vcat == 'declaration':
dims = hdfgutils.get_attribute_value(op.edge_info[e].attributes['dimensions'])
sig_name = op.edge_info[e].name.rsplit("/", 1)[-1]
return carg_map
def update_node(node, context, carg_map):
new = Component(name=context + node.name)
inputs = []
outputs = []
states = []
parameters = []
for inp in node.input:
if is_number(inp):
i = str(inp)
else:
i = inp
if is_literal(i):
inputs.append(i)
elif i in carg_map.keys():
inputs.append(carg_map[i].name)
else:
inputs.append(context + i)
new.input.extend(inputs)
for o in node.output:
if is_number(o):
out = str(o)
else:
out = o
if is_literal(out):
outputs.append(out)
elif out in carg_map.keys():
outputs.append(carg_map[out].name)
else:
outputs.append(context + out)
new.output.extend(outputs)
for st in node.state:
if is_number(st):
s = str(st)
else:
s = st
if is_literal(s):
states.append(s)
elif s in carg_map.keys():
states.append(carg_map[s].name)
else:
states.append(context + s)
new.state.extend(states)
for para in node.parameters:
if is_number(para):
p = str(para)
else:
p = para
if is_literal(p):
parameters.append(p)
elif p in carg_map.keys():
parameters.append(carg_map[p].name)
else:
parameters.append(context + p)
new.parameters.extend(parameters)
for attr in node.attributes:
new.attributes[attr].CopyFrom(node.attributes[attr])
new.op_type = node.op_type
return new
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d1e95f401235dc60e86bb4bd99addf75fb901 | size: 1,217 | ext: py | lang: Python
max_stars: synth.py | Global19/nodejs-memcache | 13c6e820fd8e7889ddaccceb8bff739ff5e4c4a0 | ["Apache-2.0"] | count: null | events: null
max_issues: synth.py | Global19/nodejs-memcache | 13c6e820fd8e7889ddaccceb8bff739ff5e4c4a0 | ["Apache-2.0"] | count: null | events: null
max_forks: synth.py | Global19/nodejs-memcache | 13c6e820fd8e7889ddaccceb8bff739ff5e4c4a0 | ["Apache-2.0"] | count: 1 | 2020-10-04T10:50:46.000Z – 2020-10-04T10:50:46.000Z
content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
logging.basicConfig(level=logging.DEBUG)
# run the gapic generator
gapic = gcp.GAPICBazel()
versions = ['v1beta2']
name = 'memcache'
for version in versions:
library = gapic.node_library(name, version)
s.copy(library, excludes=['package.json', 'README.md'])
# Copy common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates, excludes=[])
node.postprocess_gapic_library()
avg_line_length: 32.891892 | max_line_length: 74 | alphanum_fraction: 0.771569
content_no_comment:
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICBazel()
versions = ['v1beta2']
name = 'memcache'
for version in versions:
library = gapic.node_library(name, version)
s.copy(library, excludes=['package.json', 'README.md'])
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates, excludes=[])
node.postprocess_gapic_library()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d1ec1a21e2572f76463724a918a28f2ebd09a | size: 4,209 | ext: py | lang: Python
max_stars: requirements.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | ["Apache-2.0", "BSD-2-Clause"] | count: 1 | 2020-06-08T16:54:28.000Z – 2020-06-08T16:54:28.000Z
max_issues: requirements.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | ["Apache-2.0", "BSD-2-Clause"] | count: 8 | 2016-10-07T22:49:28.000Z – 2022-02-23T00:57:58.000Z
max_forks: requirements.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | ["Apache-2.0", "BSD-2-Clause"] | count: null | events: null
content:
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
# These need to be importable by bootstrap.py. If we put them in
# setup.py the import may fail if setuptools in not installed
# in the global python3.
option_requirements = [
('pyzmq', ['--zmq=bundled']),
]
install_requires = [
'gevent==20.6.1',
'greenlet==0.4.16',
'grequests',
'requests==2.23.0',
'ply',
'psutil',
'python-dateutil',
'pytz',
'PyYAML',
'pyzmq',
'setuptools',
'tzlocal',
'pyOpenSSL==19.0.0',
'cryptography==2.3',
# Cross platform way of handling changes in file/directories.
# https://github.com/Bogdanp/watchdog_gevent
'watchdog-gevent',
'wheel==0.30'
]
extras_require = {
'crate': [ # crate databases
'crate'
],
'databases': [ # Support for all known databases
'mysql-connector-python-rf',
'pymongo',
'crate',
'influxdb',
'psycopg2-binary'
],
'dnp3': [ # dnp3 agent requirements.
'pydnp3'
],
'documentation': [ # Requirements for building the documentation
'mock',
'Sphinx',
'recommonmark',
'sphinx-rtd-theme'
],
'drivers': [
'pymodbus',
'bacpypes==0.16.7',
'modbus-tk',
'pyserial'
],
'influxdb': [ # influxdb historian requirements.
'influxdb'
],
'market': [ # Requirements for the market service
'numpy',
'transitions',
],
'mongo': [ # mongo databases
'pymongo',
],
'mysql': [ # mysql databases
'mysql-connector-python-rf',
],
'pandas': [ # numpy and pandas for applications
'numpy',
'pandas',
],
'postgres': [ # numpy and pandas for applications
'psycopg2-binary'
],
'testing': [ # Testing infrastructure dependencies
'mock',
'pytest',
'pytest-timeout',
'websocket-client',
# Allows us to compare nested dictionaries easily.
'deepdiff',
# Allows setup of databases for testing with.
'docker'
],
'web': [ # Web support for launching web based agents including ssl and json web tokens.
'ws4py',
'PyJWT',
'Jinja2',
'passlib',
'argon2-cffi',
'Werkzeug'
],
'weather': [
'Pint'
],
}
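# Usage sketch (assumes the project's setup.py consumes extras_require in the
# usual setuptools way; the package name below is inferred from the repository,
# not stated in this file): each extras group maps to a pip "extra", so e.g.
#   pip install volttron[databases,web]
# would pull in the 'databases' and 'web' dependency groups defined above.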
avg_line_length: 30.948529 | max_line_length: 95 | alphanum_fraction: 0.646947
content_no_comment:
option_requirements = [
('pyzmq', ['--zmq=bundled']),
]
install_requires = [
'gevent==20.6.1',
'greenlet==0.4.16',
'grequests',
'requests==2.23.0',
'ply',
'psutil',
'python-dateutil',
'pytz',
'PyYAML',
'pyzmq',
'setuptools',
'tzlocal',
'pyOpenSSL==19.0.0',
'cryptography==2.3',
'watchdog-gevent',
'wheel==0.30'
]
extras_require = {
'crate': [
'crate'
],
'databases': [
'mysql-connector-python-rf',
'pymongo',
'crate',
'influxdb',
'psycopg2-binary'
],
'dnp3': [
'pydnp3'
],
'documentation': [
'mock',
'Sphinx',
'recommonmark',
'sphinx-rtd-theme'
],
'drivers': [
'pymodbus',
'bacpypes==0.16.7',
'modbus-tk',
'pyserial'
],
'influxdb': [
'influxdb'
],
'market': [
'numpy',
'transitions',
],
'mongo': [
'pymongo',
],
'mysql': [
'mysql-connector-python-rf',
],
'pandas': [
'numpy',
'pandas',
],
'postgres': [
'psycopg2-binary'
],
'testing': [
'mock',
'pytest',
'pytest-timeout',
'websocket-client',
'deepdiff',
'docker'
],
'web': [
'ws4py',
'PyJWT',
'Jinja2',
'passlib',
'argon2-cffi',
'Werkzeug'
],
'weather': [
'Pint'
],
}
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d1f14d3c283e9a2c562fac9f050a8f233c75c | size: 2,965 | ext: py | lang: Python
max_stars: qa/rpc-tests/test_framework/coverage.py | devilsan84/Devilcoin | cdb0e0c647ffc35113f3e42a06f99ce0e43f94ab | ["MIT"] | count: null | events: null
max_issues: qa/rpc-tests/test_framework/coverage.py | devilsan84/Devilcoin | cdb0e0c647ffc35113f3e42a06f99ce0e43f94ab | ["MIT"] | count: null | events: null
max_forks: qa/rpc-tests/test_framework/coverage.py | devilsan84/Devilcoin | cdb0e0c647ffc35113f3e42a06f99ce0e43f94ab | ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The BitCore Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This module contains utilities for doing coverage analysis on the RPC
interface.
It provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
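# Usage sketch (illustrative; `raw_proxy` is an assumed AuthServiceProxy
# instance, not a name from this file): wrapping a node's proxy records each
# RPC method name as it is called:
#   proxy = AuthServiceProxyWrapper(raw_proxy, coverage_logfile='cov.txt')
#   proxy.getblockcount()  # delegates the call, then appends "getblockcount" to cov.txt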
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `bitcore-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
avg_line_length: 27.71028 | max_line_length: 79 | alphanum_fraction: 0.660708
content_no_comment:
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
def get_filename(dirname, n_node):
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d2055df1fbb87c99f2e8e1b90cb03dfa54398 | size: 1,279 | ext: py | lang: Python
max_stars: tools/mo/openvino/tools/mo/front/tf/lrn_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | ["Apache-2.0"] | count: 1 | 2019-09-22T01:05:07.000Z – 2019-09-22T01:05:07.000Z
max_issues: tools/mo/openvino/tools/mo/front/tf/lrn_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | ["Apache-2.0"] | count: 58 | 2020-11-06T12:13:45.000Z – 2022-03-28T13:20:11.000Z
max_forks: tools/mo/openvino/tools/mo/front/tf/lrn_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | ["Apache-2.0"] | count: 2 | 2021-07-14T07:40:50.000Z – 2021-07-27T01:40:03.000Z
content:
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.lrn import AttributedLRN
class LRNExtractor(FrontExtractorOp):
"""
TF and IE (Caffe) LRN parameters differ in several places:
region (IE) : TF has no such parameter; it always normalizes over the last dimension (the feature dimension in case of NHWC).
local-size (IE) : the size of the 1D normalization window in Caffe. TF instead exposes 'depth_radius', where
local-size = (depth_radius * 2) + 1.
alpha (IE) : Caffe divides 'alpha' by local-size internally, so we must multiply TF's alpha by local-size.
Caffe ref : http://caffe.berkeleyvision.org/tutorial/layers/lrn.html
TF ref : https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization
"""
op = 'LRN'
enabled = True
@classmethod
def extract(cls, node):
pb = node.pb
AttributedLRN.update_node_stat(node, {
'alpha': pb.attr['alpha'].f * (2. * pb.attr['depth_radius'].i + 1.),
'beta': pb.attr['beta'].f,
'bias': pb.attr['bias'].f,
'local_size': (2 * pb.attr['depth_radius'].i + 1),
})
return cls.enabled
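# Worked example (illustrative values, not from the source): for TF attributes
# depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0, the extractor above yields
# local_size = 2*2 + 1 = 5 and alpha_IE = 0.0001 * (2*2 + 1) = 0.0005, while
# beta and bias pass through unchanged.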
avg_line_length: 39.96875 | max_line_length: 124 | alphanum_fraction: 0.630962
content_no_comment:
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.lrn import AttributedLRN
class LRNExtractor(FrontExtractorOp):
op = 'LRN'
enabled = True
@classmethod
def extract(cls, node):
pb = node.pb
AttributedLRN.update_node_stat(node, {
'alpha': pb.attr['alpha'].f * (2. * pb.attr['depth_radius'].i + 1.),
'beta': pb.attr['beta'].f,
'bias': pb.attr['bias'].f,
'local_size': (2 * pb.attr['depth_radius'].i + 1),
})
return cls.enabled
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d21c79af84a7f25090e84dcdd8d931518d941 | size: 636 | ext: py | lang: Python
max_stars: backend/manage.py | crowdbotics-apps/mobile-app-33660 | e59d5ffef0804b4ecd73c80e3ab186d77197f66d | ["FTL", "AML", "RSA-MD"] | count: null | events: null
max_issues: backend/manage.py | crowdbotics-apps/mobile-app-33660 | e59d5ffef0804b4ecd73c80e3ab186d77197f66d | ["FTL", "AML", "RSA-MD"] | count: null | events: null
max_forks: backend/manage.py | crowdbotics-apps/mobile-app-33660 | e59d5ffef0804b4ecd73c80e3ab186d77197f66d | ["FTL", "AML", "RSA-MD"] | count: null | events: null
content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_app_33660.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length: 28.909091 | max_line_length: 80 | alphanum_fraction: 0.687107
content_no_comment:
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_app_33660.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d21e4c0d1362d46fe167e54131980a0f98f84 | size: 1,100 | ext: py | lang: Python
max_stars: services/migrations/0001_initial.py | KeoH/orchestrapi | 575e66a86c42b5c249fd943bb5f40c8c310139aa | ["MIT"] | count: 1 | 2021-07-05T19:37:37.000Z – 2021-07-05T19:37:37.000Z
max_issues: services/migrations/0001_initial.py | KeoH/orchestrapi | 575e66a86c42b5c249fd943bb5f40c8c310139aa | ["MIT"] | count: 6 | 2020-06-05T19:30:52.000Z – 2021-07-05T19:28:53.000Z
max_forks: services/migrations/0001_initial.py | KeoH/orchestrapi | 575e66a86c42b5c249fd943bb5f40c8c310139aa | ["MIT"] | count: 1 | 2020-05-15T23:58:24.000Z – 2020-05-15T23:58:24.000Z
content:
# Generated by Django 2.1.3 on 2019-01-07 17:44
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import services.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Service',
fields=[
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('slug', models.SlugField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, verbose_name='Name')),
('data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=services.models.default_data)),
('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
],
options={
'abstract': False,
},
),
]
avg_line_length: 33.333333 | max_line_length: 123 | alphanum_fraction: 0.606364
content_no_comment:
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import services.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Service',
fields=[
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('slug', models.SlugField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, verbose_name='Name')),
('data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=services.models.default_data)),
('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
],
options={
'abstract': False,
},
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d22b68a1443e45ca84b0f533cf1ee3bbd8e1c | size: 804 | ext: py | lang: Python
max_stars: res/manage.py | onap/vfc-gvnfm-vnfres | 2ff32469650ac5b6dc6b65d99cc27f3f7aab4161 | ["Apache-2.0"] | count: 1 | 2021-10-15T15:26:31.000Z – 2021-10-15T15:26:31.000Z
max_issues: res/manage.py | onap/vfc-gvnfm-vnfres | 2ff32469650ac5b6dc6b65d99cc27f3f7aab4161 | ["Apache-2.0"] | count: null | events: null
max_forks: res/manage.py | onap/vfc-gvnfm-vnfres | 2ff32469650ac5b6dc6b65d99cc27f3f7aab4161 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "res.settings")
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
avg_line_length: 34.956522 | max_line_length: 74 | alphanum_fraction: 0.767413
content_no_comment:
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "res.settings")
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d23d902c426a29b4ab649a9e20f68721cacc7 | size: 6,254 | ext: py | lang: Python
max_stars: PaddleRec/gnn/train.py | heavengate/models | f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7 | ["Apache-2.0"] | count: 2 | 2021-06-11T06:48:20.000Z – 2021-09-02T10:23:07.000Z
max_issues: PaddleRec/gnn/train.py | heavengate/models | f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7 | ["Apache-2.0"] | count: null | events: null
max_forks: PaddleRec/gnn/train.py | heavengate/models | f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7 | ["Apache-2.0"] | count: 1 | 2019-08-27T11:19:09.000Z – 2019-08-27T11:19:09.000Z
content:
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
import os
from functools import partial
import logging
import time
import paddle
import paddle.fluid as fluid
import argparse
import network
import reader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser("gnn")
parser.add_argument(
'--train_path', type=str, default='./data/diginetica/train.txt', help='dir of training data')
parser.add_argument(
'--config_path', type=str, default='./data/diginetica/config.txt', help='dir of config')
parser.add_argument(
'--model_path', type=str, default='./saved_model', help="path of model parameters")
parser.add_argument(
'--epoch_num', type=int, default=30, help='number of epochs to train for')
parser.add_argument(
'--batch_size', type=int, default=100, help='input batch size')
parser.add_argument(
'--hidden_size', type=int, default=100, help='hidden state size')
parser.add_argument(
'--l2', type=float, default=1e-5, help='l2 penalty')
parser.add_argument(
'--lr', type=float, default=0.001, help='learning rate')
parser.add_argument(
'--step', type=int, default=1, help='gnn propagation steps')
parser.add_argument(
'--lr_dc', type=float, default=0.1, help='learning rate decay rate')
parser.add_argument(
'--lr_dc_step', type=int, default=3, help='the number of steps after which the learning rate decay')
parser.add_argument(
'--use_cuda', type=int, default=0, help='whether to use gpu')
parser.add_argument(
'--use_parallel', type=int, default=1, help='whether to use parallel executor')
parser.add_argument(
'--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')
return parser.parse_args()
def train():
args = parse_args()
if args.enable_ce:
SEED = 102
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
batch_size = args.batch_size
items_num = reader.read_config(args.config_path)
loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size,
args.step)
data_reader = reader.Data(args.train_path, True)
logger.info("load data complete")
use_cuda = True if args.use_cuda else False
use_parallel = True if args.use_parallel else False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
step_per_epoch = data_reader.length // batch_size
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.lr,
decay_steps=step_per_epoch * args.lr_dc_step,
decay_rate=args.lr_dc),
regularization=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=args.l2))
optimizer.minimize(loss)
exe.run(fluid.default_startup_program())
all_vocab = fluid.global_scope().var("all_vocab").get_tensor()
all_vocab.set(
np.arange(1, items_num).astype("int64").reshape((-1, 1)), place)
feed_list = [e.name for e in feed_datas]
if use_parallel:
train_exe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name)
else:
train_exe = exe
logger.info("begin train")
total_time = []
ce_info = []
start_time = time.time()
loss_sum = 0.0
acc_sum = 0.0
global_step = 0
PRINT_STEP = 500
py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True))
for i in range(args.epoch_num):
epoch_sum = []
py_reader.start()
try:
while True:
res = train_exe.run(fetch_list=[loss.name, acc.name])
loss_sum += res[0].mean()
acc_sum += res[1].mean()
epoch_sum.append(res[0].mean())
global_step += 1
if global_step % PRINT_STEP == 0:
ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP])
total_time.append(time.time() - start_time)
logger.info("global_step: %d, loss: %.4lf, train_acc: %.4lf" % (
global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP))
loss_sum = 0.0
acc_sum = 0.0
start_time = time.time()
except fluid.core.EOFException:
py_reader.reset()
logger.info("epoch loss: %.4lf" % (np.mean(epoch_sum)))
save_dir = os.path.join(args.model_path, "epoch_" + str(i))
fetch_vars = [loss, acc]
fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe)
logger.info("model saved in " + save_dir)
# only for ce
if args.enable_ce:
gpu_num = get_cards(args)
ce_loss = 0
ce_acc = 0
ce_time = 0
try:
ce_loss = ce_info[-1][0]
ce_acc = ce_info[-1][1]
ce_time = total_time[-1]
except:
print("ce info error")
print("kpis\teach_pass_duration_card%s\t%s" %
(gpu_num, ce_time))
print("kpis\ttrain_loss_card%s\t%f" %
(gpu_num, ce_loss))
print("kpis\ttrain_acc_card%s\t%f" %
(gpu_num, ce_acc))
def get_cards(args):
num = 0
cards = os.environ.get('CUDA_VISIBLE_DEVICES')
# guard: if CUDA_VISIBLE_DEVICES is unset, cards is None and .split() would raise
if cards:
num = len(cards.split(","))
return num
if __name__ == "__main__":
train()
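# Example invocation (a sketch; the flags come from parse_args above, the values
# are illustrative):
#   python train.py --train_path ./data/diginetica/train.txt --use_cuda 1 --epoch_num 30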
avg_line_length: 35.942529 | max_line_length: 108 | alphanum_fraction: 0.633674
content_no_comment:
import numpy as np
import os
from functools import partial
import logging
import time
import paddle
import paddle.fluid as fluid
import argparse
import network
import reader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser("gnn")
parser.add_argument(
'--train_path', type=str, default='./data/diginetica/train.txt', help='dir of training data')
parser.add_argument(
'--config_path', type=str, default='./data/diginetica/config.txt', help='dir of config')
parser.add_argument(
'--model_path', type=str, default='./saved_model', help="path of model parameters")
parser.add_argument(
'--epoch_num', type=int, default=30, help='number of epochs to train for')
parser.add_argument(
'--batch_size', type=int, default=100, help='input batch size')
parser.add_argument(
'--hidden_size', type=int, default=100, help='hidden state size')
parser.add_argument(
'--l2', type=float, default=1e-5, help='l2 penalty')
parser.add_argument(
'--lr', type=float, default=0.001, help='learning rate')
parser.add_argument(
'--step', type=int, default=1, help='gnn propagation steps')
parser.add_argument(
'--lr_dc', type=float, default=0.1, help='learning rate decay rate')
parser.add_argument(
'--lr_dc_step', type=int, default=3, help='the number of steps after which the learning rate decay')
parser.add_argument(
'--use_cuda', type=int, default=0, help='whether to use gpu')
parser.add_argument(
'--use_parallel', type=int, default=1, help='whether to use parallel executor')
parser.add_argument(
'--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')
return parser.parse_args()
def train():
args = parse_args()
if args.enable_ce:
SEED = 102
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
batch_size = args.batch_size
items_num = reader.read_config(args.config_path)
loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size,
args.step)
data_reader = reader.Data(args.train_path, True)
logger.info("load data complete")
use_cuda = True if args.use_cuda else False
use_parallel = True if args.use_parallel else False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
step_per_epoch = data_reader.length // batch_size
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.lr,
decay_steps=step_per_epoch * args.lr_dc_step,
decay_rate=args.lr_dc),
regularization=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=args.l2))
optimizer.minimize(loss)
exe.run(fluid.default_startup_program())
all_vocab = fluid.global_scope().var("all_vocab").get_tensor()
all_vocab.set(
np.arange(1, items_num).astype("int64").reshape((-1, 1)), place)
feed_list = [e.name for e in feed_datas]
if use_parallel:
train_exe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name)
else:
train_exe = exe
logger.info("begin train")
total_time = []
ce_info = []
start_time = time.time()
loss_sum = 0.0
acc_sum = 0.0
global_step = 0
PRINT_STEP = 500
py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True))
for i in range(args.epoch_num):
epoch_sum = []
py_reader.start()
try:
while True:
res = train_exe.run(fetch_list=[loss.name, acc.name])
loss_sum += res[0].mean()
acc_sum += res[1].mean()
epoch_sum.append(res[0].mean())
global_step += 1
if global_step % PRINT_STEP == 0:
ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP])
total_time.append(time.time() - start_time)
logger.info("global_step: %d, loss: %.4lf, train_acc: %.4lf" % (
global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP))
loss_sum = 0.0
acc_sum = 0.0
start_time = time.time()
except fluid.core.EOFException:
py_reader.reset()
logger.info("epoch loss: %.4lf" % (np.mean(epoch_sum)))
save_dir = os.path.join(args.model_path, "epoch_" + str(i))
fetch_vars = [loss, acc]
fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe)
logger.info("model saved in " + save_dir)
if args.enable_ce:
gpu_num = get_cards(args)
ce_loss = 0
ce_acc = 0
ce_time = 0
try:
ce_loss = ce_info[-1][0]
ce_acc = ce_info[-1][1]
ce_time = total_time[-1]
except:
print("ce info error")
print("kpis\teach_pass_duration_card%s\t%s" %
(gpu_num, ce_time))
print("kpis\ttrain_loss_card%s\t%f" %
(gpu_num, ce_loss))
print("kpis\ttrain_acc_card%s\t%f" %
(gpu_num, ce_acc))
def get_cards(args):
num = 0
cards = os.environ.get('CUDA_VISIBLE_DEVICES')
if cards:
num = len(cards.split(","))
return num
if __name__ == "__main__":
train()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790d23e406815d4ab43ab292998c7832ebe46b4e | size: 3,751 | ext: py | lang: Python
max_stars: examples/r2021_arxiv_qcase_benzyne/_instruct_22.py | damazz/HQCA | b013ba68f86e42350913c4abc2e1c91695a429b7 | ["Apache-2.0"] | count: null | events: null
max_issues: examples/r2021_arxiv_qcase_benzyne/_instruct_22.py | damazz/HQCA | b013ba68f86e42350913c4abc2e1c91695a429b7 | ["Apache-2.0"] | count: null | events: null
max_forks: examples/r2021_arxiv_qcase_benzyne/_instruct_22.py | damazz/HQCA | b013ba68f86e42350913c4abc2e1c91695a429b7 | ["Apache-2.0"] | count: 1 | 2021-08-10T00:20:09.000Z – 2021-08-10T00:20:09.000Z
content:
import numpy as np
from hqca.core import *
from hqca.core.primitives import *
from hqca.tools import *
import sys
from numpy import sin as sin
from numpy import cos as cos
from copy import deepcopy as copy
class ExpPauli:
def __init__(self,vec):
v = np.asmatrix(vec)
if v.shape[0]>v.shape[1]:
v = v.T
if np.linalg.norm(v)==0:
self.iden=True
self.a = 0
self.v = v
else:
self.iden=False
self.a = np.linalg.norm(v)
self.v = v/self.a
def __mul__(self,w):
if self.iden:
return w
if w.iden:
return self
cc = np.cos(self.a)*np.cos(w.a)
cs = np.cos(self.a)*np.sin(w.a)
sc = np.sin(self.a)*np.cos(w.a)
ss = np.sin(self.a)*np.sin(w.a)
c = np.arccos(cc-np.dot(self.v,w.v.T)*ss)
k1 = self.v*sc
k2 = w.v*cs
k3 = -np.cross(self.v,w.v)*ss
k = (1/np.sin(c))*(k1+k2+k3)
return ExpPauli(c*k)
def __str__(self):
t = '||v||: {:.5f}, '.format(self.a)
t+= 'nx: {:+.5f}, '.format(self.v[0,0])
t+= 'ny: {:+.5f}, '.format(self.v[0,1])
t+= 'nz: {:+.5f}'.format(self.v[0,2])
return t
def matrix(self):
x = np.matrix([[0,1],[1,0]],dtype=np.complex_)
y = np.matrix([[0,-1j],[1j,0]],dtype=np.complex_)
z = np.matrix([[1,0],[0,-1]],dtype=np.complex_)
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
i = np.identity(2)
if self.iden:
return np.identity(2)
return np.cos(self.a)*i + (x*nx+y*ny+z*nz)*1j*np.sin(self.a)
def U3(self):
if self.iden:
return 0,0,0
A = np.sin(self.a)**2
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
part = nx**2+ny**2
vd = np.cos(self.a)+1j*nz*np.sin(self.a)
vo = (1j*nx-ny)*np.sin(self.a)
if abs(part-0)<=1e-10:
theta= 0
sigma = (1j*np.log(vd)).real
delta= 0
else:
theta = 2*np.arcsin(np.sqrt((nx**2+ny**2)*A))
aleph=-ny*np.sin(self.a)/np.sin(theta/2)
beta = nx*np.sin(self.a)/np.sin(theta/2)
delta = (-1j*np.log(vo/np.sin(theta/2))).real
sigma = (1j*np.log(vd/np.cos(theta/2))).real
return theta,sigma+delta,sigma-delta
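# Sanity check (illustrative, not part of the original file): __mul__ composes
# exp(i*a1*n1.sigma) * exp(i*a2*n2.sigma) with self as the left factor, so the
# product's matrix should equal the matrix product of the factors:
#   a = ExpPauli(np.array([0.3, 0.1, 0.0]))
#   b = ExpPauli(np.array([0.0, 0.2, 0.4]))
#   np.allclose((a * b).matrix(), a.matrix() @ b.matrix())  # expected: True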
class BenzyneInstruct(Instructions):
'''
type 1, 2 and 3
'''
def __init__(self,operator,
Nq,
propagate=False,
HamiltonianOperator=[],
scaleH=1,
**kw):
if not Nq==1:
sys.exit('Did not get 1 qubit in instructions...')
para = np.array([0.0,0.0,0.0])
expS = ExpPauli(para)
for A in operator:
para = np.array([0.0,0.0,0.0])
for o in A:
if o.s=='X':
para[0]=np.imag(o.c)
elif o.s=='Y':
para[1]=np.imag(o.c)
elif o.s=='Z':
para[2]=np.imag(o.c)
expS = ExpPauli(para)*expS
#
paraH = np.array([0.0,0.0,0.0])
for o in HamiltonianOperator:
if o.s=='X':
paraH[0]= np.real(o.c)*scaleH
elif o.s=='Y':
paraH[1]=np.real(o.c)*scaleH
elif o.s=='Z':
paraH[2]=np.real(o.c)*scaleH
expiH = ExpPauli(paraH)
exp = expiH*expS
self._gates = [
[(exp,),self._U3]
]
@property
def gates(self):
return self._gates
@gates.setter
def gates(self,a):
self._gates = a
def _U3(self,Q,exp):
theta,phi,lamb = exp.U3()
Q.U3(0,theta,phi,lamb)
avg_line_length: 29.304688 | max_line_length: 68 | alphanum_fraction: 0.466276
content_no_comment:
import numpy as np
from hqca.core import *
from hqca.core.primitives import *
from hqca.tools import *
import sys
from numpy import sin as sin
from numpy import cos as cos
from copy import deepcopy as copy
class ExpPauli:
def __init__(self,vec):
v = np.asmatrix(vec)
if v.shape[0]>v.shape[1]:
v = v.T
if np.linalg.norm(v)==0:
self.iden=True
self.a = 0
self.v = v
else:
self.iden=False
self.a = np.linalg.norm(v)
self.v = v/self.a
def __mul__(self,w):
if self.iden:
return w
if w.iden:
return self
cc = np.cos(self.a)*np.cos(w.a)
cs = np.cos(self.a)*np.sin(w.a)
sc = np.sin(self.a)*np.cos(w.a)
ss = np.sin(self.a)*np.sin(w.a)
c = np.arccos(cc-np.dot(self.v,w.v.T)*ss)
k1 = self.v*sc
k2 = w.v*cs
k3 = -np.cross(self.v,w.v)*ss
k = (1/np.sin(c))*(k1+k2+k3)
return ExpPauli(c*k)
def __str__(self):
t = '||v||: {:.5f}, '.format(self.a)
t+= 'nx: {:+.5f}, '.format(self.v[0,0])
t+= 'ny: {:+.5f}, '.format(self.v[0,1])
t+= 'nz: {:+.5f}'.format(self.v[0,2])
return t
def matrix(self):
x = np.matrix([[0,1],[1,0]],dtype=np.complex_)
y = np.matrix([[0,-1j],[1j,0]],dtype=np.complex_)
z = np.matrix([[1,0],[0,-1]],dtype=np.complex_)
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
i = np.identity(2)
if self.iden:
return np.identity(2)
return np.cos(self.a)*i + (x*nx+y*ny+z*nz)*1j*np.sin(self.a)
def U3(self):
if self.iden:
return 0,0,0
A = np.sin(self.a)**2
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
part = nx**2+ny**2
vd = np.cos(self.a)+1j*nz*np.sin(self.a)
vo = (1j*nx-ny)*np.sin(self.a)
        if abs(part) <= 1e-10:
theta= 0
sigma = (1j*np.log(vd)).real
delta= 0
else:
theta = 2*np.arcsin(np.sqrt((nx**2+ny**2)*A))
aleph=-ny*np.sin(self.a)/np.sin(theta/2)
beta = nx*np.sin(self.a)/np.sin(theta/2)
delta = (-1j*np.log(vo/np.sin(theta/2))).real
sigma = (1j*np.log(vd/np.cos(theta/2))).real
return theta,sigma+delta,sigma-delta
class BenzyneInstruct(Instructions):
def __init__(self,operator,
Nq,
propagate=False,
HamiltonianOperator=[],
scaleH=1,
**kw):
        if Nq != 1:
            sys.exit('Expected exactly 1 qubit in instructions...')
para = np.array([0.0,0.0,0.0])
expS = ExpPauli(para)
for A in operator:
para = np.array([0.0,0.0,0.0])
for o in A:
if o.s=='X':
para[0]=np.imag(o.c)
elif o.s=='Y':
para[1]=np.imag(o.c)
elif o.s=='Z':
para[2]=np.imag(o.c)
expS = ExpPauli(para)*expS
paraH = np.array([0.0,0.0,0.0])
for o in HamiltonianOperator:
if o.s=='X':
paraH[0]= np.real(o.c)*scaleH
elif o.s=='Y':
paraH[1]=np.real(o.c)*scaleH
elif o.s=='Z':
paraH[2]=np.real(o.c)*scaleH
expiH = ExpPauli(paraH)
exp = expiH*expS
self._gates = [
[(exp,),self._U3]
]
@property
def gates(self):
return self._gates
@gates.setter
def gates(self,a):
self._gates = a
def _U3(self,Q,exp):
theta,phi,lamb = exp.U3()
Q.U3(0,theta,phi,lamb)
| true
| true
|
790d24e0781c70bc08d858d534ed2912997e3f80
| 6,626
|
py
|
Python
|
peering_manager/settings.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
peering_manager/settings.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
peering_manager/settings.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
# DO NOT EDIT THIS FILE!
#
# All configuration must be done in the `configuration.py` file.
# This file is part of the Peering Manager code and it will be overwritten with
# every code releases.
from __future__ import unicode_literals
import os
import socket
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
try:
from peering_manager import configuration
except ImportError:
raise ImproperlyConfigured(
'Configuration file is not present. Please define peering_manager/configuration.py per the documentation.')
VERSION = '0.99-dev'
SECRET_KEY = getattr(configuration, 'SECRET_KEY', '')
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
DEBUG = getattr(configuration, 'DEBUG', False)
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 20)
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
MY_ASN = getattr(configuration, 'MY_ASN', -1)
if MY_ASN == -1:
raise ImproperlyConfigured(
'The MY_ASN setting must be set to a valid AS number.')
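# A minimal `configuration.py` sketch (illustrative placeholder values; the
# names below are exactly the settings consumed above):
#
#     SECRET_KEY = 'change-me'
#     ALLOWED_HOSTS = ['peering.example.com']
#     MY_ASN = 64512  # replace with your own AS number
#     DEBUG = False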
# PeeringDB URLs
PEERINGDB_API = 'https://peeringdb.com/api/'
PEERINGDB = 'https://peeringdb.com/asn/'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
from peering_manager.ldap_config import *
LDAP_CONFIGURED = True
except ImportError:
LDAP_CONFIGURED = False
# If LDAP is configured, load the config
if LDAP_CONFIGURED:
try:
import ldap
import django_auth_ldap
# Prepend LDAPBackend to the default ModelBackend
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
except ImportError:
raise ImproperlyConfigured(
'LDAP authentication has been configured, but django-auth-ldap is not installed. You can remove peering_manager/ldap_config.py to disable LDAP.'
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_tables2',
'peering',
'peeringdb',
'utils',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'utils.middleware.RequireLoginMiddleware',
]
ROOT_URLCONF = 'peering_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utils.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'peering_manager.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Django logging
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s | %(levelname)s | %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peering-manager.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'peeringdb_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peeringdb.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'napalm_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/napalm.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
},
'loggers': {
'peering.manager.peering': {
'handlers': ['file'],
'level': 'DEBUG',
},
'peering.manager.peeringdb': {
'handlers': ['peeringdb_file'],
'level': 'DEBUG',
},
'peering.manager.napalm': {
'handlers': ['napalm_file'],
'level': 'DEBUG',
},
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication URL
LOGIN_URL = '/{}login/'.format(BASE_PATH)
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static/'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'project-static'),
)
# Django filters
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = '0'
try:
HOSTNAME = socket.gethostname()
except Exception:
HOSTNAME = 'localhost'
| 28.195745
| 156
| 0.641111
|
from __future__ import unicode_literals
import os
import socket
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
try:
from peering_manager import configuration
except ImportError:
raise ImproperlyConfigured(
'Configuration file is not present. Please define peering_manager/configuration.py per the documentation.')
VERSION = '0.99-dev'
SECRET_KEY = getattr(configuration, 'SECRET_KEY', '')
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/'
DEBUG = getattr(configuration, 'DEBUG', False)
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 20)
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
MY_ASN = getattr(configuration, 'MY_ASN', -1)
if MY_ASN == -1:
raise ImproperlyConfigured(
'The MY_ASN setting must be set to a valid AS number.')
PEERINGDB_API = 'https://peeringdb.com/api/'
PEERINGDB = 'https://peeringdb.com/asn/'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
from peering_manager.ldap_config import *
LDAP_CONFIGURED = True
except ImportError:
LDAP_CONFIGURED = False
if LDAP_CONFIGURED:
try:
import ldap
import django_auth_ldap
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
except ImportError:
raise ImproperlyConfigured(
'LDAP authentication has been configured, but django-auth-ldap is not installed. You can remove peering_manager/ldap_config.py to disable LDAP.'
)
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_tables2',
'peering',
'peeringdb',
'utils',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'utils.middleware.RequireLoginMiddleware',
]
ROOT_URLCONF = 'peering_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utils.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'peering_manager.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s | %(levelname)s | %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peering-manager.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'peeringdb_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peeringdb.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'napalm_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/napalm.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
},
'loggers': {
'peering.manager.peering': {
'handlers': ['file'],
'level': 'DEBUG',
},
'peering.manager.peeringdb': {
'handlers': ['peeringdb_file'],
'level': 'DEBUG',
},
'peering.manager.napalm': {
'handlers': ['napalm_file'],
'level': 'DEBUG',
},
}
}
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_URL = '/{}login/'.format(BASE_PATH)
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
STATIC_ROOT = BASE_DIR + '/static/'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'project-static'),
)
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = '0'
try:
HOSTNAME = socket.gethostname()
except Exception:
HOSTNAME = 'localhost'
| true
| true
|
790d24f66d9342c25b00bbbfdaf5613e710a751d
| 58,754
|
py
|
Python
|
tests/modeladmin/tests.py
|
vincepandolfo/django
|
67cf5efa31acb2916034afb15610b700695dfcb0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2017-01-11T06:27:15.000Z
|
2017-01-11T06:27:15.000Z
|
tests/modeladmin/tests.py
|
vincepandolfo/django
|
67cf5efa31acb2916034afb15610b700695dfcb0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/modeladmin/tests.py
|
vincepandolfo/django
|
67cf5efa31acb2916034afb15610b700695dfcb0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
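# Shared mock request used throughout these tests; the code paths exercised
# here only consult request.user via has_perm(), so a permissive stub
# suffices.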
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
        Ensure that lookup_allowed() allows a parameter
        whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, the fields on the Form class
        # should be restricted to those specified. This may cause errors to be
        # raised in the db layer if required model fields aren't in
        # fields/fieldsets, but that's preferable to ghost errors where a
        # field in your Form class isn't displayed because you forgot to add
        # it to fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
        # If we specify a form, it should be used, allowing custom validation
        # to work properly. This won't, however, break any of the admin
        # widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our
        # custom form, make sure that RelatedFieldWidgetWrapper doesn't mess
        # that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a many-to-many field.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a foreign key, or a many-to-many field."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a many-to-many field.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'date_hierarchy' refers to 'non_existent_field', which "
"is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E127')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
""" Test if `model` attribute on inline model admin is a models.Model.
"""
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
Ensure that has_add_permission returns True for users who can add
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
Ensure that has_change_permission returns True for users who can edit
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
Ensure that has_delete_permission returns True for users who can delete
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
Ensure that has_module_permission returns True for users who have any
permission for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
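# Illustrative sketch, not part of the original test module: the Mock*User
# classes above duck-type only has_perm()/has_module_perms(), which is all
# ModelAdmin consults, so the permission logic can be exercised without a
# database (assuming Django settings are configured so this module imports).
if __name__ == "__main__":
    _user = ModelAdminPermissionTests.MockAddUser()
    assert _user.has_perm("modeladmin.add_band")          # matching perm
    assert not _user.has_perm("modeladmin.change_band")   # any other perm
    assert _user.has_module_perms("modeladmin")           # module-level perm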
| 34.480047
| 116
| 0.622187
|
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
    def test_default_fieldsets(self):
        # no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a fieldsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should restrict the fields on the
        # Form class to the fields specified. This may cause errors to be raised
        # in the db layer if required model fields aren't in fields/fieldsets,
        # but that's preferable to ghost errors where you have a field in your
        # Form class that isn't being displayed because you forgot to add it to
        # fields/fieldsets.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom
        # form, make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id)
def test_regression_for_ticket_15820(self):
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
    def test_default_foreign_key(self):
        # Without radio_fields, ForeignKey and choice fields use the default
        # Select widget.
        cma = ModelAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields['transport'].widget),
            Select)
        self.assertEqual(
            list(cmafa.base_fields['transport'].widget.choices),
            [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a many-to-many field.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a foreign key, or a many-to-many field."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a many-to-many field.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'date_hierarchy' refers to 'non_existent_field', which "
"is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E127')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
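# Illustrative sketch, not part of the original module: the CheckTestCase
# helpers above reduce to one pattern -- instantiate the ModelAdmin against a
# model and an AdminSite, then inspect the Error list returned by .check().
# Assumes a configured Django project, as when this test suite is run.
if __name__ == "__main__":
    class _DemoAdmin(ModelAdmin):
        list_per_page = "not-an-int"  # non-integer value trips admin.E118
    _errors = _DemoAdmin(ValidationTestModel, AdminSite()).check()
    print([e.id for e in _errors])  # expected: ['admin.E118']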
| true
| true
|
790d25019235ab96857803e36b00ae9a3404355e
| 2,159
|
py
|
Python
|
documentstore_migracao/utils/extract_isis.py
|
patymori/document-store-migracao
|
1320ef58de1484ca8383c29c1fea55c4b2d89e67
|
[
"BSD-2-Clause"
] | 1
|
2019-11-21T12:35:36.000Z
|
2019-11-21T12:35:36.000Z
|
documentstore_migracao/utils/extract_isis.py
|
patymori/document-store-migracao
|
1320ef58de1484ca8383c29c1fea55c4b2d89e67
|
[
"BSD-2-Clause"
] | 336
|
2019-04-01T14:06:37.000Z
|
2022-03-21T22:16:55.000Z
|
documentstore_migracao/utils/extract_isis.py
|
patymori/document-store-migracao
|
1320ef58de1484ca8383c29c1fea55c4b2d89e67
|
[
"BSD-2-Clause"
] | 4
|
2019-03-28T13:32:04.000Z
|
2020-04-17T18:03:19.000Z
|
import os
import logging
import json
from typing import Union, Dict, List
from documentstore_migracao.utils.isis2json import isis2json
logger = logging.getLogger(__name__)
class OutputContainer:
"""Classe que mimetiza a escrita de arquivos para a escrita em uma estrutura
de lista. Cada linha em um arquivo representa uma entrada na lista."""
def __init__(self):
self._lines = []
def write(self, string: str) -> None:
try:
_string = json.loads(string)
except Exception:
pass
else:
self._lines.append(_string)
def close(self):
pass
@property
def lines(self):
return self._lines
def create_output_dir(path):
output_dir = "/".join(path.split("/")[:-1])
if not os.path.exists(output_dir):
logger.debug("Creating folder: %s", output_dir)
os.makedirs(output_dir)
def run(path: str, output_file: str = "", mongo=False) -> Union[None, List[dict]]:
"""Invoca o utilitário `isis2json` com os parâmetros adaptados para a
leitura de arquivos MST de acordo com as definições padrões utilizadas
pelo __main__ da ferramenta `isis2json`.
O resultado de saída pode ser escrito diretamente para um arquivo em disco
ou retornará uma lista contento as linhas passíveis de conversão para
JSON.
Exemplo:
>>> run("file.mst")
>>> [{"mfn": 1}, {"mfn": 2}]
>>> run("file.mst", output_file="/tmp/output.json")
>>> None
"""
if not os.path.exists(path):
raise FileNotFoundError("File '%s' does not exist.")
if len(output_file) > 0:
output_file = open(output_file, "wb")
else:
output_file = OutputContainer()
isis2json.writeJsonArray(
iterRecords=isis2json.iterMstRecords,
file_name=path,
output=output_file,
qty=isis2json.DEFAULT_QTY,
skip=0,
id_tag=0,
gen_uuid=False,
mongo=mongo,
mfn=True,
isis_json_type=3,
prefix="v",
constant="",
)
output_file.close()
if isinstance(output_file, OutputContainer):
return output_file.lines
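# Illustrative usage sketch, not part of the original module: OutputContainer
# accepts any write() call but keeps only JSON-parseable lines, which is how
# run() collects records when no output_file is given (importing this module
# still requires the isis2json dependency to be available).
if __name__ == "__main__":
    container = OutputContainer()
    container.write('{"mfn": 1}')  # valid JSON -> parsed and appended
    container.write("not json")    # unparseable -> silently discarded
    assert container.lines == [{"mfn": 1}]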
| 25.104651
| 82
| 0.633164
|
import os
import logging
import json
from typing import Union, Dict, List
from documentstore_migracao.utils.isis2json import isis2json
logger = logging.getLogger(__name__)
class OutputContainer:
def __init__(self):
self._lines = []
def write(self, string: str) -> None:
try:
_string = json.loads(string)
except Exception:
pass
else:
self._lines.append(_string)
def close(self):
pass
@property
def lines(self):
return self._lines
def create_output_dir(path):
output_dir = "/".join(path.split("/")[:-1])
if not os.path.exists(output_dir):
logger.debug("Creating folder: %s", output_dir)
os.makedirs(output_dir)
def run(path: str, output_file: str = "", mongo=False) -> Union[None, List[dict]]:
if not os.path.exists(path):
raise FileNotFoundError("File '%s' does not exist.")
if len(output_file) > 0:
output_file = open(output_file, "wb")
else:
output_file = OutputContainer()
isis2json.writeJsonArray(
iterRecords=isis2json.iterMstRecords,
file_name=path,
output=output_file,
qty=isis2json.DEFAULT_QTY,
skip=0,
id_tag=0,
gen_uuid=False,
mongo=mongo,
mfn=True,
isis_json_type=3,
prefix="v",
constant="",
)
output_file.close()
if isinstance(output_file, OutputContainer):
return output_file.lines
| true
| true
|
790d254d2661f4cf5efcb99dab0345b6c649a88b
| 4,350
|
py
|
Python
|
torchvision/prototype/models/video/resnet.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T14:11:12.000Z
|
2022-03-08T14:11:12.000Z
|
torchvision/prototype/models/video/resnet.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/prototype/models/video/resnet.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Type, Union
from torch import nn
from torchvision.prototype.transforms import VideoClassificationEval
from torchvision.transforms.functional import InterpolationMode
from ....models.video.resnet import (
BasicBlock,
BasicStem,
Bottleneck,
Conv2Plus1D,
Conv3DSimple,
Conv3DNoTemporal,
R2Plus1dStem,
VideoResNet,
)
from .._api import WeightsEnum, Weights
from .._meta import _KINETICS400_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
__all__ = [
"VideoResNet",
"R3D_18_Weights",
"MC3_18_Weights",
"R2Plus1D_18_Weights",
"r3d_18",
"mc3_18",
"r2plus1d_18",
]
def _video_resnet(
block: Type[Union[BasicBlock, Bottleneck]],
conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
layers: List[int],
stem: Callable[..., nn.Module],
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> VideoResNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = VideoResNet(block, conv_makers, layers, stem, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
_COMMON_META = {
"task": "video_classification",
"publication_year": 2017,
"size": (112, 112),
"min_size": (1, 1),
"categories": _KINETICS400_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification",
}
class R3D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "R3D",
"num_params": 33371472,
"acc@1": 52.75,
"acc@5": 75.45,
},
)
DEFAULT = KINETICS400_V1
class MC3_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "MC3",
"num_params": 11695440,
"acc@1": 53.90,
"acc@5": 76.29,
},
)
DEFAULT = KINETICS400_V1
class R2Plus1D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "R(2+1)D",
"num_params": 31505325,
"acc@1": 57.50,
"acc@5": 78.81,
},
)
DEFAULT = KINETICS400_V1
@handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1))
def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = R3D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] * 4,
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1))
def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = MC3_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] + [Conv3DNoTemporal] * 3, # type: ignore[list-item]
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1))
def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = R2Plus1D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv2Plus1D] * 4,
[2, 2, 2, 2],
R2Plus1dStem,
weights,
progress,
**kwargs,
)
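# Illustrative usage sketch, not part of the original file: each builder takes
# an optional *_Weights enum member. With weights=None, nothing is downloaded
# and num_classes can be set freely; with a weights member, num_classes is
# overridden inside _video_resnet to match the weights' category list.
if __name__ == "__main__":
    model = r3d_18(weights=None, num_classes=400)  # randomly initialized R3D-18
    model.eval()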
| 28.431373
| 119
| 0.644828
|
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Type, Union
from torch import nn
from torchvision.prototype.transforms import VideoClassificationEval
from torchvision.transforms.functional import InterpolationMode
from ....models.video.resnet import (
BasicBlock,
BasicStem,
Bottleneck,
Conv2Plus1D,
Conv3DSimple,
Conv3DNoTemporal,
R2Plus1dStem,
VideoResNet,
)
from .._api import WeightsEnum, Weights
from .._meta import _KINETICS400_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
__all__ = [
"VideoResNet",
"R3D_18_Weights",
"MC3_18_Weights",
"R2Plus1D_18_Weights",
"r3d_18",
"mc3_18",
"r2plus1d_18",
]
def _video_resnet(
block: Type[Union[BasicBlock, Bottleneck]],
conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
layers: List[int],
stem: Callable[..., nn.Module],
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> VideoResNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = VideoResNet(block, conv_makers, layers, stem, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
_COMMON_META = {
"task": "video_classification",
"publication_year": 2017,
"size": (112, 112),
"min_size": (1, 1),
"categories": _KINETICS400_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification",
}
class R3D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "R3D",
"num_params": 33371472,
"acc@1": 52.75,
"acc@5": 75.45,
},
)
DEFAULT = KINETICS400_V1
class MC3_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "MC3",
"num_params": 11695440,
"acc@1": 53.90,
"acc@5": 76.29,
},
)
DEFAULT = KINETICS400_V1
class R2Plus1D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth",
transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"architecture": "R(2+1)D",
"num_params": 31505325,
"acc@1": 57.50,
"acc@5": 78.81,
},
)
DEFAULT = KINETICS400_V1
@handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1))
def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = R3D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] * 4,
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1))
def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = MC3_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] + [Conv3DNoTemporal] * 3,
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1))
def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
weights = R2Plus1D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv2Plus1D] * 4,
[2, 2, 2, 2],
R2Plus1dStem,
weights,
progress,
**kwargs,
)
| true
| true
|
790d2568757ada99e9c2e6d240a3c520fa2886d3
| 3,561
|
py
|
Python
|
tests/utils.py
|
Neilblaze/websockets
|
c39268c4867e41d11c20f7859583761d52a04012
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T06:10:30.000Z
|
2021-03-04T06:10:30.000Z
|
tests/utils.py
|
Neilblaze/websockets
|
c39268c4867e41d11c20f7859583761d52a04012
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils.py
|
Neilblaze/websockets
|
c39268c4867e41d11c20f7859583761d52a04012
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import contextlib
import email.utils
import functools
import logging
import os
import time
import unittest
DATE = email.utils.formatdate(usegmt=True)
class GeneratorTestCase(unittest.TestCase):
def assertGeneratorRunning(self, gen):
"""
Check that a generator-based coroutine hasn't completed yet.
"""
next(gen)
def assertGeneratorReturns(self, gen):
"""
        Check that a generator-based coroutine completes and returns its value.
"""
with self.assertRaises(StopIteration) as raised:
next(gen)
return raised.exception.value
class AsyncioTestCase(unittest.TestCase):
"""
Base class for tests that sets up an isolated event loop for each test.
"""
def __init_subclass__(cls, **kwargs):
"""
Convert test coroutines to test functions.
        This supports asynchronous tests transparently.
"""
super().__init_subclass__(**kwargs)
for name in unittest.defaultTestLoader.getTestCaseNames(cls):
test = getattr(cls, name)
if asyncio.iscoroutinefunction(test):
setattr(cls, name, cls.convert_async_to_sync(test))
@staticmethod
def convert_async_to_sync(test):
"""
Convert a test coroutine to a test function.
"""
@functools.wraps(test)
def test_func(self, *args, **kwargs):
return self.loop.run_until_complete(test(self, *args, **kwargs))
return test_func
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
super().tearDown()
def run_loop_once(self):
# Process callbacks scheduled with call_soon by appending a callback
# to stop the event loop then running it until it hits that callback.
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
@contextlib.contextmanager
def assertNoLogs(self, logger="websockets", level=logging.ERROR):
"""
No message is logged on the given logger with at least the given level.
"""
with self.assertLogs(logger, level) as logs:
# We want to test that no log message is emitted
# but assertLogs expects at least one log message.
logging.getLogger(logger).log(level, "dummy")
yield
level_name = logging.getLevelName(level)
self.assertEqual(logs.output, [f"{level_name}:{logger}:dummy"])
def assertDeprecationWarnings(self, recorded_warnings, expected_warnings):
"""
Check recorded deprecation warnings match a list of expected messages.
"""
self.assertEqual(len(recorded_warnings), len(expected_warnings))
for recorded, expected in zip(recorded_warnings, expected_warnings):
actual = recorded.message
self.assertEqual(str(actual), expected)
self.assertEqual(type(actual), DeprecationWarning)
# Unit for timeouts. May be increased on slow machines by setting the
# WEBSOCKETS_TESTS_TIMEOUT_FACTOR environment variable.
MS = 0.001 * int(os.environ.get("WEBSOCKETS_TESTS_TIMEOUT_FACTOR", 1))
# asyncio's debug mode has a 10x performance penalty for this test suite.
if os.environ.get("PYTHONASYNCIODEBUG"): # pragma: no cover
MS *= 10
# Ensure that timeouts are larger than the clock's resolution (for Windows).
MS = max(MS, 2.5 * time.get_clock_info("monotonic").resolution)
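# Hedged usage sketch (hypothetical test class, not part of this file): because of
# __init_subclass__ above, coroutine test methods on AsyncioTestCase subclasses are
# converted to synchronous functions that run on the per-test event loop.
class _ExampleAsyncTest(AsyncioTestCase):
    async def test_sleep_shorter_than_timeout_unit(self):
        await asyncio.sleep(MS)  # MS scales with WEBSOCKETS_TESTS_TIMEOUT_FACTOR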
| 30.698276
| 79
| 0.663016
|
import asyncio
import contextlib
import email.utils
import functools
import logging
import os
import time
import unittest
DATE = email.utils.formatdate(usegmt=True)
class GeneratorTestCase(unittest.TestCase):
def assertGeneratorRunning(self, gen):
next(gen)
def assertGeneratorReturns(self, gen):
with self.assertRaises(StopIteration) as raised:
next(gen)
return raised.exception.value
class AsyncioTestCase(unittest.TestCase):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
for name in unittest.defaultTestLoader.getTestCaseNames(cls):
test = getattr(cls, name)
if asyncio.iscoroutinefunction(test):
setattr(cls, name, cls.convert_async_to_sync(test))
@staticmethod
def convert_async_to_sync(test):
@functools.wraps(test)
def test_func(self, *args, **kwargs):
return self.loop.run_until_complete(test(self, *args, **kwargs))
return test_func
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
super().tearDown()
def run_loop_once(self):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
@contextlib.contextmanager
def assertNoLogs(self, logger="websockets", level=logging.ERROR):
with self.assertLogs(logger, level) as logs:
logging.getLogger(logger).log(level, "dummy")
yield
level_name = logging.getLevelName(level)
self.assertEqual(logs.output, [f"{level_name}:{logger}:dummy"])
def assertDeprecationWarnings(self, recorded_warnings, expected_warnings):
self.assertEqual(len(recorded_warnings), len(expected_warnings))
for recorded, expected in zip(recorded_warnings, expected_warnings):
actual = recorded.message
self.assertEqual(str(actual), expected)
self.assertEqual(type(actual), DeprecationWarning)
MS = 0.001 * int(os.environ.get("WEBSOCKETS_TESTS_TIMEOUT_FACTOR", 1))
if os.environ.get("PYTHONASYNCIODEBUG"): # pragma: no cover
MS *= 10
# Ensure that timeouts are larger than the clock's resolution (for Windows).
MS = max(MS, 2.5 * time.get_clock_info("monotonic").resolution)
| true
| true
|
790d258c90c95ae815827212a0fd1da8191e3ca9
| 1,103
|
py
|
Python
|
tests/test_clients/test_methods/test_errors/test_chat_not_found.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 13
|
2021-01-21T12:43:10.000Z
|
2022-03-23T11:11:59.000Z
|
tests/test_clients/test_methods/test_errors/test_chat_not_found.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 259
|
2020-02-26T08:51:03.000Z
|
2022-03-23T11:08:36.000Z
|
tests/test_clients/test_methods/test_errors/test_chat_not_found.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 5
|
2019-12-02T16:19:22.000Z
|
2021-11-22T20:33:34.000Z
|
import uuid
from http import HTTPStatus
import pytest
from botx.clients.methods.errors.chat_not_found import (
ChatNotFoundData,
ChatNotFoundError,
)
from botx.clients.methods.v3.chats.add_user import AddUser
from botx.concurrency import callable_to_coroutine
pytestmark = pytest.mark.asyncio
pytest_plugins = ("tests.test_clients.fixtures",)
async def test_raising_chat_not_found(client, requests_client):
method = AddUser(
host="example.com",
group_chat_id=uuid.uuid4(),
user_huids=[uuid.uuid4()],
)
errors_to_raise = {
AddUser: (
HTTPStatus.NOT_FOUND,
ChatNotFoundData(group_chat_id=method.group_chat_id),
),
}
with client.error_client(errors=errors_to_raise):
request = requests_client.build_request(method)
response = await callable_to_coroutine(requests_client.execute, request)
with pytest.raises(ChatNotFoundError):
await callable_to_coroutine(
requests_client.process_response,
method,
response,
)
| 26.902439
| 80
| 0.682684
|
import uuid
from http import HTTPStatus
import pytest
from botx.clients.methods.errors.chat_not_found import (
ChatNotFoundData,
ChatNotFoundError,
)
from botx.clients.methods.v3.chats.add_user import AddUser
from botx.concurrency import callable_to_coroutine
pytestmark = pytest.mark.asyncio
pytest_plugins = ("tests.test_clients.fixtures",)
async def test_raising_chat_not_found(client, requests_client):
method = AddUser(
host="example.com",
group_chat_id=uuid.uuid4(),
user_huids=[uuid.uuid4()],
)
errors_to_raise = {
AddUser: (
HTTPStatus.NOT_FOUND,
ChatNotFoundData(group_chat_id=method.group_chat_id),
),
}
with client.error_client(errors=errors_to_raise):
request = requests_client.build_request(method)
response = await callable_to_coroutine(requests_client.execute, request)
with pytest.raises(ChatNotFoundError):
await callable_to_coroutine(
requests_client.process_response,
method,
response,
)
| true
| true
|
790d265552193003b0d3b4ace357dfe4ec873f3b
| 786
|
py
|
Python
|
328-odd-even-linked-list/328-odd-even-linked-list.py
|
MayaScarlet/leetcode-python
|
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
|
[
"MIT"
] | null | null | null |
328-odd-even-linked-list/328-odd-even-linked-list.py
|
MayaScarlet/leetcode-python
|
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
|
[
"MIT"
] | null | null | null |
328-odd-even-linked-list/328-odd-even-linked-list.py
|
MayaScarlet/leetcode-python
|
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        if head is None:
            return None
        # Dummy heads for the two chains. count is 0-based, so the `even` chain
        # collects the 1st, 3rd, 5th, ... nodes (the "odd" positions of the 1-based
        # problem statement) and the `odd` chain collects the 2nd, 4th, 6th, ... nodes.
        odd, even = ListNode(), ListNode()
        oddTail, evenTail = odd, even
        count = 0
        while head:
            if count % 2 == 0:
                evenTail.next = head
                evenTail = evenTail.next
            else:
                oddTail.next = head
                oddTail = oddTail.next
            head = head.next
            count += 1
        # Append the second chain after the first and terminate the reordered list.
        evenTail.next = odd.next
        oddTail.next = None
        return even.next
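# Hypothetical self-check driver (not part of the LeetCode submission). It assumes
# ListNode and Optional are defined before the Solution class, as the LeetCode
# runtime does with the definition quoted in the comment at the top of this file.
def _demo():
    nodes = [ListNode(i) for i in range(1, 6)]  # 1 -> 2 -> 3 -> 4 -> 5
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    out = Solution().oddEvenList(nodes[0])
    vals = []
    while out:
        vals.append(out.val)
        out = out.next
    print(vals)  # expected [1, 3, 5, 2, 4]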
| 27.103448
| 74
| 0.493639
|
class Solution:
def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:
if head is None:
return None
odd, even = ListNode(), ListNode()
oddTail, evenTail = odd, even
count = 0
while head:
if count % 2 == 0:
evenTail.next = head
evenTail = evenTail.next
else:
oddTail.next = head
oddTail = oddTail.next
head = head.next
count += 1
evenTail.next = odd.next
oddTail.next = None
return even.next
| true
| true
|
790d26ec5256f531073248f670b34a1c813b6507
| 2,260
|
py
|
Python
|
sha2/py_sha256.py
|
ryos36/polyphony-tutorial
|
8937f2b8e8136c3b5d55b2a6be6e8b6ab35b04e7
|
[
"MIT"
] | 4
|
2018-05-04T01:08:49.000Z
|
2021-01-21T07:09:00.000Z
|
sha2/py_sha256.py
|
ryos36/polyphony-tutorial
|
8937f2b8e8136c3b5d55b2a6be6e8b6ab35b04e7
|
[
"MIT"
] | null | null | null |
sha2/py_sha256.py
|
ryos36/polyphony-tutorial
|
8937f2b8e8136c3b5d55b2a6be6e8b6ab35b04e7
|
[
"MIT"
] | 1
|
2020-06-02T08:41:54.000Z
|
2020-06-02T08:41:54.000Z
|
_k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
_h = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19]
def rotr(x, y):
return ((x >> y) | (x << (32 - y))) & 0xFFFFFFFF
def sha256(b4x16):
w = [0] * 64
print(b4x16)
for i in range(16):
w[i] = b4x16[i]
for i in range(16, 64):
wi_15 = w[i - 15]
s0 = rotr(wi_15, 7) ^ rotr(wi_15, 18) ^ (wi_15 >> 3)
wi_2 = w[i - 2]
s1 = rotr(wi_2, 17) ^ rotr(wi_2, 19) ^ (wi_2 >> 10)
wi_16 = w[i - 16]
wi_7 = w[i - 7]
w[i] = (wi_16 + s0 + wi_7 + s1) & 0xFFFFFFFF
a, b, c, d, e, f, g, h = _h
for i in range(64):
s0 = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22)
maj = (a & b) ^ (a & c) ^ (b & c)
t2 = s0 + maj
s1 = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25)
ch = (e & f) ^ ((~e) & g)
t1 = h + s1 + ch + _k[i] + w[i]
h = g
g = f
f = e
e = (d + t1) & 0xFFFFFFFF
d = c
c = b
b = a
a = (t1 + t2) & 0xFFFFFFFF
_lst = [a, b, c, d, e, f, g, h]
for i in range(8):
_h[i] = (_h[i] + _lst[i]) & 0xFFFFFFFF
for i in _h:
print('{:08x}'.format(i))
print("===========")
return _h
lst = [0x61616161] * 16
sha256(lst)
lst = [0] * 16
lst[0] = 0x80000000
lst[15] = 0x00000200
rv = sha256(lst)
for i in rv:
print('R {:08x}'.format(i))
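# Hedged cross-check (an assumption about the intended test vector): the two fixed
# blocks above encode the 64-byte message b'a' * 64 plus its SHA-256 padding block,
# so the final _h state printed above should match Python's hashlib on that input.
import hashlib
print('H', hashlib.sha256(b'a' * 64).hexdigest())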
| 28.974359
| 60
| 0.553097
|
_k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
_h = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19]
def rotr(x, y):
return ((x >> y) | (x << (32 - y))) & 0xFFFFFFFF
def sha256(b4x16):
w = [0] * 64
print(b4x16)
for i in range(16):
w[i] = b4x16[i]
for i in range(16, 64):
wi_15 = w[i - 15]
s0 = rotr(wi_15, 7) ^ rotr(wi_15, 18) ^ (wi_15 >> 3)
wi_2 = w[i - 2]
s1 = rotr(wi_2, 17) ^ rotr(wi_2, 19) ^ (wi_2 >> 10)
wi_16 = w[i - 16]
wi_7 = w[i - 7]
w[i] = (wi_16 + s0 + wi_7 + s1) & 0xFFFFFFFF
a, b, c, d, e, f, g, h = _h
for i in range(64):
s0 = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22)
maj = (a & b) ^ (a & c) ^ (b & c)
t2 = s0 + maj
s1 = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25)
ch = (e & f) ^ ((~e) & g)
t1 = h + s1 + ch + _k[i] + w[i]
h = g
g = f
f = e
e = (d + t1) & 0xFFFFFFFF
d = c
c = b
b = a
a = (t1 + t2) & 0xFFFFFFFF
_lst = [a, b, c, d, e, f, g, h]
for i in range(8):
_h[i] = (_h[i] + _lst[i]) & 0xFFFFFFFF
for i in _h:
print('{:08x}'.format(i))
print("===========")
return _h
lst = [0x61616161] * 16
sha256(lst)
lst = [0] * 16
lst[0] = 0x80000000
lst[15] = 0x00000200
rv = sha256(lst)
for i in rv:
print('R {:08x}'.format(i))
| true
| true
|
790d277cc871ed756813e2f1a58586fb075984d4
| 818
|
py
|
Python
|
races/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
races/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
races/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Optional, Type, TYPE_CHECKING
import actor
from actions.ai import BasicMonster
import graphic
from inventory import Inventory
if TYPE_CHECKING:
from actions import Action
from location import Location
class Fighter(graphic.Graphic):
render_order = 0
hp: int = 0
power: int = 0
defense: int = 0
DEFAULT_AI: Type[Action] = BasicMonster
def __init__(self, inventory: Optional[Inventory] = None) -> None:
self.alive = True
self.max_hp = self.hp
self.inventory = inventory or Inventory()
@classmethod
def spawn(
cls, location: Location, ai_cls: Optional[Type[Action]] = None
) -> actor.Actor:
self = cls()
return actor.Actor(location, self, ai_cls or cls.DEFAULT_AI)
| 22.722222
| 70
| 0.683374
|
from __future__ import annotations
from typing import Optional, Type, TYPE_CHECKING
import actor
from actions.ai import BasicMonster
import graphic
from inventory import Inventory
if TYPE_CHECKING:
from actions import Action
from location import Location
class Fighter(graphic.Graphic):
render_order = 0
hp: int = 0
power: int = 0
defense: int = 0
DEFAULT_AI: Type[Action] = BasicMonster
def __init__(self, inventory: Optional[Inventory] = None) -> None:
self.alive = True
self.max_hp = self.hp
self.inventory = inventory or Inventory()
@classmethod
def spawn(
cls, location: Location, ai_cls: Optional[Type[Action]] = None
) -> actor.Actor:
self = cls()
return actor.Actor(location, self, ai_cls or cls.DEFAULT_AI)
| true
| true
|
790d27f2700d35a73d7e035fce8a5923c6d24964
| 10,877
|
py
|
Python
|
py/update_nginx_vhosts.py
|
bcoding/docker-host-scripts
|
edfb516266a991abf37b56e5e537ac9e93a6de26
|
[
"Unlicense"
] | null | null | null |
py/update_nginx_vhosts.py
|
bcoding/docker-host-scripts
|
edfb516266a991abf37b56e5e537ac9e93a6de26
|
[
"Unlicense"
] | null | null | null |
py/update_nginx_vhosts.py
|
bcoding/docker-host-scripts
|
edfb516266a991abf37b56e5e537ac9e93a6de26
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import common
import json
import docker_utils
nginx_sites_available = '/etc/nginx/sites-available'
CERT_DIR = '/root/certs'
import subprocess
def create_certificates(domains):
format_args = {'cert_dir': CERT_DIR}
import os.path
if not os.path.isfile(os.path.join(CERT_DIR, 'acmeCA.key.deleteme')):
commands = """openssl rsa -in %(cert_dir)s/acmeCA.key -out %(cert_dir)s/acmeCA.key.deleteme""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
subprocess.call([arg for arg in command.split(" ") if arg])
for domain in domains:
create_certificate(domain)
def create_certificate(domain):
format_args = {'domain': domain,
'cert_dir': CERT_DIR}
import os.path
if os.path.isfile('%(cert_dir)s/%(domain)s.key' % format_args):
return
commands = """
openssl genrsa -out %(cert_dir)s/%(domain)s.key 2048
openssl req -new -key %(cert_dir)s/%(domain)s.key -out %(cert_dir)s/%(domain)s.csr -subj /C=DE/ST=Niedersachsen/L=Osnabrueck/O=OPS/CN=%(domain)s
openssl x509 -req -in %(cert_dir)s/%(domain)s.csr -CA %(cert_dir)s/acmeCA.pem -CAkey %(cert_dir)s/acmeCA.key.deleteme -CAcreateserial -out %(cert_dir)s/%(domain)s.crt -days 500
rm %(cert_dir)s/%(domain)s.csr
""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
        print(command.split(" "))
subprocess.call([arg for arg in command.split(" ") if arg])
# create_certificates([host.domains[0] for host in common.get_vhost_config()])
def update_vhosts_config(applications):
jsonFile = open('/root/config/nginx_vhosts.json', "r")
data = json.load(jsonFile)
jsonFile.close()
for app in applications:
docker_container_config = docker_utils.get_config(app.docker_container_name)
vhost_config = data[app.vhost_name]
vhost_config['port'] = docker_container_config.port if not app.docker_container_port else app.docker_container_port
vhost_config['ip_addr'] = docker_container_config.ip_addr
jsonFile = open('/root/config/nginx_vhosts.json', "w+")
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
jsonFile.close()
def update_vhosts(vhosts):
for vhost in vhosts:
host = vhost.host
port = vhost.port
ip_addr = vhost.ip_addr
domains = vhost.domains
flags = vhost.flags
location_tmpl = """
location %(path)s {
proxy_pass http://upstream_%(upstream)s%(upstream_path)s;
proxy_http_version 1.1;
%(redirect_rule)s
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host %(host)s;
%(set_script_name)s
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port $server_port;
%(misc)s
}
"""
location_tmpl_params = {
'redirect_rule': 'proxy_redirect off;' if flags.get('disableRedirect') else ''
}
def render_location(location_dict):
location_dict['host'] = location_dict.get('host', '$host')
location_dict['set_script_name'] = location_dict.get('set_script_name', '')
location_dict['misc'] = location_dict.get('misc', '')
location_dict['upstream_path'] = location_dict.get('upstream_path', '')
params = dict(location_dict.items()+ location_tmpl_params.items())
# print params
return location_tmpl % params
location_parameters = { 'upstream': domains[0], 'path': '/', 'host': flags.get('forceHost', '$host'),
'upstream_path': flags.get('upstream_path', '')}
if 'htpasswd_file' in flags:
location_parameters['misc'] = 'auth_basic "Restricted"; auth_basic_user_file %s;' % (flags['htpasswd_file'])
if 'location_extra' in flags:
location_parameters['misc'] = location_parameters['misc'] if 'misc' in location_parameters else ''
location_parameters['misc'] += flags['location_extra']
location = render_location(location_parameters)
location_ssl = location
upstreams = [{
'local_port': port,
'local_address': ip_addr,
'name': domains[0]
}]
if flags.get('sslToPort'):
upstream_name = "%s_ssl " % domains[0]
location_ssl = render_location({ 'upstream': upstream_name, 'path': '/', 'host': flags.get('forceHost', '$host')})
upstreams.append({
'local_port': flags.get('sslToPort'),
'local_address': ip_addr,
'name': upstream_name
})
if flags.get('httpsToHttpPaths'):
for path in flags.get('httpsToHttpPaths').split(','):
location_ssl += "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') })
other_locations = [{ 'upstream': domains[0], 'path': '@failover', 'host': flags.get('forceHost', '$host')}]
other_locations_https = []
path_idx = 0
for path, path_config in vhost.paths.items():
upstream_name = "%s_%s " % (domains[0], path_idx)
upstreams.append({
'local_port': path_config['port'],
'local_address': vm_map[path_config['host']]['local_address'],
'name': upstream_name
})
if path_config['secure']:
other_locations_https.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
else:
other_locations.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
error_page 500 = @failover;
proxy_intercept_errors on;
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
path_idx += 1
upstream_tmpl = 'upstream upstream_%(name)s { server %(local_address)s:%(local_port)s; }'
rewrites = ''
extra_directives = ''
if flags.get('block_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_deny.txt;
}
'''
if flags.get('allow_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_allow.txt;
}
'''
if 'server_config_extra' in flags:
extra_directives += flags['server_config_extra']
if flags.get('aliases'):
aliases = flags.get('aliases').split("\n")
for alias in aliases:
extra_directives += '''
location /%s {
alias %s;
}
''' % tuple(alias.strip().split('->'))
if vhost.rewrites:
rewrites += vhost.rewrites
location_http = location if flags.get('allow_http') else 'return 301 https://$host$request_uri;'
if flags.get('httpPaths'):
for path in flags.get('httpPaths').split(','):
location_http = "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') }) + "\n" + ''' location / { return 301 https://$host$request_uri; }
'''
format_args = {
'upstreams': "\n".join([upstream_tmpl % up for up in upstreams]),
'public_port': port,
'other_locations': "\n".join([render_location(location_dict) for location_dict in other_locations]),
'other_locations_https': "\n".join([render_location(location_dict) for location_dict in other_locations_https]),
'extra_directives': extra_directives,
'domain': domains[0],
'server_names': ' '.join(domains) if not flags.get('rewriteDomains') else domains[0],
'location': location_ssl,
'rewrites': rewrites,
'upload_limit': flags.get('uploadLimit', '20M'),
'location_http': location_http,
'cert_dir': CERT_DIR}
config = """
%(upstreams)s
server {
listen 80;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
%(rewrites)s
%(location_http)s
%(other_locations)s
%(extra_directives)s
}
""" % format_args
if not flags.get('noSsl'):
config += """
server {
listen 443 ssl;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
ssl on;
ssl_certificate %(cert_dir)s/%(domain)s.cer;
ssl_certificate_key %(cert_dir)s/%(domain)s.key;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!CAMELLIA;
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
ssl_prefer_server_ciphers on;
%(location)s
%(other_locations_https)s
%(extra_directives)s
}
""" % format_args
if flags.get('rewriteDomains'):
for domain in domains[1:]:
config += """
server {
listen 80;
server_name %(domain1)s;
return 301 http://%(domain2)s$request_uri;
}
""" % {'domain1': domain, 'domain2': domains[0]}
f = open('%s/%s' % (nginx_sites_available, domains[0]), 'w')
f.write(config)
f.close()
'''
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
'''
update_vhosts_config(common.get_applications_config())
update_vhosts(common.get_vhost_config())
| 38.708185
| 341
| 0.558886
|
import common
import json
import docker_utils
nginx_sites_available = '/etc/nginx/sites-available'
CERT_DIR = '/root/certs'
import subprocess
def create_certificates(domains):
format_args = {'cert_dir': CERT_DIR}
import os.path
if not os.path.isfile(os.path.join(CERT_DIR, 'acmeCA.key.deleteme')):
commands = """openssl rsa -in %(cert_dir)s/acmeCA.key -out %(cert_dir)s/acmeCA.key.deleteme""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
subprocess.call([arg for arg in command.split(" ") if arg])
for domain in domains:
create_certificate(domain)
def create_certificate(domain):
format_args = {'domain': domain,
'cert_dir': CERT_DIR}
import os.path
if os.path.isfile('%(cert_dir)s/%(domain)s.key' % format_args):
return
commands = """
openssl genrsa -out %(cert_dir)s/%(domain)s.key 2048
openssl req -new -key %(cert_dir)s/%(domain)s.key -out %(cert_dir)s/%(domain)s.csr -subj /C=DE/ST=Niedersachsen/L=Osnabrueck/O=OPS/CN=%(domain)s
openssl x509 -req -in %(cert_dir)s/%(domain)s.csr -CA %(cert_dir)s/acmeCA.pem -CAkey %(cert_dir)s/acmeCA.key.deleteme -CAcreateserial -out %(cert_dir)s/%(domain)s.crt -days 500
rm %(cert_dir)s/%(domain)s.csr
""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
        print(command.split(" "))
subprocess.call([arg for arg in command.split(" ") if arg])
def update_vhosts_config(applications):
jsonFile = open('/root/config/nginx_vhosts.json', "r")
data = json.load(jsonFile)
jsonFile.close()
for app in applications:
docker_container_config = docker_utils.get_config(app.docker_container_name)
vhost_config = data[app.vhost_name]
vhost_config['port'] = docker_container_config.port if not app.docker_container_port else app.docker_container_port
vhost_config['ip_addr'] = docker_container_config.ip_addr
jsonFile = open('/root/config/nginx_vhosts.json', "w+")
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
jsonFile.close()
def update_vhosts(vhosts):
for vhost in vhosts:
host = vhost.host
port = vhost.port
ip_addr = vhost.ip_addr
domains = vhost.domains
flags = vhost.flags
location_tmpl = """
location %(path)s {
proxy_pass http://upstream_%(upstream)s%(upstream_path)s;
proxy_http_version 1.1;
%(redirect_rule)s
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host %(host)s;
%(set_script_name)s
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port $server_port;
%(misc)s
}
"""
location_tmpl_params = {
'redirect_rule': 'proxy_redirect off;' if flags.get('disableRedirect') else ''
}
def render_location(location_dict):
location_dict['host'] = location_dict.get('host', '$host')
location_dict['set_script_name'] = location_dict.get('set_script_name', '')
location_dict['misc'] = location_dict.get('misc', '')
location_dict['upstream_path'] = location_dict.get('upstream_path', '')
params = dict(location_dict.items()+ location_tmpl_params.items())
return location_tmpl % params
location_parameters = { 'upstream': domains[0], 'path': '/', 'host': flags.get('forceHost', '$host'),
'upstream_path': flags.get('upstream_path', '')}
if 'htpasswd_file' in flags:
location_parameters['misc'] = 'auth_basic "Restricted"; auth_basic_user_file %s;' % (flags['htpasswd_file'])
if 'location_extra' in flags:
location_parameters['misc'] = location_parameters['misc'] if 'misc' in location_parameters else ''
location_parameters['misc'] += flags['location_extra']
location = render_location(location_parameters)
location_ssl = location
upstreams = [{
'local_port': port,
'local_address': ip_addr,
'name': domains[0]
}]
if flags.get('sslToPort'):
upstream_name = "%s_ssl " % domains[0]
location_ssl = render_location({ 'upstream': upstream_name, 'path': '/', 'host': flags.get('forceHost', '$host')})
upstreams.append({
'local_port': flags.get('sslToPort'),
'local_address': ip_addr,
'name': upstream_name
})
if flags.get('httpsToHttpPaths'):
for path in flags.get('httpsToHttpPaths').split(','):
location_ssl += "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') })
other_locations = [{ 'upstream': domains[0], 'path': '@failover', 'host': flags.get('forceHost', '$host')}]
other_locations_https = []
path_idx = 0
for path, path_config in vhost.paths.items():
upstream_name = "%s_%s " % (domains[0], path_idx)
upstreams.append({
'local_port': path_config['port'],
'local_address': vm_map[path_config['host']]['local_address'],
'name': upstream_name
})
if path_config['secure']:
other_locations_https.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
else:
other_locations.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
error_page 500 = @failover;
proxy_intercept_errors on;
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
path_idx += 1
upstream_tmpl = 'upstream upstream_%(name)s { server %(local_address)s:%(local_port)s; }'
rewrites = ''
extra_directives = ''
if flags.get('block_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_deny.txt;
}
'''
if flags.get('allow_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_allow.txt;
}
'''
if 'server_config_extra' in flags:
extra_directives += flags['server_config_extra']
if flags.get('aliases'):
aliases = flags.get('aliases').split("\n")
for alias in aliases:
extra_directives += '''
location /%s {
alias %s;
}
''' % tuple(alias.strip().split('->'))
if vhost.rewrites:
rewrites += vhost.rewrites
location_http = location if flags.get('allow_http') else 'return 301 https://$host$request_uri;'
if flags.get('httpPaths'):
for path in flags.get('httpPaths').split(','):
location_http = "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') }) + "\n" + ''' location / { return 301 https://$host$request_uri; }
'''
format_args = {
'upstreams': "\n".join([upstream_tmpl % up for up in upstreams]),
'public_port': port,
'other_locations': "\n".join([render_location(location_dict) for location_dict in other_locations]),
'other_locations_https': "\n".join([render_location(location_dict) for location_dict in other_locations_https]),
'extra_directives': extra_directives,
'domain': domains[0],
'server_names': ' '.join(domains) if not flags.get('rewriteDomains') else domains[0],
'location': location_ssl,
'rewrites': rewrites,
'upload_limit': flags.get('uploadLimit', '20M'),
'location_http': location_http,
'cert_dir': CERT_DIR}
config = """
%(upstreams)s
server {
listen 80;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
%(rewrites)s
%(location_http)s
%(other_locations)s
%(extra_directives)s
}
""" % format_args
if not flags.get('noSsl'):
config += """
server {
listen 443 ssl;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
ssl on;
ssl_certificate %(cert_dir)s/%(domain)s.cer;
ssl_certificate_key %(cert_dir)s/%(domain)s.key;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!CAMELLIA;
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
ssl_prefer_server_ciphers on;
%(location)s
%(other_locations_https)s
%(extra_directives)s
}
""" % format_args
if flags.get('rewriteDomains'):
for domain in domains[1:]:
config += """
server {
listen 80;
server_name %(domain1)s;
return 301 http://%(domain2)s$request_uri;
}
""" % {'domain1': domain, 'domain2': domains[0]}
f = open('%s/%s' % (nginx_sites_available, domains[0]), 'w')
f.write(config)
f.close()
'''
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
'''
update_vhosts_config(common.get_applications_config())
update_vhosts(common.get_vhost_config())
| false
| true
|
790d281d9a6890459a8ef59f90819d90a838ec71
| 773
|
py
|
Python
|
do-word-vector-model.py
|
mathieu-lacage/sophiaconf2018
|
f4aa1f8fd6a0ba463a03335d9525e9194d94b0e3
|
[
"MIT"
] | 1
|
2018-07-11T22:01:21.000Z
|
2018-07-11T22:01:21.000Z
|
do-word-vector-model.py
|
mathieu-lacage/sophiaconf2018
|
f4aa1f8fd6a0ba463a03335d9525e9194d94b0e3
|
[
"MIT"
] | null | null | null |
do-word-vector-model.py
|
mathieu-lacage/sophiaconf2018
|
f4aa1f8fd6a0ba463a03335d9525e9194d94b0e3
|
[
"MIT"
] | null | null | null |
import optparse
import Utils
import gensim
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--dataset', default='sample')
parser.add_option('--size', default=300, type='int', help='vectors dimension. Default: %default')
parser.add_option('--window', default=5, type='int', help='window size. Default: %default')
parser.add_option('--min_count', default=5, type='int', help='Min count. Default: %default')
options, args = parser.parse_args()
documents = list(Utils.read_json('%s-tokenized.json' % options.dataset))
model = gensim.models.word2vec.Word2Vec(documents, size=options.size, window=options.window, min_count=options.min_count, workers=4)
model.save('%s-word-vector-model' % options.dataset)
main()
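# Hedged follow-up sketch (assumes the gensim 3.x API used above, where Word2Vec
# takes `size=`, and that the script ran with the default dataset name 'sample';
# _query_saved_model is a hypothetical helper, not part of this script).
def _query_saved_model():
    model = gensim.models.word2vec.Word2Vec.load('sample-word-vector-model')
    # 'example' is a placeholder token; it must appear at least min_count times.
    print(model.wv.most_similar('example', topn=5))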
| 38.65
| 136
| 0.702458
|
import optparse
import Utils
import gensim
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--dataset', default='sample')
parser.add_option('--size', default=300, type='int', help='vectors dimension. Default: %default')
parser.add_option('--window', default=5, type='int', help='window size. Default: %default')
parser.add_option('--min_count', default=5, type='int', help='Min count. Default: %default')
options, args = parser.parse_args()
documents = list(Utils.read_json('%s-tokenized.json' % options.dataset))
model = gensim.models.word2vec.Word2Vec(documents, size=options.size, window=options.window, min_count=options.min_count, workers=4)
model.save('%s-word-vector-model' % options.dataset)
main()
| true
| true
|
790d284d75eff85f735c2212c566822f71310c99
| 1,981
|
py
|
Python
|
ooobuild/lo/form/x_positioning_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/form/x_positioning_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/form/x_positioning_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.form
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from ..lang.event_object import EventObject as EventObject_a3d70b03
class XPositioningListener(XEventListener_c7230c4a):
"""
allows to receive notifications about cursor movements into a database form.
Please do not use anymore, this interface is deprecated, and superseded by functionality from the com.sun.star.form.component.DataForm service, as well as the com.sun.star.sdbc.XRowSetListener.
.. deprecated::
Class is deprecated.
See Also:
`API XPositioningListener <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1form_1_1XPositioningListener.html>`_
"""
__ooo_ns__: str = 'com.sun.star.form'
__ooo_full_ns__: str = 'com.sun.star.form.XPositioningListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.form.XPositioningListener'
@abstractmethod
def positioned(self, aEvent: 'EventObject_a3d70b03') -> None:
"""
is invoked when the database form has been positioned on a data record.
"""
__all__ = ['XPositioningListener']
| 37.377358
| 197
| 0.746593
|
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from ..lang.event_object import EventObject as EventObject_a3d70b03
class XPositioningListener(XEventListener_c7230c4a):
__ooo_ns__: str = 'com.sun.star.form'
__ooo_full_ns__: str = 'com.sun.star.form.XPositioningListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.form.XPositioningListener'
@abstractmethod
    def positioned(self, aEvent: 'EventObject_a3d70b03') -> None:
        ...
__all__ = ['XPositioningListener']
| true
| true
|
790d28571a496b84aac1533406d3da1d53904569
| 6,878
|
py
|
Python
|
docs/source/conf.py
|
cselab/CubismNova
|
cbd6876ae9b5864f82f3470b564132c92e0f2e00
|
[
"BSD-2-Clause"
] | 9
|
2020-01-27T01:17:19.000Z
|
2022-02-26T12:20:17.000Z
|
docs/source/conf.py
|
cselab/CubismNova
|
cbd6876ae9b5864f82f3470b564132c92e0f2e00
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/conf.py
|
cselab/CubismNova
|
cbd6876ae9b5864f82f3470b564132c92e0f2e00
|
[
"BSD-2-Clause"
] | 1
|
2021-04-01T07:48:39.000Z
|
2021-04-01T07:48:39.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import re
import sphinx_rtd_theme
import subprocess as sp
# -- Project information -----------------------------------------------------
project = 'CubismNova'
copyright = 'ETH Zurich'
author = 'Fabian Wermelinger'
sp.run('(cd .. && doxygen)', shell=True) # compile the xml source
v = sp.check_output('git describe --abbrev=0', shell=True).decode().strip()  # get version
# The short X.Y version
version = '.'.join(v.split('.')[:2])
# The full version, including alpha/beta/rc tags
release = v
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# breathe extension
breathe_default_project = "CubismNova"
breathe_projects = {
"CubismNova": "../doxygen/xml"
}
breathe_domain_by_extension = { "h" : "cpp", "cu" : "cpp" }
cpp_id_attributes = ['__device__', '__global__', '__host__']
cpp_paren_attributes = ['__align__']
# Tell sphinx what the primary language being documented is
primary_domain = 'cpp'
# Tell sphinx what the pygments highlight language should be
highlight_language = 'cpp'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = "CubismNova Documentation"
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CubismNovadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CubismNova.tex', 'CubismNova Documentation',
'Fabian Wermelinger', 'manual'),
]
# BibTeX files
bibtex_bibfiles = ['bibtex/references.bib']
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cubismnova', 'CubismNova Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CubismNova', 'CubismNova Documentation',
author, 'CubismNova', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.034934
| 79
| 0.664728
|
import re
import sphinx_rtd_theme
import subprocess as sp
project = 'CubismNova'
copyright = 'ETH Zurich'
author = 'Fabian Wermelinger'
sp.run('(cd .. && doxygen)', shell=True)
v = sp.check_output('git describe --abbrev=0', shell=True).decode().strip()
version = '.'.join(v.split('.')[:2])
release = v
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'breathe',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
breathe_default_project = "CubismNova"
breathe_projects = {
"CubismNova": "../doxygen/xml"
}
breathe_domain_by_extension = { "h" : "cpp", "cu" : "cpp" }
cpp_id_attributes = ['__device__', '__global__', '__host__']
cpp_paren_attributes = ['__align__']
primary_domain = 'cpp'
highlight_language = 'cpp'
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = "CubismNova Documentation"
html_domain_indices = True
html_use_index = True
html_split_index = False
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CubismNovadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CubismNova.tex', 'CubismNova Documentation',
'Fabian Wermelinger', 'manual'),
]
# BibTeX files
bibtex_bibfiles = ['bibtex/references.bib']
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cubismnova', 'CubismNova Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CubismNova', 'CubismNova Documentation',
author, 'CubismNova', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true
| true
|
790d294ef329695d1bf4f217ad39e15c34ee8dc8
| 3,406
|
py
|
Python
|
win32com/test/testStorage.py
|
zhanqxun/cv_fish
|
f78f4f5bdafb070c179efee8b9276719dfaef1d7
|
[
"Apache-2.0"
] | 3
|
2016-11-24T03:57:22.000Z
|
2019-02-27T15:19:50.000Z
|
Lib/site-packages/win32com/test/testStorage.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | 67
|
2016-10-19T01:23:47.000Z
|
2016-12-14T04:30:38.000Z
|
Lib/site-packages/win32com/test/testStorage.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | 4
|
2021-02-11T03:51:39.000Z
|
2021-02-12T05:10:43.000Z
|
from win32com import storagecon
import pythoncom, os, win32api
import win32com.test.util
import unittest
class TestEnum(win32com.test.util.TestCase):
def testit(self):
fname, tmp = win32api.GetTempFileName(win32api.GetTempPath(),'stg')
m=storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE
## file, mode, format, attrs (always 0), IID (IStorage or IPropertySetStorage, storage options(only used with STGFMT_DOCFILE)
pss=pythoncom.StgOpenStorageEx(fname, m, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage)
### {"Version":2,"reserved":0,"SectorSize":512,"TemplateFile":u'somefilename'})
## FMTID_SummaryInformation FMTID_DocSummaryInformation FMTID_UserDefinedProperties
psuser=pss.Create(pythoncom.FMTID_UserDefinedProperties,
pythoncom.IID_IPropertySetStorage,
storagecon.PROPSETFLAG_DEFAULT,
storagecon.STGM_READWRITE|storagecon.STGM_CREATE|storagecon.STGM_SHARE_EXCLUSIVE) ## its very picky about flag combinations!
psuser.WriteMultiple((3,4),('hey','bubba'))
psuser.WritePropertyNames((3,4),('property3','property4'))
expected_summaries = []
expected_summaries.append( ('property3', 3, pythoncom.VT_BSTR))
expected_summaries.append( ('property4', 4, pythoncom.VT_BSTR))
psuser=None
pssum=pss.Create(pythoncom.FMTID_SummaryInformation,
pythoncom.IID_IPropertySetStorage,
storagecon.PROPSETFLAG_DEFAULT,
storagecon.STGM_READWRITE|storagecon.STGM_CREATE|storagecon.STGM_SHARE_EXCLUSIVE)
pssum.WriteMultiple((storagecon.PIDSI_AUTHOR,storagecon.PIDSI_COMMENTS),('me', 'comment'))
pssum=None
pss=None ## doesn't seem to be a close or release method, and you can't even reopen it from the same process until previous object is gone
pssread=pythoncom.StgOpenStorageEx(fname, storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage)
found_summaries = []
for psstat in pssread:
ps=pssread.Open(psstat[0],storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE)
for p in ps:
p_val = ps.ReadMultiple((p[1],))[0]
if (p[1]==storagecon.PIDSI_AUTHOR and p_val=='me') or \
(p[1]==storagecon.PIDSI_COMMENTS and p_val=='comment'):
pass
else:
self.fail("Uxexpected property %s/%s" % (p, p_val))
ps=None
## FMTID_UserDefinedProperties can't exist without FMTID_DocSummaryInformation, and isn't returned independently from Enum
## also can't be open at same time
if psstat[0]==pythoncom.FMTID_DocSummaryInformation:
ps=pssread.Open(pythoncom.FMTID_UserDefinedProperties,storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE)
for p in ps:
found_summaries.append(p)
ps=None
psread=None
expected_summaries.sort()
found_summaries.sort()
self.assertEqual(expected_summaries, found_summaries)
if __name__=='__main__':
unittest.main()
| 54.935484
| 167
| 0.644745
|
from win32com import storagecon
import pythoncom, os, win32api
import win32com.test.util
import unittest
class TestEnum(win32com.test.util.TestCase):
def testit(self):
fname, tmp = win32api.GetTempFileName(win32api.GetTempPath(),'stg')
m=storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE
        pss=pythoncom.StgOpenStorageEx(fname, m, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage)
        psuser=pss.Create(pythoncom.FMTID_UserDefinedProperties,
                          pythoncom.IID_IPropertySetStorage,
                          storagecon.PROPSETFLAG_DEFAULT,
                          storagecon.STGM_READWRITE|storagecon.STGM_CREATE|storagecon.STGM_SHARE_EXCLUSIVE)
        psuser.WriteMultiple((3,4),('hey','bubba'))
psuser.WritePropertyNames((3,4),('property3','property4'))
expected_summaries = []
expected_summaries.append( ('property3', 3, pythoncom.VT_BSTR))
expected_summaries.append( ('property4', 4, pythoncom.VT_BSTR))
psuser=None
pssum=pss.Create(pythoncom.FMTID_SummaryInformation,
pythoncom.IID_IPropertySetStorage,
storagecon.PROPSETFLAG_DEFAULT,
storagecon.STGM_READWRITE|storagecon.STGM_CREATE|storagecon.STGM_SHARE_EXCLUSIVE)
pssum.WriteMultiple((storagecon.PIDSI_AUTHOR,storagecon.PIDSI_COMMENTS),('me', 'comment'))
pssum=None
        pss=None
        pssread=pythoncom.StgOpenStorageEx(fname, storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage)
found_summaries = []
for psstat in pssread:
ps=pssread.Open(psstat[0],storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE)
for p in ps:
p_val = ps.ReadMultiple((p[1],))[0]
if (p[1]==storagecon.PIDSI_AUTHOR and p_val=='me') or \
(p[1]==storagecon.PIDSI_COMMENTS and p_val=='comment'):
pass
else:
self.fail("Uxexpected property %s/%s" % (p, p_val))
ps=None
            if psstat[0]==pythoncom.FMTID_DocSummaryInformation:
                ps=pssread.Open(pythoncom.FMTID_UserDefinedProperties,storagecon.STGM_READ|storagecon.STGM_SHARE_EXCLUSIVE)
                for p in ps:
                    found_summaries.append(p)
                ps=None
psread=None
expected_summaries.sort()
found_summaries.sort()
self.assertEqual(expected_summaries, found_summaries)
if __name__=='__main__':
unittest.main()
| true
| true
|
790d297231d8844da46107086b521371549c91af
| 531
|
py
|
Python
|
ccal/make_reflecting_grid.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/make_reflecting_grid.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/make_reflecting_grid.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
from .check_nd_array_for_bad import check_nd_array_for_bad
def make_reflecting_grid(grid, reflecting_grid_value, raise_for_bad=True):
check_nd_array_for_bad(grid, raise_for_bad=raise_for_bad)
reflecting_grid = grid.copy()
for i, grid_value in enumerate(reflecting_grid):
if grid_value < reflecting_grid_value:
reflecting_grid[i] += (reflecting_grid_value - grid_value) * 2
else:
reflecting_grid[i] -= (grid_value - reflecting_grid_value) * 2
return reflecting_grid
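# Hedged usage sketch (assumes a 1-D numpy grid, which is what copy/enumerate above
# operate on; _demo_reflect is a hypothetical helper, not part of this module).
def _demo_reflect():
    import numpy as np

    grid = np.array([-2.0, -1.0, 0.5, 1.5])
    # Each value is mirrored across the reflecting value 0.0.
    print(make_reflecting_grid(grid, 0.0))  # [ 2.   1.  -0.5 -1.5]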
| 25.285714
| 74
| 0.730697
|
from .check_nd_array_for_bad import check_nd_array_for_bad
def make_reflecting_grid(grid, reflecting_grid_value, raise_for_bad=True):
check_nd_array_for_bad(grid, raise_for_bad=raise_for_bad)
reflecting_grid = grid.copy()
for i, grid_value in enumerate(reflecting_grid):
if grid_value < reflecting_grid_value:
reflecting_grid[i] += (reflecting_grid_value - grid_value) * 2
else:
reflecting_grid[i] -= (grid_value - reflecting_grid_value) * 2
return reflecting_grid
| true
| true
|
790d2988fdeebf0e2f1b10f4c63a81871e7ee883
| 17,901
|
py
|
Python
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostGroupAccountUserGroupAttachmentArgs', 'HostGroupAccountUserGroupAttachment']
@pulumi.input_type
class HostGroupAccountUserGroupAttachmentArgs:
def __init__(__self__, *,
host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]],
host_group_id: pulumi.Input[str],
instance_id: pulumi.Input[str],
user_group_id: pulumi.Input[str]):
"""
The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
pulumi.set(__self__, "host_account_names", host_account_names)
pulumi.set(__self__, "host_group_id", host_group_id)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
        A list of names of the host accounts.
"""
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Input[str]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Input[str]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _HostGroupAccountUserGroupAttachmentState:
def __init__(__self__, *,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host accounts.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
if host_account_names is not None:
pulumi.set(__self__, "host_account_names", host_account_names)
if host_group_id is not None:
pulumi.set(__self__, "host_group_id", host_group_id)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        A list of names of the host accounts.
"""
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_group_id", value)
class HostGroupAccountUserGroupAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host accounts.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostGroupAccountUserGroupAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostGroupAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostGroupAccountUserGroupAttachmentArgs.__new__(HostGroupAccountUserGroupAttachmentArgs)
if host_account_names is None and not opts.urn:
raise TypeError("Missing required property 'host_account_names'")
__props__.__dict__["host_account_names"] = host_account_names
if host_group_id is None and not opts.urn:
raise TypeError("Missing required property 'host_group_id'")
__props__.__dict__["host_group_id"] = host_group_id
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if user_group_id is None and not opts.urn:
raise TypeError("Missing required property 'user_group_id'")
__props__.__dict__["user_group_id"] = user_group_id
super(HostGroupAccountUserGroupAttachment, __self__).__init__(
'alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostGroupAccountUserGroupAttachment':
"""
Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host accounts.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
__props__.__dict__["host_account_names"] = host_account_names
__props__.__dict__["host_group_id"] = host_group_id
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["user_group_id"] = user_group_id
return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
"""
        A list of names of the host accounts.
"""
return pulumi.get(self, "host_account_names")
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Output[str]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Output[str]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
| 46.496104
| 171
| 0.669572
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostGroupAccountUserGroupAttachmentArgs', 'HostGroupAccountUserGroupAttachment']
@pulumi.input_type
class HostGroupAccountUserGroupAttachmentArgs:
def __init__(__self__, *,
host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]],
host_group_id: pulumi.Input[str],
instance_id: pulumi.Input[str],
user_group_id: pulumi.Input[str]):
pulumi.set(__self__, "host_account_names", host_account_names)
pulumi.set(__self__, "host_group_id", host_group_id)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _HostGroupAccountUserGroupAttachmentState:
def __init__(__self__, *,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
if host_account_names is not None:
pulumi.set(__self__, "host_account_names", host_account_names)
if host_group_id is not None:
pulumi.set(__self__, "host_group_id", host_group_id)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_group_id", value)
class HostGroupAccountUserGroupAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: HostGroupAccountUserGroupAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostGroupAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostGroupAccountUserGroupAttachmentArgs.__new__(HostGroupAccountUserGroupAttachmentArgs)
if host_account_names is None and not opts.urn:
raise TypeError("Missing required property 'host_account_names'")
__props__.__dict__["host_account_names"] = host_account_names
if host_group_id is None and not opts.urn:
raise TypeError("Missing required property 'host_group_id'")
__props__.__dict__["host_group_id"] = host_group_id
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if user_group_id is None and not opts.urn:
raise TypeError("Missing required property 'user_group_id'")
__props__.__dict__["user_group_id"] = user_group_id
super(HostGroupAccountUserGroupAttachment, __self__).__init__(
'alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostGroupAccountUserGroupAttachment':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
__props__.__dict__["host_account_names"] = host_account_names
__props__.__dict__["host_group_id"] = host_group_id
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["user_group_id"] = user_group_id
return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "host_account_names")
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "host_group_id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "user_group_id")
| true
| true
|
790d29c307d811537cc3f4315ef145e0729785ce
| 1,380
|
py
|
Python
|
buildtools/test_examples.py
|
loicgasser/ngeo
|
03a9376d281f9b4bff8a4b572ad73ef5a8df41f3
|
[
"MIT"
] | 17
|
2015-01-14T08:40:22.000Z
|
2021-05-08T04:39:50.000Z
|
buildtools/test_examples.py
|
haoyunZhou/ngeo
|
340ca60786470f10cf2d9e5c69c203af1589040c
|
[
"MIT"
] | 1,477
|
2015-01-05T09:58:41.000Z
|
2022-03-18T11:07:09.000Z
|
buildtools/test_examples.py
|
haoyunZhou/ngeo
|
340ca60786470f10cf2d9e5c69c203af1589040c
|
[
"MIT"
] | 14
|
2015-07-24T07:33:13.000Z
|
2021-03-02T13:51:48.000Z
|
#!/usr/bin/python
import re
import sys
import glob
import subprocess
BLACKLIST = [
"googlestreetview"
]
def main():
if len(sys.argv) > 1:
split_current, split_number = (int(v) for v in sys.argv[1].split("/"))
split_current = split_current - 1
else:
split_current, split_number = (0, 1)
return_code, split_current = check("contribs/gmf/apps", "", "contribs/gmf/apps/", split_current, split_number)
exit(return_code)
def check(folder, file_postfix, make_prefix, split_current, split_number):
return_code = 0
re_ = re.compile(r"^{}/([a-zA-Z_]*){}$".format(re.escape(folder), re.escape(file_postfix)))
for ex in glob.glob("{}/*{}".format(folder, file_postfix)):
match = re_.search(ex)
if match is not None and match.group(1) not in BLACKLIST:
if split_current == 0:
new_code = subprocess.call(
["make", ".build/{}{}.check.timestamp".format(make_prefix, match.group(1))]
)
print('The command "make .build/{}{}.check.timestamp" exited with {}'.format(
make_prefix, match.group(1), new_code
))
return_code = max(return_code, new_code)
split_current = (split_current + 1) % split_number
return return_code, split_current
if __name__ == '__main__':
main()
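# Illustrative invocation sketch (not part of the original ngeo script):
#   python test_examples.py 2/4
# parses to split_current=1 and split_number=4, so this job only builds every
# fourth matching example, and jobs 1/4 through 4/4 cover disjoint subsets of
# the checks.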
| 31.363636
| 114
| 0.603623
|
import re
import sys
import glob
import subprocess
BLACKLIST = [
"googlestreetview"
]
def main():
if len(sys.argv) > 1:
split_current, split_number = (int(v) for v in sys.argv[1].split("/"))
split_current = split_current - 1
else:
split_current, split_number = (0, 1)
return_code, split_current = check("contribs/gmf/apps", "", "contribs/gmf/apps/", split_current, split_number)
exit(return_code)
def check(folder, file_postfix, make_prefix, split_current, split_number):
return_code = 0
re_ = re.compile(r"^{}/([a-zA-Z_]*){}$".format(re.escape(folder), re.escape(file_postfix)))
for ex in glob.glob("{}/*{}".format(folder, file_postfix)):
match = re_.search(ex)
if match is not None and match.group(1) not in BLACKLIST:
if split_current == 0:
new_code = subprocess.call(
["make", ".build/{}{}.check.timestamp".format(make_prefix, match.group(1))]
)
print('The command "make .build/{}{}.check.timestamp" exited with {}'.format(
make_prefix, match.group(1), new_code
))
return_code = max(return_code, new_code)
split_current = (split_current + 1) % split_number
return return_code, split_current
if __name__ == '__main__':
main()
| true
| true
|
790d2b638700d84286592446f58d247ce9a6ad3d
| 1,369
|
py
|
Python
|
test/experiments/bench.py
|
aleyooop/realm-core
|
9874d5164927ea39273b241a5af14b596a3233e9
|
[
"Apache-2.0"
] | 977
|
2016-09-27T12:54:24.000Z
|
2022-03-29T08:08:47.000Z
|
test/experiments/bench.py
|
aleyooop/realm-core
|
9874d5164927ea39273b241a5af14b596a3233e9
|
[
"Apache-2.0"
] | 2,265
|
2016-09-27T13:01:26.000Z
|
2022-03-31T17:55:37.000Z
|
test/experiments/bench.py
|
aleyooop/realm-core
|
9874d5164927ea39273b241a5af14b596a3233e9
|
[
"Apache-2.0"
] | 154
|
2016-09-27T14:02:56.000Z
|
2022-03-27T14:51:00.000Z
|
import subprocess
subprocess.call(["/usr/bin/python", "innotest.py"])
print "1-0"
subprocess.call(["/usr/bin/time","-v","-otiming", "./innotest", "0", "1", "0"])
print "4-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "0"])
print "8-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "0"])
print "16-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "0"])
print "1-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "100000"])
print "4-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "100000"])
print "8-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "100000"])
print "16-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "100000"])
print "1-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "10000"])
print "4-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "10000"])
print "8-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "10000"])
print "16-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "10000"])
| 44.16129
| 95
| 0.569759
|
import subprocess
subprocess.call(["/usr/bin/python", "innotest.py"])
print "1-0"
subprocess.call(["/usr/bin/time","-v","-otiming", "./innotest", "0", "1", "0"])
print "4-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "0"])
print "8-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "0"])
print "16-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "0"])
print "1-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "100000"])
print "4-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "100000"])
print "8-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "100000"])
print "16-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "100000"])
print "1-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "10000"])
print "4-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "10000"])
print "8-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "10000"])
print "16-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "10000"])
| false
| true
|
790d2bbb0ec468df2b33e0107eed7a3f93190796
| 1,330
|
py
|
Python
|
locate.py
|
jdnietov/wazeReading
|
2b4567d7d32ba260d462ab95e1424f6d92090c17
|
[
"MIT"
] | 2
|
2020-06-09T12:15:08.000Z
|
2020-10-03T07:37:31.000Z
|
locate.py
|
jdnietov/wazeReading
|
2b4567d7d32ba260d462ab95e1424f6d92090c17
|
[
"MIT"
] | null | null | null |
locate.py
|
jdnietov/wazeReading
|
2b4567d7d32ba260d462ab95e1424f6d92090c17
|
[
"MIT"
] | null | null | null |
"""This module takes the data.log file produced by main.cpp and
fetches Bogota's addresses based on the coordinates in the file.
TODO: check if system has requirements - if not, install them
* requests
* subprocess (upcoming)
TODO: include exact time of match
TODO: progress bar
FIXME: select best from multiple addresses
"""
import requests
GOOGLE_MAPS_API_URL = 'http://maps.googleapis.com/maps/api/geocode/json'
LOGNAME = "data-wr.log"
DATANAME = "data-wr-addr.log"
def main():
"""Main function. Read coordinates, fetch addresses and write on file."""
logfile = open(LOGNAME, "r")
datafile = open(DATANAME, "w")
logfile.readline() # first line is always a date
print("fetching addresses...")
line = logfile.readline()
while not line.startswith("***") and line.strip():
cat, lat, lng = line.split(';')
latlng = "%s,%s" % (lat, lng)
params = {
'latlng': latlng
}
req = requests.get(GOOGLE_MAPS_API_URL, params=params)
res = req.json()
print(res)
result = res['results'][0]
address = result['formatted_address']
datafile.write("%s en %s |%s,%s" % (cat, address.partition(",")[0], lat, lng))
line = logfile.readline()
logfile.close()
datafile.close()
print("done.")
main()
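# Illustrative note on the expected input (not part of the original
# wazeReading script): each data line in data-wr.log is assumed to be
# "category;latitude;longitude", which is what the split(';') above relies
# on, e.g.
#   "HAZARD;4.6097;-74.0817"  # hypothetical values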
| 24.62963
| 86
| 0.630827
|
import requests
GOOGLE_MAPS_API_URL = 'http://maps.googleapis.com/maps/api/geocode/json'
LOGNAME = "data-wr.log"
DATANAME = "data-wr-addr.log"
def main():
logfile = open(LOGNAME, "r")
datafile = open(DATANAME, "w")
logfile.readline()
print("fetching addresses...")
line = logfile.readline()
while not line.startswith("***") and line.strip():
cat, lat, lng = line.split(';')
latlng = "%s,%s" % (lat, lng)
params = {
'latlng': latlng
}
req = requests.get(GOOGLE_MAPS_API_URL, params=params)
res = req.json()
print(res)
result = res['results'][0]
address = result['formatted_address']
datafile.write("%s en %s |%s,%s" % (cat, address.partition(",")[0], lat, lng))
line = logfile.readline()
logfile.close()
datafile.close()
print("done.")
main()
| true
| true
|
790d2df7fcda33af8f8f131cae867deb7bb8f242
| 792
|
py
|
Python
|
useful_scripts/split.py
|
UILXELA/Cooperative-3D-Object-Detection-Using-Shared-Raw-LIDAR-Data
|
84b3c792fcea5c618737855cd0d65c7b7b6e16f6
|
[
"MIT"
] | 6
|
2021-03-04T06:16:55.000Z
|
2022-01-11T07:12:16.000Z
|
useful_scripts/split.py
|
UILXELA/Cooperative-3D-Object-Detection-Using-Shared-Raw-LIDAR-Data
|
84b3c792fcea5c618737855cd0d65c7b7b6e16f6
|
[
"MIT"
] | null | null | null |
useful_scripts/split.py
|
UILXELA/Cooperative-3D-Object-Detection-Using-Shared-Raw-LIDAR-Data
|
84b3c792fcea5c618737855cd0d65c7b7b6e16f6
|
[
"MIT"
] | 2
|
2021-04-07T01:43:19.000Z
|
2021-12-06T14:47:36.000Z
|
import os
import shutil
#for i in range(8050,8051):
# old=str(i) + '.bin'
# new="../new/"+'%06d.bin' % i
# shutil.move(old,new)
file1 = open('a.txt', 'r')
Lines = file1.readlines()
file2 = open('b.txt', 'r')
Lines2 = file2.readlines()
calib_DIR='./calib/'
img_DIR='./image_2/'
label_DIR='./label_2/'
pcl_DIR='./velodyne/'
# Strips the newline character
for line in Lines2:
line=line.rstrip()
print(line)
pcl_fname=line+'.bin'
img_fname=line+'.png'
txt_fname=line+'.txt'
shutil.move(calib_DIR+txt_fname, "../testing/"+calib_DIR+txt_fname)
#shutil.move(label_DIR+txt_fname, "../testing/"+label_DIR+txt_fname)
#shutil.move(img_DIR+img_fname, "../testing/"+img_DIR+img_fname)
#shutil.move(pcl_DIR+pcl_fname, "../testing/"+pcl_DIR+pcl_fname)
| 26.4
| 72
| 0.655303
|
import os
import shutil
file1 = open('a.txt', 'r')
Lines = file1.readlines()
file2 = open('b.txt', 'r')
Lines2 = file2.readlines()
calib_DIR='./calib/'
img_DIR='./image_2/'
label_DIR='./label_2/'
pcl_DIR='./velodyne/'
for line in Lines2:
line=line.rstrip()
print(line)
pcl_fname=line+'.bin'
img_fname=line+'.png'
txt_fname=line+'.txt'
shutil.move(calib_DIR+txt_fname, "../testing/"+calib_DIR+txt_fname)
| true
| true
|
790d2ed14bdd402028a265d0e74f8c436e448b5c
| 4,659
|
py
|
Python
|
examples/versioned_rows/versioned_rows_w_versionid.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 2
|
2021-08-31T14:37:34.000Z
|
2021-11-17T14:09:59.000Z
|
examples/versioned_rows/versioned_rows_w_versionid.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 1
|
2021-01-25T09:53:34.000Z
|
2021-01-25T09:53:35.000Z
|
examples/versioned_rows/versioned_rows_w_versionid.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 2
|
2021-01-10T10:49:52.000Z
|
2021-01-13T09:34:27.000Z
|
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
This example adds a numerical version_id to the Versioned class as well
as the ability to see which row is the most "current" version.
"""
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned(object):
# we have a composite primary key consisting of "id"
# and "version_id"
id = Column(Integer, primary_key=True)
version_id = Column(Integer, primary_key=True, default=1)
# optional - add a persisted is_current_version column
is_current_version = Column(Boolean, default=True)
# optional - add a calculated is_current_version column
@classmethod
def __declare_last__(cls):
alias = cls.__table__.alias()
cls.calc_is_current_version = column_property(
select(func.max(alias.c.version_id) == cls.version_id).where(
alias.c.id == cls.id
)
)
def new_version(self, session):
# optional - set previous version to have is_current_version=False
old_id = self.id
session.query(self.__class__).filter_by(id=old_id).update(
values=dict(is_current_version=False), synchronize_session=False
)
# make us transient (removes persistent
# identity).
make_transient(self)
# increment version_id, which means we have a new PK.
self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
for instance in session.dirty:
if not isinstance(instance, Versioned):
continue
if not session.is_modified(instance, passive=True):
continue
if not attributes.instance_state(instance).has_identity:
continue
# make it transient
instance.new_version(session)
# re-add
session.add(instance)
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
class Example(Versioned, Base):
__tablename__ = "example"
data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(id=1, data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
assert (
session.query(
Example.id,
Example.version_id,
Example.is_current_version,
Example.calc_is_current_version,
Example.data,
)
.order_by(Example.id, Example.version_id)
.all()
== ([(1, 1, False, False, "e1"), (1, 2, True, True, "e2")])
)
# example 2, versioning with a parent
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
child_id = Column(Integer)
child_version_id = Column(Integer)
child = relationship("Child", backref=backref("parent", uselist=False))
__table_args__ = (
ForeignKeyConstraint(
["child_id", "child_version_id"], ["child.id", "child.version_id"]
),
)
class Child(Versioned, Base):
__tablename__ = "child"
data = Column(String)
def new_version(self, session):
# expire parent's reference to us
session.expire(self.parent, ["child"])
# create new version
Versioned.new_version(self, session)
# re-add ourselves to the parent. this causes the
# parent foreign key to be updated also
self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2
assert (
session.query(
Child.id,
Child.version_id,
Child.is_current_version,
Child.calc_is_current_version,
Child.data,
)
.order_by(Child.id, Child.version_id)
.all()
== ([(1, 1, False, False, "c1"), (1, 2, True, True, "c2")])
)
| 26.322034
| 78
| 0.683408
|
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned(object):
id = Column(Integer, primary_key=True)
version_id = Column(Integer, primary_key=True, default=1)
is_current_version = Column(Boolean, default=True)
@classmethod
def __declare_last__(cls):
alias = cls.__table__.alias()
cls.calc_is_current_version = column_property(
select(func.max(alias.c.version_id) == cls.version_id).where(
alias.c.id == cls.id
)
)
def new_version(self, session):
old_id = self.id
session.query(self.__class__).filter_by(id=old_id).update(
values=dict(is_current_version=False), synchronize_session=False
)
make_transient(self)
self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
for instance in session.dirty:
if not isinstance(instance, Versioned):
continue
if not session.is_modified(instance, passive=True):
continue
if not attributes.instance_state(instance).has_identity:
continue
instance.new_version(session)
session.add(instance)
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
class Example(Versioned, Base):
__tablename__ = "example"
data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(id=1, data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
assert (
session.query(
Example.id,
Example.version_id,
Example.is_current_version,
Example.calc_is_current_version,
Example.data,
)
.order_by(Example.id, Example.version_id)
.all()
== ([(1, 1, False, False, "e1"), (1, 2, True, True, "e2")])
)
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
child_id = Column(Integer)
child_version_id = Column(Integer)
child = relationship("Child", backref=backref("parent", uselist=False))
__table_args__ = (
ForeignKeyConstraint(
["child_id", "child_version_id"], ["child.id", "child.version_id"]
),
)
class Child(Versioned, Base):
__tablename__ = "child"
data = Column(String)
def new_version(self, session):
session.expire(self.parent, ["child"])
# create new version
Versioned.new_version(self, session)
# re-add ourselves to the parent. this causes the
# parent foreign key to be updated also
self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2
assert (
session.query(
Child.id,
Child.version_id,
Child.is_current_version,
Child.calc_is_current_version,
Child.data,
)
.order_by(Child.id, Child.version_id)
.all()
== ([(1, 1, False, False, "c1"), (1, 2, True, True, "c2")])
)
| true
| true
|
790d305c4c82dbbdb373cbd1da0b2ee0f0a34716
| 964
|
py
|
Python
|
genestack_client/unaligned_reads.py
|
genestack/python-client
|
083eb0508dc99c7575ba7f115595f2535f007583
|
[
"MIT"
] | 2
|
2017-08-30T22:32:59.000Z
|
2021-07-20T10:08:23.000Z
|
genestack_client/unaligned_reads.py
|
genestack/python-client
|
083eb0508dc99c7575ba7f115595f2535f007583
|
[
"MIT"
] | 58
|
2015-10-19T08:36:00.000Z
|
2020-12-07T13:48:17.000Z
|
genestack_client/unaligned_reads.py
|
genestack/python-client
|
083eb0508dc99c7575ba7f115595f2535f007583
|
[
"MIT"
] | 6
|
2015-10-21T21:43:45.000Z
|
2021-01-06T20:33:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
READS_LOCATION = 'genestack.location:reads'
READS_LINK = 'genestack.url:reads'
class Key(object):
SPACE = 'space'
FORMAT = 'format'
TYPE = 'type'
class Space(object):
BASESPACE = 'basespace'
COLORSPACE = 'colorspace'
class Format(object):
PHRED33 = 'phred33'
PHRED64 = 'phred64'
FASTA_QUAL = 'fasta-qual'
SRA = 'sra'
SFF = 'sff'
FAST5 = 'fast5'
class Type(object):
SINGLE = 'single'
PAIRED = 'paired'
PAIRED_WITH_UNPAIRED = 'paired-with-unpaired'
def compose_format_map(space, file_format, file_type):
return {Key.SPACE: space,
Key.FORMAT: file_format,
Key.TYPE: file_type}
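# Illustrative usage sketch (not part of the original genestack module),
# combining the constants defined above:
#   compose_format_map(Space.BASESPACE, Format.PHRED33, Type.PAIRED)
#   # -> {'space': 'basespace', 'format': 'phred33', 'type': 'paired'}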
| 21.422222
| 54
| 0.698133
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
READS_LOCATION = 'genestack.location:reads'
READS_LINK = 'genestack.url:reads'
class Key(object):
SPACE = 'space'
FORMAT = 'format'
TYPE = 'type'
class Space(object):
BASESPACE = 'basespace'
COLORSPACE = 'colorspace'
class Format(object):
PHRED33 = 'phred33'
PHRED64 = 'phred64'
FASTA_QUAL = 'fasta-qual'
SRA = 'sra'
SFF = 'sff'
FAST5 = 'fast5'
class Type(object):
SINGLE = 'single'
PAIRED = 'paired'
PAIRED_WITH_UNPAIRED = 'paired-with-unpaired'
def compose_format_map(space, file_format, file_type):
return {Key.SPACE: space,
Key.FORMAT: file_format,
Key.TYPE: file_type}
| true
| true
|
790d30a5ac77cee86f9c0feb7e0f532a8157e19e
| 4,447
|
py
|
Python
|
zdb/drawing/generate_html.py
|
shane-breeze/zdb-analysis
|
d00b154368e0bcde6a2415727d8ba7012521fba1
|
[
"MIT"
] | null | null | null |
zdb/drawing/generate_html.py
|
shane-breeze/zdb-analysis
|
d00b154368e0bcde6a2415727d8ba7012521fba1
|
[
"MIT"
] | 2
|
2019-04-22T15:11:38.000Z
|
2019-10-28T14:35:17.000Z
|
zdb/drawing/generate_html.py
|
shane-breeze/zdb-analysis
|
d00b154368e0bcde6a2415727d8ba7012521fba1
|
[
"MIT"
] | null | null | null |
import os
from dominate import document
import dominate.tags as tags
import shlex
import subprocess as sp
from tqdm.auto import tqdm
style = (
"""
#myInput {
background-image: url('/css/searchicon.png'); /* Add a search icon to input */
background-position: 10px 12px; /* Position the search icon */
background-repeat: no-repeat; /* Do not repeat the icon image */
width: 100%; /* Full-width */
font-size: 16px; /* Increase font-size */
padding: 12px 20px 12px 40px; /* Add some padding */
border: 1px solid #ddd; /* Add a grey border */
margin-bottom: 12px; /* Add some space below the input */
}
#myUL {
/* Remove default list styling */
list-style-type: none;
padding: 0;
margin: 0;
}
#myUL li a {
border: 1px solid #ddd; /* Add a border to all links */
margin-top: -1px; /* Prevent double borders */
background-color: #f6f6f6; /* Grey background color */
padding: 12px; /* Add some padding */
text-decoration: none; /* Remove default text underline */
font-size: 18px; /* Increase the font-size */
color: black; /* Add a black text color */
display: block; /* Make it into a block element to fill the whole list */
}
#myUL li a:hover:not(.header) {
background-color: #eee; /* Add a hover effect to all links, except for headers */
}
""")
style2 = (
"""
.row {
display: flex;
}
.column {
flex: 33.33%;
padding: 5px;
}
""")
def runcommand(cmd):
p = sp.run(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
return p.stdout, p.stderr
def generate_html(dirname, outdir, title="images"):
if not os.path.exists(outdir):
os.makedirs(outdir)
doc = document(title=title)
with doc.head:
tags.style(style)
with doc:
with tags.ul(id="myUL"):
for category in os.listdir(dirname):
tags.li(tags.a(category, href=category))
with open(os.path.join(outdir, "index.html"), 'w') as f:
f.write(doc.render())
pbar1 = tqdm(os.listdir(dirname), dynamic_ncols=False)
for category in pbar1:
pbar1.set_description(category)
if not os.path.exists(os.path.join(outdir, category)):
os.makedirs(os.path.join(outdir, category))
subdoc = document(title=category)
with subdoc.head:
tags.style(style)
with subdoc:
tags.a("back", href="..")
with tags.ul(id="myUL"):
for subcat in os.listdir(os.path.join(dirname, category)):
tags.li(tags.a(subcat, href=subcat))
with open(os.path.join(outdir, category, "index.html"), 'w') as f:
f.write(subdoc.render())
pbar2 = tqdm(os.listdir(os.path.join(dirname, category)), dynamic_ncols=False)
for subcat in pbar2:
pbar2.set_description(subcat)
if not os.path.exists(os.path.join(outdir, category, subcat)):
os.makedirs(os.path.join(outdir, category, subcat))
ssubdoc = document(title=subcat)
with ssubdoc.head:
tags.style(style2)
imgs = []
pbar3 = tqdm(os.listdir(os.path.join(dirname, category, subcat)), dynamic_ncols=False)
for img in pbar3:
pbar3.set_description(img)
imgpng = img.replace(".pdf", ".png")
imgs.append(imgpng)
runcommand(
"convert -density 150 {} -quality 100 {}".format(
os.path.join(dirname, category, subcat, img),
os.path.join(outdir, category, subcat, imgpng),
)
)
with ssubdoc:
tags.a("back", href="..")
ncols = 3
for idx in range(0, len(imgs), ncols):
with tags.div(_class="row"):
                # Clamp to len(imgs) so the last, possibly partial row still
                # renders every remaining image; clamping to len(imgs) - 1
                # silently drops the final image.
                final = min(idx + ncols, len(imgs))
for sidx in range(idx, final):
with tags.div(_class="column"):
tags.img(
src=imgs[sidx],
alt=os.path.splitext(imgs[sidx])[0],
style="height:500px",
)
with open(os.path.join(outdir, category, subcat, "index.html"), 'w') as f:
f.write(ssubdoc.render())
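# Illustrative usage sketch (not part of the original zdb module); the paths
# below are hypothetical:
#   generate_html("plots_pdf", "public_html", title="analysis plots")
#   # converts every PDF under plots_pdf/<category>/<subcat>/ to PNG via
#   # ImageMagick's `convert` and writes browsable index.html pages under
#   # public_html/.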
| 32.224638
| 98
| 0.54756
|
import os
from dominate import document
import dominate.tags as tags
import shlex
import subprocess as sp
from tqdm.auto import tqdm
style = (
"""
#myInput {
background-image: url('/css/searchicon.png'); /* Add a search icon to input */
background-position: 10px 12px; /* Position the search icon */
background-repeat: no-repeat; /* Do not repeat the icon image */
width: 100%; /* Full-width */
font-size: 16px; /* Increase font-size */
padding: 12px 20px 12px 40px; /* Add some padding */
border: 1px solid #ddd; /* Add a grey border */
margin-bottom: 12px; /* Add some space below the input */
}
#myUL {
/* Remove default list styling */
list-style-type: none;
padding: 0;
margin: 0;
}
#myUL li a {
border: 1px solid #ddd; /* Add a border to all links */
margin-top: -1px; /* Prevent double borders */
background-color: #f6f6f6; /* Grey background color */
padding: 12px; /* Add some padding */
text-decoration: none; /* Remove default text underline */
font-size: 18px; /* Increase the font-size */
color: black; /* Add a black text color */
display: block; /* Make it into a block element to fill the whole list */
}
#myUL li a:hover:not(.header) {
background-color: #eee; /* Add a hover effect to all links, except for headers */
}
""")
style2 = (
"""
.row {
display: flex;
}
.column {
flex: 33.33%;
padding: 5px;
}
""")
def runcommand(cmd):
p = sp.run(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
return p.stdout, p.stderr
def generate_html(dirname, outdir, title="images"):
if not os.path.exists(outdir):
os.makedirs(outdir)
doc = document(title=title)
with doc.head:
tags.style(style)
with doc:
with tags.ul(id="myUL"):
for category in os.listdir(dirname):
tags.li(tags.a(category, href=category))
with open(os.path.join(outdir, "index.html"), 'w') as f:
f.write(doc.render())
pbar1 = tqdm(os.listdir(dirname), dynamic_ncols=False)
for category in pbar1:
pbar1.set_description(category)
if not os.path.exists(os.path.join(outdir, category)):
os.makedirs(os.path.join(outdir, category))
subdoc = document(title=category)
with subdoc.head:
tags.style(style)
with subdoc:
tags.a("back", href="..")
with tags.ul(id="myUL"):
for subcat in os.listdir(os.path.join(dirname, category)):
tags.li(tags.a(subcat, href=subcat))
with open(os.path.join(outdir, category, "index.html"), 'w') as f:
f.write(subdoc.render())
pbar2 = tqdm(os.listdir(os.path.join(dirname, category)), dynamic_ncols=False)
for subcat in pbar2:
pbar2.set_description(subcat)
if not os.path.exists(os.path.join(outdir, category, subcat)):
os.makedirs(os.path.join(outdir, category, subcat))
ssubdoc = document(title=subcat)
with ssubdoc.head:
tags.style(style2)
imgs = []
pbar3 = tqdm(os.listdir(os.path.join(dirname, category, subcat)), dynamic_ncols=False)
for img in pbar3:
pbar3.set_description(img)
imgpng = img.replace(".pdf", ".png")
imgs.append(imgpng)
runcommand(
"convert -density 150 {} -quality 100 {}".format(
os.path.join(dirname, category, subcat, img),
os.path.join(outdir, category, subcat, imgpng),
)
)
with ssubdoc:
tags.a("back", href="..")
ncols = 3
for idx in range(0, len(imgs), ncols):
with tags.div(_class="row"):
                final = min(idx + ncols, len(imgs))
for sidx in range(idx, final):
with tags.div(_class="column"):
tags.img(
src=imgs[sidx],
alt=os.path.splitext(imgs[sidx])[0],
style="height:500px",
)
with open(os.path.join(outdir, category, subcat, "index.html"), 'w') as f:
f.write(ssubdoc.render())
| true
| true
|
790d325de15e853276794af0de33ed55b12ea191
| 26,410
|
py
|
Python
|
simulator_control/simulator_util.py
|
izinga/xctestrunner
|
799193e6ff1ce2feafe5077c3a4155760f5f723a
|
[
"Apache-2.0"
] | 1
|
2019-12-21T00:07:00.000Z
|
2019-12-21T00:07:00.000Z
|
simulator_control/simulator_util.py
|
ios-bazel-users/xctestrunner
|
5c2d20ab829efc18dfc507269820c7a0609187b7
|
[
"Apache-2.0"
] | null | null | null |
simulator_control/simulator_util.py
|
ios-bazel-users/xctestrunner
|
5c2d20ab829efc18dfc507269820c7a0609187b7
|
[
"Apache-2.0"
] | 1
|
2021-11-23T05:00:00.000Z
|
2021-11-23T05:00:00.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The utility class for simulator."""
import json
import logging
import os
import pwd
import re
import shutil
import subprocess
import time
from shared import ios_constants
from shared import ios_errors
from shared import plist_util
from shared import xcode_info_util
from simulator_control import simtype_profile
_SIMULATOR_STATES_MAPPING = {
0: ios_constants.SimState.CREATING,
1: ios_constants.SimState.SHUTDOWN,
3: ios_constants.SimState.BOOTED
}
_PREFIX_RUNTIME_ID = 'com.apple.CoreSimulator.SimRuntime.'
_SIM_OPERATION_MAX_ATTEMPTS = 3
_SIMCTL_MAX_ATTEMPTS = 2
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC = 10
_SIMULATOR_SHUTDOWN_TIMEOUT_SEC = 30
_SIM_ERROR_RETRY_INTERVAL_SEC = 2
_SIM_CHECK_STATE_INTERVAL_SEC = 0.5
_PATTERN_APP_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(UIKitApplication:%s(.+)\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\((.+)xctest\[[0-9]+\]\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_CORESIMULATOR_CRASH = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(com\.apple\.CoreSimulator(.+)\): Service exited due to ')
class Simulator(object):
"""The object for simulator in MacOS."""
def __init__(self, simulator_id):
"""Constructor of Simulator object.
Args:
simulator_id: string, the identity of the simulator.
"""
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
@property
def simulator_id(self):
if not self._simulator_id:
raise ios_errors.SimError(
'The simulator has not been created or has been deleted.')
return self._simulator_id
@property
def simulator_system_log_path(self):
return os.path.join(self.simulator_log_root_dir, 'system.log')
@property
def simulator_root_dir(self):
"""Gets the simulator's root directory."""
if not self._simulator_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_root_dir = os.path.join(
'%s/Library/Developer/CoreSimulator/Devices/%s' %
(home_dir, self.simulator_id))
return self._simulator_root_dir
@property
def simulator_log_root_dir(self):
"""Gets the root directory of the simulator's logs."""
if not self._simulator_log_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_log_root_dir = os.path.join(
'%s/Library/Logs/CoreSimulator/%s' % (home_dir, self.simulator_id))
return self._simulator_log_root_dir
@property
def device_plist_object(self):
"""Gets the plist_util.Plist object of device.plist of the simulator.
Returns:
a plist_util.Plist object of device.plist of the simulator or None when
the simulator does not exist or is being created.
"""
if not self._device_plist_object:
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if not os.path.exists(device_plist_path):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
def Shutdown(self):
"""Shuts down the simulator."""
sim_state = self.GetSimulatorState()
if sim_state == ios_constants.SimState.SHUTDOWN:
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if sim_state == ios_constants.SimState.CREATING:
raise ios_errors.SimError(
'Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if 'Unable to shutdown device in current state: Shutdown' in str(e):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError('Failed to shutdown simulator %s: %s' %
(self.simulator_id, str(e)))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
def Delete(self):
"""Deletes the simulator asynchronously.
The simulator state should be SHUTDOWN when deleting it. Otherwise, it will
raise exception.
Raises:
ios_errors.SimError: The simulator's state is not SHUTDOWN.
"""
# In Xcode 9+, simctl can delete Booted simulator. In prior of Xcode 9,
# we have to shutdown the simulator first before deleting it.
if xcode_info_util.GetXcodeVersionNumber() < 900:
sim_state = self.GetSimulatorState()
if sim_state != ios_constants.SimState.SHUTDOWN:
raise ios_errors.SimError(
'Can only delete the simulator with state SHUTDOWN. The current '
'state of simulator %s is %s.' % (self._simulator_id, sim_state))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setpgrp)
# The delete command won't delete the simulator log directory.
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
"""Gets simulator log via running `log` tool on simulator.
Args:
output_file_path: string, the path of the stdout file.
      start_time: datetime, the start time of the simulator log.
      end_time: datetime, the end time of the simulator log.
"""
command = [
'xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show',
'--style', 'syslog'
]
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to get log on simulator %s: %s' %
(self.simulator_id, str(e)))
def GetAppDocumentsPath(self, app_bundle_id):
"""Gets the path of the app's Documents directory."""
if xcode_info_util.GetXcodeVersionNumber() >= 830:
try:
app_data_container = RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id, 'data'
])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(
'Failed to get data container of the app %s in simulator %s: %s' %
(app_bundle_id, self._simulator_id, str(e)))
apps_dir = os.path.join(self.simulator_root_dir,
'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(
os.path.join(apps_dir, sub_dir_name,
'.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField(
'MCMMetadataIdentifier')
if current_app_bundle_id == app_bundle_id:
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(
'Failed to get Documents directory of the app %s in simulator %s' %
(app_bundle_id, self._simulator_id))
def IsAppInstalled(self, app_bundle_id):
"""Checks if the simulator has installed the app with given bundle id."""
try:
RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id
])
return True
except ios_errors.SimError:
return False
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
"""Waits until the simulator state becomes SHUTDOWN.
Args:
      timeout_sec: int, timeout in seconds to wait for the simulator state to
        become SHUTDOWN.
Raises:
      ios_errors.SimError: when waiting for the simulator state to become
        SHUTDOWN times out.
"""
start_time = time.time()
while start_time + timeout_sec >= time.time():
if self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN:
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
raise ios_errors.SimError('Timeout to wait for simulator shutdown in %ss.' %
timeout_sec)
def GetSimulatorState(self):
"""Gets the state of the simulator in real time.
Returns:
shared.ios_constants.SimState, the state of the simulator.
Raises:
ios_errors.SimError: The state can not be recognized.
"""
if self.device_plist_object is None:
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if state_num not in _SIMULATOR_STATES_MAPPING.keys():
logging.warning('The state %s of simulator %s can not be recognized.',
state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
"""Creates a new simulator according to arguments.
If neither device_type nor os_version is given, will use the latest iOS
version and latest iPhone type.
If os_version is given but device_type is not, will use latest iPhone type
according to the OS version limitation. E.g., if the given os_version is 9.3,
the latest simulator type is iPhone 6s Plus. Because the min OS version of
iPhone 7 is 10.0.
If device_type is given but os_version is not, will use the min value
between max OS version of the simulator type and current latest OS version.
E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,
will use 10.2. Because the max OS version of iPhone 5 is 10.2.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
name_prefix: string, name prefix of the new simulator. By default, it is
"New".
Returns:
a tuple with four items:
string, id of the new simulator.
string, simulator device type of the new simulator.
string, OS version of the new simulator.
string, name of the new simulator.
Raises:
ios_errors.SimError: when failed to create new simulator.
ios_errors.IllegalArgumentError: when the given argument is invalid.
"""
if not device_type:
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if not os_version:
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if os_version not in supported_sim_os_versions:
raise ios_errors.IllegalArgumentError(
'The simulator os version %s is not supported. Supported simulator '
'os versions are %s.' % (os_version, supported_sim_os_versions))
if not device_type:
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if not name_prefix:
name_prefix = 'New'
name = '%s-%s-%s' % (name_prefix, device_type, os_version)
# Example
# Runtime ID of iOS 10.2: com.apple.CoreSimulator.SimRuntime.iOS-10-2
runtime_id = _PREFIX_RUNTIME_ID + os_type + '-' + os_version.replace('.', '-')
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name,
os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(
['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to create simulator: %s' % str(e))
new_simulator_obj = Simulator(new_simulator_id)
# After creating a new simulator, its state is CREATING. When the
# simulator's state becomes SHUTDOWN, the simulator is created.
try:
new_simulator_obj.WaitUntilStateShutdown(
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return new_simulator_id, device_type, os_version, name
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id,
error)
logging.debug('Deleted half-created simulator %s.', new_simulator_id)
new_simulator_obj.Delete()
if i != _SIM_OPERATION_MAX_ATTEMPTS - 1:
logging.debug('Will sleep %ss and retry again.',
_SIM_ERROR_RETRY_INTERVAL_SEC)
        # If the simulator's state fails to become SHUTDOWN, there may be
        # something wrong in CoreSimulatorService. Sleeping a short interval (2s)
        # can help reduce flakiness.
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError('Failed to create simulator in %d attempts.' %
_SIM_OPERATION_MAX_ATTEMPTS)
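# Illustrative usage sketch for CreateNewSimulator (not part of the original
# module; the device type and OS version are example values that must exist in
# the local Xcode installation):
#
#   sim_id, sim_type, sim_os, sim_name = CreateNewSimulator(
#       device_type='iPhone 8', os_version='12.0', name_prefix='Test')
#   # -> sim_name == 'Test-iPhone 8-12.0', created from the runtime id
#   #    'com.apple.CoreSimulator.SimRuntime.iOS-12-0'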
def GetSupportedSimDeviceTypes(os_type=None):
"""Gets the name list of supported simulator device types of given OS type.
If os_type is not provided, it will return all supported simulator device
  types. The names are taken from the output of `xcrun simctl list devicetypes`.
  So some simulator device type names may differ across Xcode versions.
E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is
"iPad Pro (12.9-inch)" in Xcode 8+.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is a simulator device type.
E.g., ["iPhone 5", "iPhone 6 Plus"]
"""
# Example output:
# {
# "devicetypes" : [
# {
# "name" : "iPhone 5",
# "identifier" : "com.apple.CoreSimulator.SimDeviceType.iPhone-5"
# }
# ]
# }
#
# See more examples in testdata/simctl_list_devicetypes.json
sim_types_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if (os_type is None or
(os_type == ios_constants.OS.IOS and sim_type.startswith('i')) or
(os_type == ios_constants.OS.TVOS and 'TV' in sim_type) or
(os_type == ios_constants.OS.WATCHOS and 'Watch' in sim_type)):
sim_types.append(sim_type)
return sim_types
def GetLastSupportedIphoneSimType(os_version):
""""Gets the last supported iPhone simulator type of the given OS version.
Currently, the last supported iPhone simulator type is the last iPhone from
the output of `xcrun simctl list devicetypes`.
Args:
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Returns:
a string, the last supported iPhone simulator type.
Raises:
ios_errors.SimError: when there is no supported iPhone simulator type.
"""
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(
simtype_profile.SimTypeProfile(sim_type).min_os_version)
if os_version_float >= min_os_version_float:
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
"""Gets the supported version of given simulator OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]
"""
if os_type is None:
os_type = ios_constants.OS.IOS
# Example output:
# {
# "runtimes" : [
# {
# "bundlePath" : "\/Applications\/Xcode10.app\/Contents\/Developer\
# /Platforms\/iPhoneOS.platform\/Developer\/Library\
# /CoreSimulator\/Profiles\/Runtimes\/iOS.simruntime",
# "availabilityError" : "",
# "buildversion" : "16A366",
# "availability" : "(available)",
# "isAvailable" : true,
# "identifier" : "com.apple.CoreSimulator.SimRuntime.iOS-12-0",
# "version" : "12.0",
# "name" : "iOS 12.0"
# }
# }
# See more examples in testdata/simctl_list_runtimes.json
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
# Normally, the json does not contain unavailable runtimes. To be safe,
# also checks the 'availability' field.
if 'availability' in sim_runtime_info and sim_runtime_info[
'availability'].find('unavailable') >= 0:
continue
elif 'isAvailable' in sim_runtime_info and not sim_runtime_info[
'isAvailable']:
continue
listed_os_type, listed_os_version = sim_runtime_info['name'].split(' ', 1)
if listed_os_type == os_type:
# `bundlePath` key may not exist in the old Xcode/macOS version.
if 'bundlePath' in sim_runtime_info:
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(
os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if xcode_version_num >= min_xcode_version_num:
sim_versions.append(listed_os_version)
else:
if os_type == ios_constants.OS.IOS:
ios_major_version, ios_minor_version = listed_os_version.split('.', 1)
          # Ignores the potential build version
ios_minor_version = ios_minor_version[0]
ios_version_num = int(ios_major_version) * 100 + int(
ios_minor_version) * 10
# One Xcode version always maps to one max simulator's iOS version.
          # The rule is roughly max_sim_ios_version <= xcode_version + 200.
# E.g., Xcode 8.3.1/8.3.3 maps to iOS 10.3, Xcode 7.3.1 maps to iOS
# 9.3.
if ios_version_num > xcode_version_num + 200:
continue
sim_versions.append(listed_os_version)
return sim_versions
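# Worked example of the version check above (illustrative only): with
# Xcode 8.3.1, GetXcodeVersionNumber() yields 831 and the listed runtime
# "iOS 10.3" yields 10 * 100 + 3 * 10 = 1030; since 1030 <= 831 + 200 = 1031,
# the runtime is kept, whereas "iOS 11.0" (1100) would be skipped.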
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS,
device_type=None):
"""Gets the last supported version of given arguments.
If device_type is given, will return the last supported OS version of the
device type. Otherwise, will return the last supported OS version of the
OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
a string, the last supported version.
Raises:
ios_errors.SimError: when there is no supported OS version of the given OS.
ios_errors.IllegalArgumentError: when the supported OS version can not match
the given simulator type.
"""
supported_os_versions = GetSupportedSimOsVersions(os_type)
if not supported_os_versions:
raise ios_errors.SimError('Can not find supported OS version of %s.' %
os_type)
if not device_type:
return supported_os_versions[-1]
simtype_max_os_version_float = float(
simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if float(os_version) <= simtype_max_os_version_float:
return os_version
  raise ios_errors.IllegalArgumentError(
      'The supported OS versions %s can not match simulator type %s because '
      'its max OS version is %s.' %
      (supported_os_versions, device_type, simtype_max_os_version_float))
def GetOsType(device_type):
"""Gets the OS type of the given simulator.
  This method may not work correctly if the device_type is invalid. Please call
  simulator_util.ValidateSimulatorType(device_type, os_version) to validate
  it first.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
shared.ios_constants.OS.
Raises:
ios_errors.IllegalArgumentError: when the OS type of the given simulator
device type can not be recognized.
"""
if device_type.startswith('i'):
return ios_constants.OS.IOS
if 'TV' in device_type:
return ios_constants.OS.TVOS
if 'Watch' in device_type:
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(
'Failed to recognize the os type for simulator device type %s.' %
device_type)
def _ValidateSimulatorType(device_type):
"""Checks if the simulator type is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type is
invalid.
"""
supported_sim_device_types = GetSupportedSimDeviceTypes()
if device_type not in supported_sim_device_types:
raise ios_errors.IllegalArgumentError(
'The simulator device type %s is not supported. Supported simulator '
'device types are %s.' % (device_type, supported_sim_device_types))
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
"""Checks if the simulator type with the given os version is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type can
not match the given OS version.
"""
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if min_os_version_float > os_version_float:
raise ios_errors.IllegalArgumentError(
'The min OS version of %s is %s. But current OS version is %s' %
(device_type, min_os_version_float, os_version))
max_os_version_float = float(sim_profile.max_os_version)
if max_os_version_float < os_version_float:
raise ios_errors.IllegalArgumentError(
'The max OS version of %s is %s. But current OS version is %s' %
(device_type, max_os_version_float, os_version))
def QuitSimulatorApp():
"""Quits the Simulator.app."""
if xcode_info_util.GetXcodeVersionNumber() >= 700:
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
"""Checks if the app failed to launch on simulator.
If app_bundle_id is not provided, will check if any UIKitApplication failed
to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
app_bundle_id: string, the bundle id of the app.
Returns:
True if the app failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_APP_CRASH_ON_SIM % app_bundle_id)
return pattern.search(sim_sys_log) is not None
def IsXctestFailedToLaunchOnSim(sim_sys_log):
"""Checks if the xctest process failed to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the xctest process failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return pattern.search(sim_sys_log) is not None
def IsCoreSimulatorCrash(sim_sys_log):
"""Checks if CoreSimulator crashes.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the CoreSimulator crashes.
"""
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return pattern.search(sim_sys_log) is not None
def RunSimctlCommand(command):
"""Runs simctl command."""
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr:
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if process.poll() != 0:
if (i < (_SIMCTL_MAX_ATTEMPTS - 1) and
ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output):
continue
raise ios_errors.SimError(output)
return output
| 38.611111
| 80
| 0.699356
|
import json
import logging
import os
import pwd
import re
import shutil
import subprocess
import time
from shared import ios_constants
from shared import ios_errors
from shared import plist_util
from shared import xcode_info_util
from simulator_control import simtype_profile
_SIMULATOR_STATES_MAPPING = {
0: ios_constants.SimState.CREATING,
1: ios_constants.SimState.SHUTDOWN,
3: ios_constants.SimState.BOOTED
}
_PREFIX_RUNTIME_ID = 'com.apple.CoreSimulator.SimRuntime.'
_SIM_OPERATION_MAX_ATTEMPTS = 3
_SIMCTL_MAX_ATTEMPTS = 2
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC = 10
_SIMULATOR_SHUTDOWN_TIMEOUT_SEC = 30
_SIM_ERROR_RETRY_INTERVAL_SEC = 2
_SIM_CHECK_STATE_INTERVAL_SEC = 0.5
_PATTERN_APP_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(UIKitApplication:%s(.+)\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\((.+)xctest\[[0-9]+\]\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_CORESIMULATOR_CRASH = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(com\.apple\.CoreSimulator(.+)\): Service exited due to ')
class Simulator(object):
def __init__(self, simulator_id):
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
@property
def simulator_id(self):
if not self._simulator_id:
raise ios_errors.SimError(
'The simulator has not been created or has been deleted.')
return self._simulator_id
@property
def simulator_system_log_path(self):
return os.path.join(self.simulator_log_root_dir, 'system.log')
@property
def simulator_root_dir(self):
if not self._simulator_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_root_dir = os.path.join(
'%s/Library/Developer/CoreSimulator/Devices/%s' %
(home_dir, self.simulator_id))
return self._simulator_root_dir
@property
def simulator_log_root_dir(self):
if not self._simulator_log_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_log_root_dir = os.path.join(
'%s/Library/Logs/CoreSimulator/%s' % (home_dir, self.simulator_id))
return self._simulator_log_root_dir
@property
def device_plist_object(self):
if not self._device_plist_object:
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if not os.path.exists(device_plist_path):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
def Shutdown(self):
sim_state = self.GetSimulatorState()
if sim_state == ios_constants.SimState.SHUTDOWN:
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if sim_state == ios_constants.SimState.CREATING:
raise ios_errors.SimError(
'Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if 'Unable to shutdown device in current state: Shutdown' in str(e):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError('Failed to shutdown simulator %s: %s' %
(self.simulator_id, str(e)))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
def Delete(self):
if xcode_info_util.GetXcodeVersionNumber() < 900:
sim_state = self.GetSimulatorState()
if sim_state != ios_constants.SimState.SHUTDOWN:
raise ios_errors.SimError(
'Can only delete the simulator with state SHUTDOWN. The current '
'state of simulator %s is %s.' % (self._simulator_id, sim_state))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setpgrp)
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
command = [
'xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show',
'--style', 'syslog'
]
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to get log on simulator %s: %s' %
(self.simulator_id, str(e)))
def GetAppDocumentsPath(self, app_bundle_id):
if xcode_info_util.GetXcodeVersionNumber() >= 830:
try:
app_data_container = RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id, 'data'
])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(
'Failed to get data container of the app %s in simulator %s: %s' %
(app_bundle_id, self._simulator_id, str(e)))
apps_dir = os.path.join(self.simulator_root_dir,
'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(
os.path.join(apps_dir, sub_dir_name,
'.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField(
'MCMMetadataIdentifier')
if current_app_bundle_id == app_bundle_id:
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(
'Failed to get Documents directory of the app %s in simulator %s' %
(app_bundle_id, self._simulator_id))
def IsAppInstalled(self, app_bundle_id):
try:
RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id
])
return True
except ios_errors.SimError:
return False
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
start_time = time.time()
while start_time + timeout_sec >= time.time():
if self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN:
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
raise ios_errors.SimError('Timeout to wait for simulator shutdown in %ss.' %
timeout_sec)
def GetSimulatorState(self):
if self.device_plist_object is None:
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if state_num not in _SIMULATOR_STATES_MAPPING.keys():
logging.warning('The state %s of simulator %s can not be recognized.',
state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
if not device_type:
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if not os_version:
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if os_version not in supported_sim_os_versions:
raise ios_errors.IllegalArgumentError(
'The simulator os version %s is not supported. Supported simulator '
'os versions are %s.' % (os_version, supported_sim_os_versions))
if not device_type:
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if not name_prefix:
name_prefix = 'New'
name = '%s-%s-%s' % (name_prefix, device_type, os_version)
# Example
# Runtime ID of iOS 10.2: com.apple.CoreSimulator.SimRuntime.iOS-10-2
runtime_id = _PREFIX_RUNTIME_ID + os_type + '-' + os_version.replace('.', '-')
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name,
os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(
['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to create simulator: %s' % str(e))
new_simulator_obj = Simulator(new_simulator_id)
# After creating a new simulator, its state is CREATING. When the
# simulator's state becomes SHUTDOWN, the simulator is created.
try:
new_simulator_obj.WaitUntilStateShutdown(
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return new_simulator_id, device_type, os_version, name
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id,
error)
logging.debug('Deleted half-created simulator %s.', new_simulator_id)
new_simulator_obj.Delete()
if i != _SIM_OPERATION_MAX_ATTEMPTS - 1:
logging.debug('Will sleep %ss and retry again.',
_SIM_ERROR_RETRY_INTERVAL_SEC)
        # wrong in CoreSimulatorService. Sleeping a short interval (2s) can help
        # reduce flakiness.
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError('Failed to create simulator in %d attempts.' %
_SIM_OPERATION_MAX_ATTEMPTS)
def GetSupportedSimDeviceTypes(os_type=None):
# Example output:
# {
# "devicetypes" : [
# {
# "name" : "iPhone 5",
# "identifier" : "com.apple.CoreSimulator.SimDeviceType.iPhone-5"
# }
# ]
# }
#
# See more examples in testdata/simctl_list_devicetypes.json
sim_types_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if (os_type is None or
(os_type == ios_constants.OS.IOS and sim_type.startswith('i')) or
(os_type == ios_constants.OS.TVOS and 'TV' in sim_type) or
(os_type == ios_constants.OS.WATCHOS and 'Watch' in sim_type)):
sim_types.append(sim_type)
return sim_types
def GetLastSupportedIphoneSimType(os_version):
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(
simtype_profile.SimTypeProfile(sim_type).min_os_version)
if os_version_float >= min_os_version_float:
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
if os_type is None:
os_type = ios_constants.OS.IOS
# Example output:
# {
# "runtimes" : [
# {
# "bundlePath" : "\/Applications\/Xcode10.app\/Contents\/Developer\
# /Platforms\/iPhoneOS.platform\/Developer\/Library\
# /CoreSimulator\/Profiles\/Runtimes\/iOS.simruntime",
# "availabilityError" : "",
# "buildversion" : "16A366",
# "availability" : "(available)",
# "isAvailable" : true,
# "identifier" : "com.apple.CoreSimulator.SimRuntime.iOS-12-0",
# "version" : "12.0",
# "name" : "iOS 12.0"
# }
# }
# See more examples in testdata/simctl_list_runtimes.json
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
# Normally, the json does not contain unavailable runtimes. To be safe,
# also checks the 'availability' field.
if 'availability' in sim_runtime_info and sim_runtime_info[
'availability'].find('unavailable') >= 0:
continue
elif 'isAvailable' in sim_runtime_info and not sim_runtime_info[
'isAvailable']:
continue
listed_os_type, listed_os_version = sim_runtime_info['name'].split(' ', 1)
if listed_os_type == os_type:
# `bundlePath` key may not exist in the old Xcode/macOS version.
if 'bundlePath' in sim_runtime_info:
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(
os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if xcode_version_num >= min_xcode_version_num:
sim_versions.append(listed_os_version)
else:
if os_type == ios_constants.OS.IOS:
ios_major_version, ios_minor_version = listed_os_version.split('.', 1)
          # Ignores the potential build version
ios_minor_version = ios_minor_version[0]
ios_version_num = int(ios_major_version) * 100 + int(
ios_minor_version) * 10
# One Xcode version always maps to one max simulator's iOS version.
if ios_version_num > xcode_version_num + 200:
continue
sim_versions.append(listed_os_version)
return sim_versions
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS,
device_type=None):
supported_os_versions = GetSupportedSimOsVersions(os_type)
if not supported_os_versions:
raise ios_errors.SimError('Can not find supported OS version of %s.' %
os_type)
if not device_type:
return supported_os_versions[-1]
simtype_max_os_version_float = float(
simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if float(os_version) <= simtype_max_os_version_float:
return os_version
  raise ios_errors.IllegalArgumentError(
      'The supported OS versions %s can not match simulator type %s because '
      'its max OS version is %s.' %
      (supported_os_versions, device_type, simtype_max_os_version_float))
def GetOsType(device_type):
if device_type.startswith('i'):
return ios_constants.OS.IOS
if 'TV' in device_type:
return ios_constants.OS.TVOS
if 'Watch' in device_type:
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(
'Failed to recognize the os type for simulator device type %s.' %
device_type)
def _ValidateSimulatorType(device_type):
supported_sim_device_types = GetSupportedSimDeviceTypes()
if device_type not in supported_sim_device_types:
raise ios_errors.IllegalArgumentError(
'The simulator device type %s is not supported. Supported simulator '
'device types are %s.' % (device_type, supported_sim_device_types))
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if min_os_version_float > os_version_float:
raise ios_errors.IllegalArgumentError(
'The min OS version of %s is %s. But current OS version is %s' %
(device_type, min_os_version_float, os_version))
max_os_version_float = float(sim_profile.max_os_version)
if max_os_version_float < os_version_float:
raise ios_errors.IllegalArgumentError(
'The max OS version of %s is %s. But current OS version is %s' %
(device_type, max_os_version_float, os_version))
def QuitSimulatorApp():
if xcode_info_util.GetXcodeVersionNumber() >= 700:
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
pattern = re.compile(_PATTERN_APP_CRASH_ON_SIM % app_bundle_id)
return pattern.search(sim_sys_log) is not None
def IsXctestFailedToLaunchOnSim(sim_sys_log):
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return pattern.search(sim_sys_log) is not None
def IsCoreSimulatorCrash(sim_sys_log):
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return pattern.search(sim_sys_log) is not None
def RunSimctlCommand(command):
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr:
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if process.poll() != 0:
if (i < (_SIMCTL_MAX_ATTEMPTS - 1) and
ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output):
continue
raise ios_errors.SimError(output)
return output
| true
| true
|
790d325e7f78793db587c57e262cb3e64ce06a05
| 3,277
|
py
|
Python
|
src/rosrepo/cmd_clean.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | 5
|
2016-09-06T08:02:10.000Z
|
2018-06-10T20:45:21.000Z
|
src/rosrepo/cmd_clean.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | 2
|
2019-03-11T21:44:50.000Z
|
2020-03-17T09:20:47.000Z
|
src/rosrepo/cmd_clean.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from .workspace import get_workspace_location, get_workspace_state, resolve_this, find_ros_root
from .config import Config
from .cache import Cache
from .ui import msg, warning, fatal, show_conflicts
from .util import call_process, PIPE
from .resolver import find_dependees
import os
try:
from os import scandir
except ImportError:
from scandir import scandir
def run(args):
wsdir = get_workspace_location(args.workspace)
config = Config(wsdir)
cache = Cache(wsdir)
ros_rootdir = find_ros_root(config.get("ros_root", None))
if ros_rootdir is None:
fatal("cannot detect ROS distribution. Have you sourced your setup.bash?\n")
if args.this:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = resolve_this(wsdir, ws_state)
elif args.vanished or args.unused:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = []
for d in scandir(os.path.join(wsdir, "build")):
if d.is_dir() and d.name not in ws_state.ws_packages and not d.name == "catkin_tools_prebuild":
args.packages.append(d.name)
if args.unused:
depends, _, conflicts = find_dependees(config["pinned_build"] + config["default_build"], ws_state, ignore_missing=True)
show_conflicts(conflicts)
if conflicts:
fatal("cannot resolve dependencies\n")
unused_packages = set(ws_state.ws_packages) - set(depends.keys())
args.packages += [p for p in unused_packages if os.path.isdir(os.path.join(wsdir, "build", p))]
if not args.packages:
msg("Nothing to clean\n")
return 0
if not args.dry_run:
invoke = ["catkin", "config", "--extend", ros_rootdir]
call_process(invoke, stdout=PIPE, stderr=PIPE)
config["last_ros_root"] = ros_rootdir
config.write()
catkin_clean = ["catkin", "clean", "--workspace", wsdir, "--yes"]
if args.dry_run:
catkin_clean.append("--dry-run")
catkin_clean += args.packages or ["--all"]
return call_process(catkin_clean)
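# Hypothetical invocation sketch (rosrepo normally fills `args` via its CLI
# parser; the attribute names below mirror the ones accessed in run() above):
#
#   from argparse import Namespace
#   args = Namespace(workspace=None, this=False, vanished=False, unused=True,
#                    offline=None, packages=[], dry_run=True)
#   run(args)   # would perform a dry-run clean of unused packages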
| 39.481928
| 131
| 0.676533
|
from .workspace import get_workspace_location, get_workspace_state, resolve_this, find_ros_root
from .config import Config
from .cache import Cache
from .ui import msg, warning, fatal, show_conflicts
from .util import call_process, PIPE
from .resolver import find_dependees
import os
try:
from os import scandir
except ImportError:
from scandir import scandir
def run(args):
wsdir = get_workspace_location(args.workspace)
config = Config(wsdir)
cache = Cache(wsdir)
ros_rootdir = find_ros_root(config.get("ros_root", None))
if ros_rootdir is None:
fatal("cannot detect ROS distribution. Have you sourced your setup.bash?\n")
if args.this:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = resolve_this(wsdir, ws_state)
elif args.vanished or args.unused:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = []
for d in scandir(os.path.join(wsdir, "build")):
if d.is_dir() and d.name not in ws_state.ws_packages and not d.name == "catkin_tools_prebuild":
args.packages.append(d.name)
if args.unused:
depends, _, conflicts = find_dependees(config["pinned_build"] + config["default_build"], ws_state, ignore_missing=True)
show_conflicts(conflicts)
if conflicts:
fatal("cannot resolve dependencies\n")
unused_packages = set(ws_state.ws_packages) - set(depends.keys())
args.packages += [p for p in unused_packages if os.path.isdir(os.path.join(wsdir, "build", p))]
if not args.packages:
msg("Nothing to clean\n")
return 0
if not args.dry_run:
invoke = ["catkin", "config", "--extend", ros_rootdir]
call_process(invoke, stdout=PIPE, stderr=PIPE)
config["last_ros_root"] = ros_rootdir
config.write()
catkin_clean = ["catkin", "clean", "--workspace", wsdir, "--yes"]
if args.dry_run:
catkin_clean.append("--dry-run")
catkin_clean += args.packages or ["--all"]
return call_process(catkin_clean)
| true
| true
|
790d332218d7579e252524bfddb5c57cef5aeced
| 5,527
|
py
|
Python
|
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
0az/mtgjson
|
64e4e0a452911418e608df932fbf12af5dcb1a35
|
[
"MIT"
] | null | null | null |
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
0az/mtgjson
|
64e4e0a452911418e608df932fbf12af5dcb1a35
|
[
"MIT"
] | null | null | null |
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
0az/mtgjson
|
64e4e0a452911418e608df932fbf12af5dcb1a35
|
[
"MIT"
] | null | null | null |
"""
MTGJSON EnumValues Object
"""
import json
import logging
import pathlib
from typing import Any, Dict, List, Union
from ..compiled_classes.mtgjson_all_printings import MtgjsonAllPrintingsObject
from ..consts import OUTPUT_PATH
from ..utils import sort_internal_lists
from .mtgjson_structures import MtgjsonStructuresObject
LOGGER = logging.getLogger(__name__)
class MtgjsonEnumValuesObject:
"""
MTGJSON EnumValues Object
"""
attr_value_dict: Dict[str, Union[Dict[str, List[str]], List[str]]]
set_key_struct = {
"card": [
"availability",
"borderColor",
"colorIdentity",
"colorIndicator",
"colors",
"duelDeck",
"frameEffects",
"frameVersion",
"layout",
"promoTypes",
"rarity",
"side",
"subtypes",
"supertypes",
"types",
"watermark",
],
"set": ["type"],
"foreignData": ["language"],
}
deck_key_struct = {"deck": ["type"]}
def __init__(self) -> None:
"""
Initializer to build the internal mapping
"""
self.attr_value_dict = {}
set_and_cards = self.construct_set_and_card_enums(
MtgjsonAllPrintingsObject().to_json()
)
self.attr_value_dict.update(set_and_cards)
decks = self.construct_deck_enums(OUTPUT_PATH.joinpath("decks"))
self.attr_value_dict.update(decks)
# Load in pre-generated Keywords content
keywords = OUTPUT_PATH.joinpath(MtgjsonStructuresObject().key_words + ".json")
if not keywords.is_file():
LOGGER.warning(f"Unable to find {keywords}")
else:
with keywords.open(encoding="utf-8") as file:
content = json.load(file).get("data", {})
self.attr_value_dict.update({"keywords": content})
def construct_deck_enums(self, decks_directory: pathlib.Path) -> Dict[str, Any]:
"""
Given Decks Path, compile enums based on the types found in the files
:param decks_directory: Path to the decks/ output directory
:return Sorted list of enum options for each key
"""
type_map: Dict[str, Any] = {}
for object_name, object_values in self.deck_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for deck in decks_directory.glob("**/*.json"):
with deck.open(encoding="utf-8") as file:
content = json.load(file).get("data", {})
for key in content.keys():
if key in self.deck_key_struct["deck"]:
type_map["deck"][key].add(content[key])
return dict(sort_internal_lists(type_map))
def construct_set_and_card_enums(
self, all_printing_content: Dict[str, Any]
) -> Dict[str, Any]:
"""
Given AllPrintings, compile enums based on the types found in the file
:param all_printing_content: AllPrintings internally
:return Sorted list of enum options for each key
"""
type_map: Dict[str, Any] = {}
for object_name, object_values in self.set_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for set_contents in all_printing_content.values():
for set_contents_key in set_contents.keys():
if set_contents_key in self.set_key_struct["set"]:
type_map["set"][set_contents_key].add(
set_contents.get(set_contents_key)
)
match_keys = set(self.set_key_struct["card"]).union(
set(self.set_key_struct.keys())
)
for card in set_contents.get("cards", []) + set_contents.get("tokens", []):
for card_key in card.keys():
if card_key not in match_keys:
continue
# Get the value when actually needed
card_value = card[card_key]
# For Dicts, we just enum the keys
if isinstance(card_value, dict):
for value in card_value.keys():
type_map["card"][card_key].add(value)
continue
# String, Integer, etc can be added as-is
if not isinstance(card_value, list):
type_map["card"][card_key].add(card_value)
continue
for single_value in card_value:
# Iterating a non-dict is fine
if not isinstance(single_value, dict):
type_map["card"][card_key].add(single_value)
continue
# Internal attributes are sometimes added
for attribute in self.set_key_struct.get(card_key, []):
type_map[card_key][attribute].add(single_value[attribute])
return dict(sort_internal_lists(type_map))
def to_json(self) -> Dict[str, Union[Dict[str, List[str]], List[str]]]:
"""
Support json.dump()
:return: JSON serialized object
"""
return self.attr_value_dict
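# Illustrative usage sketch (not part of the original module; in MTGJSON this
# object is normally built by the compiler pipeline after AllPrintings and the
# decks/ output have been generated):
#
#   enum_values = MtgjsonEnumValuesObject()
#   with OUTPUT_PATH.joinpath("EnumValues.json").open("w", encoding="utf-8") as fp:
#       json.dump(enum_values.to_json(), fp, indent=4)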
| 35.658065
| 87
| 0.564502
|
import json
import logging
import pathlib
from typing import Any, Dict, List, Union
from ..compiled_classes.mtgjson_all_printings import MtgjsonAllPrintingsObject
from ..consts import OUTPUT_PATH
from ..utils import sort_internal_lists
from .mtgjson_structures import MtgjsonStructuresObject
LOGGER = logging.getLogger(__name__)
class MtgjsonEnumValuesObject:
attr_value_dict: Dict[str, Union[Dict[str, List[str]], List[str]]]
set_key_struct = {
"card": [
"availability",
"borderColor",
"colorIdentity",
"colorIndicator",
"colors",
"duelDeck",
"frameEffects",
"frameVersion",
"layout",
"promoTypes",
"rarity",
"side",
"subtypes",
"supertypes",
"types",
"watermark",
],
"set": ["type"],
"foreignData": ["language"],
}
deck_key_struct = {"deck": ["type"]}
def __init__(self) -> None:
self.attr_value_dict = {}
set_and_cards = self.construct_set_and_card_enums(
MtgjsonAllPrintingsObject().to_json()
)
self.attr_value_dict.update(set_and_cards)
decks = self.construct_deck_enums(OUTPUT_PATH.joinpath("decks"))
self.attr_value_dict.update(decks)
keywords = OUTPUT_PATH.joinpath(MtgjsonStructuresObject().key_words + ".json")
if not keywords.is_file():
LOGGER.warning(f"Unable to find {keywords}")
else:
with keywords.open(encoding="utf-8") as file:
content = json.load(file).get("data", {})
self.attr_value_dict.update({"keywords": content})
def construct_deck_enums(self, decks_directory: pathlib.Path) -> Dict[str, Any]:
type_map: Dict[str, Any] = {}
for object_name, object_values in self.deck_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for deck in decks_directory.glob("**/*.json"):
with deck.open(encoding="utf-8") as file:
content = json.load(file).get("data", {})
for key in content.keys():
if key in self.deck_key_struct["deck"]:
type_map["deck"][key].add(content[key])
return dict(sort_internal_lists(type_map))
def construct_set_and_card_enums(
self, all_printing_content: Dict[str, Any]
) -> Dict[str, Any]:
type_map: Dict[str, Any] = {}
for object_name, object_values in self.set_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for set_contents in all_printing_content.values():
for set_contents_key in set_contents.keys():
if set_contents_key in self.set_key_struct["set"]:
type_map["set"][set_contents_key].add(
set_contents.get(set_contents_key)
)
match_keys = set(self.set_key_struct["card"]).union(
set(self.set_key_struct.keys())
)
for card in set_contents.get("cards", []) + set_contents.get("tokens", []):
for card_key in card.keys():
if card_key not in match_keys:
continue
card_value = card[card_key]
if isinstance(card_value, dict):
for value in card_value.keys():
type_map["card"][card_key].add(value)
continue
if not isinstance(card_value, list):
type_map["card"][card_key].add(card_value)
continue
for single_value in card_value:
if not isinstance(single_value, dict):
type_map["card"][card_key].add(single_value)
continue
for attribute in self.set_key_struct.get(card_key, []):
type_map[card_key][attribute].add(single_value[attribute])
return dict(sort_internal_lists(type_map))
def to_json(self) -> Dict[str, Union[Dict[str, List[str]], List[str]]]:
return self.attr_value_dict
| true
| true
|
790d33e0ea2fbbf035d535622ae8ec9f4d3c9764
| 2,745
|
py
|
Python
|
01_Introduction/C0106_operations.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | 7
|
2019-11-30T05:42:47.000Z
|
2021-10-09T03:02:19.000Z
|
01_Introduction/C0106_operations.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | null | null | null |
01_Introduction/C0106_operations.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | 2
|
2019-12-05T06:44:48.000Z
|
2021-10-09T03:02:20.000Z
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0106_operations.py
@Version : v0.1
@Time : 2019-10-29 14:11
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : 《TensorFlow机器学习实战指南,Nick McClure》, Sec0106,P110
@Desc       : TensorFlow basics, declaring operations
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
# Configure numpy print options (precision, line width, suppression of scientific notation)
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seed so that the random data is stable and every test run gives the same result
np.random.seed(42)
# Reset the default computation graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
show_values(tf.div(3, 4), "tf.div(3,4) = 整数除")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = 浮点除")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = 浮点取整除")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = 取模")
# Cross product -- Compute the pairwise cross product
# The cross product (also called vector product or outer product) of two vectors
# is a vector rather than a scalar.
# The resulting vector is perpendicular to the plane spanned by the two input vectors.
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
"tf.cross([1., 0., 0.], [0., 1., 0.]) = 张量点积")
# tf.cross requires 3-dimensional vectors
# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),
#             "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = cross product")
# ToSee: P11, list of math functions
show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
"tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))")
test_nums = range(15)
# What should we get with list comprehension
expected_output = [3 * x * x - x + 10 for x in test_nums]
print('-' * 50)
print("[3 * x ^ 2 - x + 10 for x in test_nums] = ")
print(expected_output)
# Custom function
# 3x^2 - x + 10, at x = 11 =>
def custom_polynomial(value):
# return tf.subtract(3 * tf.square(value), value) + 10
return 3 * tf.square(value) - value + 10
show_values(custom_polynomial(11), "custom_polynomial(11) = 3x^2-x+10,x=11=>")
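# Hand check of the call above: 3 * 11 ** 2 - 11 + 10 = 363 - 11 + 10 = 362,
# so custom_polynomial(11) should print 362.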
for num in test_nums:
show_values(custom_polynomial(num), "custom_polynomial({})".format(num))
# -----------------------------------------------------------------
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
| 30.842697
| 99
| 0.637887
|
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
np.random.seed(42)
ops.reset_default_graph()
assert sys.version_info >= (3, 5)
assert sklearn.__version__ >= "0.20"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sess = tf.Session()
show_values(tf.div(3, 4), "tf.div(3,4) = 整数除")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = 浮点除")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = 浮点取整除")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = 取模")
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
"tf.cross([1., 0., 0.], [0., 1., 0.]) = 张量点积")
show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
"tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))")
test_nums = range(15)
expected_output = [3 * x * x - x + 10 for x in test_nums]
print('-' * 50)
print("[3 * x ^ 2 - x + 10 for x in test_nums] = ")
print(expected_output)
def custom_polynomial(value):
return 3 * tf.square(value) - value + 10
show_values(custom_polynomial(11), "custom_polynomial(11) = 3x^2-x+10,x=11=>")
for num in test_nums:
show_values(custom_polynomial(num), "custom_polynomial({})".format(num))
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
| true
| true
|
790d34d501f0113b2f8b6c70872c3f6647520f89
| 36,551
|
py
|
Python
|
repro_eval/Evaluator.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 8
|
2020-10-27T02:11:53.000Z
|
2022-03-02T11:00:10.000Z
|
repro_eval/Evaluator.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 2
|
2021-01-25T19:59:39.000Z
|
2021-12-07T09:29:01.000Z
|
repro_eval/Evaluator.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 1
|
2021-04-16T16:21:16.000Z
|
2021-04-16T16:21:16.000Z
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
"""
An abstract evaluator that holds the original baseline and advanced run as well as
the reproduced/replicated baseline and advanced run.
"""
def __init__(self, **kwargs):
self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
self.run_b_orig = None
self.run_a_orig = None
self.run_b_rep = None
self.run_a_rep = None
self.run_b_orig_score = None
self.run_a_orig_score = None
self.run_b_rep_score = None
self.run_a_rep_score = None
if self.qrel_orig_path:
with open(self.qrel_orig_path, 'r') as f_qrel:
qrel_orig = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
if self.run_b_orig_path:
with open(self.run_b_orig_path, 'r') as f_run:
self.run_b_orig = pytrec_eval.parse_run(f_run)
self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
if self.run_a_orig_path:
with open(self.run_a_orig_path, 'r') as f_run:
self.run_a_orig = pytrec_eval.parse_run(f_run)
self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
if self.run_b_rep_path:
with open(self.run_b_rep_path, 'r') as f_run:
self.run_b_rep = pytrec_eval.parse_run(f_run)
self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
if self.run_a_rep_path:
with open(self.run_a_rep_path, 'r') as f_run:
self.run_a_rep = pytrec_eval.parse_run(f_run)
self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
def trim(self, t=None, run=None):
"""
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
"""
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
def evaluate(self, run=None):
"""
Evaluates the original baseline and advanced run if available.
@param run: Reproduced or replicated run that will be evaluated.
"""
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
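    # Illustrative workflow sketch (not part of the original class; RpdEvaluator
    # is the reproducibility subclass referenced in the docstrings below, and the
    # file paths are placeholders):
    #
    #   rpd_eval = RpdEvaluator(qrel_orig_path='qrels.txt',
    #                           run_b_orig_path='orig_b.txt',
    #                           run_a_orig_path='orig_a.txt',
    #                           run_b_rep_path='rpd_b.txt',
    #                           run_a_rep_path='rpd_a.txt')
    #   rpd_eval.trim(1000)    # compare the top-1000 documents only
    #   rpd_eval.evaluate()    # score the runs with pytrec_eval
    #   er_scores = rpd_eval.er(print_feedback=True)
    #   dri_scores = rpd_eval.dri(print_feedback=True)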
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Effect Ratio (ER) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ER value is determined by the ratio between the mean improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the ER values for the specified run combination.
"""
if print_feedback:
print('Determining Effect Ratio (ER)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
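    # Sketch of the measure computed above (paraphrased from the cited SIGIR 2020
    # paper): with the per-topic improvement delta = advanced - baseline,
    #   ER = mean(delta_reproduced) / mean(delta_original)
    # so ER == 1 means the improvement over the baseline was fully reproduced or
    # replicated, ER < 1 a smaller and ER > 1 a larger effect than in the original.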
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The DeltaRI value is determined by the difference between the relative improvements
of the original and reproduced/replicated experiments.
        @param run_b_score: Scores of the baseline run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_a_score: Scores of the advanced run,
                            if not provided the scores of the RpdEvaluator object will be used instead.
        @param run_b_path: Path to another reproduced/replicated baseline run,
                           if provided it is parsed and evaluated in place of the stored scores.
        @param run_a_path: Path to another reproduced/replicated advanced run,
                           if provided it is parsed and evaluated in place of the stored scores.
        @param print_feedback: Boolean value indicating if feedback on progress should be printed.
        @return: Dictionary containing the DRI values for the specified run combination.
"""
if print_feedback:
print('Determining Delta Relative Improvement (DRI)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
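# A companion sketch for DeltaRI, under the same assumptions as the ER sketch above
# (hypothetical helper, pytrec_eval-style score dicts): the relative improvement of
# each experiment is the gain of the mean advanced score over the mean baseline score.
def delta_ri_sketch(orig_b, orig_a, rep_b, rep_a, measure='map'):
    def mean(scores):
        return sum(s[measure] for s in scores.values()) / len(scores)
    ri_orig = (mean(orig_a) - mean(orig_b)) / mean(orig_b)  # original relative improvement
    ri_rep = (mean(rep_a) - mean(rep_b)) / mean(rep_b)      # reproduced relative improvement
    return ri_orig - ri_rep  # 0.0 means the relative improvement was fully preserved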
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
"""
Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param rpd: Boolean indicating if the evaluated runs are reproduced.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
if run_b_score and run_a_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
if run_b_score:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
else:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
else:
print(ERR_MSG)
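# The paired/unpaired distinction can be sketched with scipy.stats; the helper name
# and the topic-aligned per-topic score lists are assumptions, not repro_eval's API.
from scipy import stats

def ttest_sketch(orig_topic_scores, rep_topic_scores, rpd=True):
    if rpd:
        # Reproduction: both experiments share the same topics, so the test is paired.
        return stats.ttest_rel(orig_topic_scores, rep_topic_scores).pvalue
    # Replication: a different test collection, so an unpaired (independent) test is used.
    return stats.ttest_ind(orig_topic_scores, rep_topic_scores).pvalue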
class RpdEvaluator(Evaluator):
"""
The Reproducibility Evaluator is used for quantifying the different levels of reproduction for runs that were
derived from the same test collection used in the original experiment.
"""
def evaluate(self, run=None):
"""
Evaluates the scores of the original and reproduced baseline and advanced runs.
        If a (reproduced) run is provided, only this one will be evaluated and a dictionary with the corresponding
scores is returned.
        @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Document ordering of the reproduced baseline run,
                          if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_rep: Document ordering of the reproduced advanced run,
                          if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
else:
print(ERR_MSG)
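# A per-topic sketch of Kendall's tau over the union of two rankings, using
# scipy.stats.kendalltau. How repro_eval places documents missing from one ranking
# may differ; here they are pushed to the bottom as a simplifying assumption.
from scipy.stats import kendalltau

def ktau_union_sketch(orig_topic_run, rep_topic_run):
    # Each argument is a {doc_id: score} dict for a single topic.
    union = sorted(set(orig_topic_run) | set(rep_topic_run))
    def ranks(run):
        ordering = sorted(run, key=run.get, reverse=True)
        # Documents absent from this run share the worst possible rank (assumption).
        return [ordering.index(d) if d in run else len(union) for d in union]
    tau, _pvalue = kendalltau(ranks(orig_topic_run), ranks(rep_topic_run))
    return tau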
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
"""
Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
        @param run_b_rep: Document ordering of the reproduced baseline run,
                          if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
        @param run_a_rep: Document ordering of the reproduced advanced run,
                          if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
See also: https://github.com/claclark/Compatibility
@return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
else:
print(ERR_MSG)
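# A truncated (unextrapolated) RBO sketch for a single topic's rankings; the
# persistence parameter p and the cutoff are illustrative defaults, and the
# misinformation-track variant referenced above differs in its details.
def rbo_sketch(ranking_a, ranking_b, p=0.9, depth=1000):
    # ranking_a / ranking_b: doc ids ordered from rank 1 downwards.
    score = 0.0
    for d in range(1, min(depth, len(ranking_a), len(ranking_b)) + 1):
        overlap = len(set(ranking_a[:d]) & set(ranking_b[:d])) / d  # agreement at depth d
        score += (p ** (d - 1)) * overlap
    return (1 - p) * score  # geometric weighting favours the top of the ranking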
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Determines the Root Mean Square Error (RMSE) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with RMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
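# RMSE itself reduces to one expression over the per-topic scores; this standalone
# sketch (hypothetical helper, pytrec_eval-style score dicts) mirrors what the method
# above delegates to repro_eval.measure.effectiveness.
import math

def rmse_sketch(orig_scores, rep_scores, measure='map'):
    # Root of the mean squared per-topic difference for one effectiveness measure.
    return math.sqrt(sum((orig_scores[t][measure] - rep_scores[t][measure]) ** 2
                         for t in orig_scores) / len(orig_scores))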
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
        Determines the normalized Root Mean Square Error (nRMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
"""
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
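# The only difference from rmse is a normalization step. The exact normalization is
# defined in repro_eval.measure.effectiveness; dividing the RMSE by the range of the
# original scores, as below, is one common choice and is shown purely as an assumption.
def nrmse_sketch(orig_scores, rep_scores, measure='map'):
    vals = [s[measure] for s in orig_scores.values()]
    spread = max(vals) - min(vals)  # range normalization (assumption, see note above)
    # Reuses rmse_sketch from the sketch following the rmse method above.
    return rmse_sketch(orig_scores, rep_scores, measure) / spread if spread else 0.0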
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
as in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
"""
The Replicability Evaluator is used for quantifying the different levels of replication for runs that were
derived from a test collection not used in the original experiment.
"""
    def __init__(self, **kwargs):
        # In addition to the Evaluator setup, read the qrels of the replicated
        # collection (qrel_rpl_path) and build a second RelevanceEvaluator for them.
        super(RplEvaluator, self).__init__(**kwargs)
self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
if self.qrel_rpl_path:
with open(self.qrel_rpl_path, 'r') as f_qrel:
qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
def evaluate(self, run=None):
"""
Evaluates the scores of the original and replicated baseline and advanced runs.
        If a (replicated) run is provided, only this one will be evaluated and a dictionary with the corresponding
scores is returned.
        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
"""
if run:
return self.rel_eval_rpl.evaluate(run)
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
"""
        Conducts an unpaired two-tailed t-test for replicated runs that were derived from a test collection
not used in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another replicated baseline run,
if not provided the replicated baseline run of the RplEvaluator object will be used instead.
@param run_a_path: Path to another replicated advanced run,
if not provided the replicated advanced run of the RplEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
"""
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
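# Putting the classes together, a typical reproducibility check might look like the
# following; the file paths are placeholders, everything else uses the API defined above.
rpd = RpdEvaluator(qrel_orig_path='qrels.txt',
                   run_b_orig_path='orig_b.run', run_a_orig_path='orig_a.run',
                   run_b_rep_path='rep_b.run', run_a_rep_path='rep_a.run')
rpd.trim(t=1000)             # break ties and cut every run to its top-1000 documents
rpd.evaluate()               # score all four runs against the original qrels
print(rpd.ktau_union())      # ordering-level reproduction
print(rpd.rmse())            # per-topic score closeness
print(rpd.er(), rpd.dri())   # reproduction of the overall effect
print(rpd.ttest())           # paired significance test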
| 59.723856
| 143
| 0.643019
|
import pytrec_eval
from repro_eval.util import trim, break_ties
from repro_eval.measure.statistics import ttest
from repro_eval.measure.overall_effects import ER, deltaRI
from repro_eval.measure.document_order import ktau_union as ktu, RBO
from repro_eval.measure.effectiveness import rmse as RMSE, nrmse as nRMSE
from repro_eval.config import ERR_MSG
class Evaluator(object):
def __init__(self, **kwargs):
self.qrel_orig_path = kwargs.get('qrel_orig_path', None)
self.run_b_orig_path = kwargs.get('run_b_orig_path', None)
self.run_a_orig_path = kwargs.get('run_a_orig_path', None)
self.run_b_rep_path = kwargs.get('run_b_rep_path', None)
self.run_a_rep_path = kwargs.get('run_a_rep_path', None)
self.run_b_orig = None
self.run_a_orig = None
self.run_b_rep = None
self.run_a_rep = None
self.run_b_orig_score = None
self.run_a_orig_score = None
self.run_b_rep_score = None
self.run_a_rep_score = None
if self.qrel_orig_path:
with open(self.qrel_orig_path, 'r') as f_qrel:
qrel_orig = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval = pytrec_eval.RelevanceEvaluator(qrel_orig, pytrec_eval.supported_measures)
if self.run_b_orig_path:
with open(self.run_b_orig_path, 'r') as f_run:
self.run_b_orig = pytrec_eval.parse_run(f_run)
self.run_b_orig = {t: self.run_b_orig[t] for t in sorted(self.run_b_orig)}
if self.run_a_orig_path:
with open(self.run_a_orig_path, 'r') as f_run:
self.run_a_orig = pytrec_eval.parse_run(f_run)
self.run_a_orig = {t: self.run_a_orig[t] for t in sorted(self.run_a_orig)}
if self.run_b_rep_path:
with open(self.run_b_rep_path, 'r') as f_run:
self.run_b_rep = pytrec_eval.parse_run(f_run)
self.run_b_rep = {t: self.run_b_rep[t] for t in sorted(self.run_b_rep)}
if self.run_a_rep_path:
with open(self.run_a_rep_path, 'r') as f_run:
self.run_a_rep = pytrec_eval.parse_run(f_run)
self.run_a_rep = {t: self.run_a_rep[t] for t in sorted(self.run_a_rep)}
def trim(self, t=None, run=None):
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
def evaluate(self, run=None):
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if print_feedback:
print('Determining Effect Ratio (ER)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if print_feedback:
print('Determining Delta Relative Improvement (DRI)')
if self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep)
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score:
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score,
rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
if self.run_b_orig_score and (self.run_b_rep_score or run_b_score):
if run_b_score and run_a_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
if run_b_score:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback),
'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
else:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
else:
print(ERR_MSG)
class RpdEvaluator(Evaluator):
def evaluate(self, run=None):
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback),
'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
else:
print(ERR_MSG)
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and run_b_rep:
if self.run_a_orig and run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if self.run_b_orig and self.run_b_rep:
if self.run_a_orig and self.run_a_rep:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline and advanced run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo),
'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print("Determining Rank-biased Overlap (RBO) for baseline run.")
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
else:
print(ERR_MSG)
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if self.run_b_orig and run_b_path:
if self.run_a_orig and run_a_path:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if self.run_b_orig_score and run_b_score:
if self.run_a_orig_score and run_a_score:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if self.run_b_orig_score and self.run_b_rep_score:
if self.run_a_orig_score and self.run_a_rep_score:
if print_feedback:
print("Determining Root Mean Square Error (RMSE) for baseline and advanced run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback),
'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print("Determining normalized Root Mean Square Error (RMSE) for baseline run.")
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
class RplEvaluator(Evaluator):
def __init__(self, **kwargs):
super(RplEvaluator, self).__init__(**kwargs)
self.qrel_rpl_path = kwargs.get('qrel_rpl_path', None)
if self.qrel_rpl_path:
with open(self.qrel_rpl_path, 'r') as f_qrel:
qrel_rpl = pytrec_eval.parse_qrel(f_qrel)
self.rel_eval_rpl = pytrec_eval.RelevanceEvaluator(qrel_rpl, pytrec_eval.supported_measures)
def evaluate(self, run=None):
if run:
return self.rel_eval_rpl.evaluate(run)
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
| true
| true
|
790d36e269dec80ce659412977caceb67a23b71f
| 9,455
|
py
|
Python
|
tests/unit/modules/test_boto_elb.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_boto_elb.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 1
|
2017-07-10T21:44:39.000Z
|
2017-07-10T21:44:39.000Z
|
tests/unit/modules/test_boto_elb.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os.path
from copy import deepcopy
import pkg_resources
import salt.config
import salt.loader
import salt.modules.boto_elb as boto_elb
import salt.utils.versions
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
# pylint: disable=import-error
try:
import boto
boto.ENDPOINTS_PATH = os.path.join(
RUNTIME_VARS.TESTS_DIR, "unit/files/endpoints.json"
)
import boto.ec2.elb
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
from moto import mock_ec2_deprecated, mock_elb_deprecated
HAS_MOTO = True
except ImportError:
HAS_MOTO = False
def mock_ec2_deprecated(self):
"""
    If the mock_ec2_deprecated decorator is not available due to an import
    failure, this fallback replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator
without a "NameError: name 'mock_ec2_deprecated' is not defined" error.
"""
def stub_function(self):
pass
return stub_function
def mock_elb_deprecated(self):
"""
    If the mock_elb_deprecated decorator is not available due to an import
    failure, this fallback replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_elb_deprecated decorator
without a "NameError: name 'mock_elb_deprecated' is not defined" error.
"""
def stub_function(self):
pass
return stub_function
# pylint: enable=import-error
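# Sketch only (not part of the test module): when the moto import fails, the
# fallback decorators above swap the decorated function for a do-nothing stub,
# so test collection still succeeds and no AWS calls are attempted.
if not HAS_MOTO:
    @mock_ec2_deprecated
    def probe(self):
        raise RuntimeError("never reached without moto")
    probe(None)  # passes silently: probe was replaced by stub_function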
log = logging.getLogger(__name__)
region = "us-east-1"
access_key = "GKTADJGHEIQSXMKKRBJ08H"
secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
conn_parameters = {
"region": region,
"key": access_key,
"keyid": secret_key,
"profile": {},
}
boto_conn_parameters = {
"aws_access_key_id": access_key,
"aws_secret_access_key": secret_key,
}
instance_parameters = {"instance_type": "t1.micro"}
required_moto = "0.3.7"
required_moto_py3 = "1.0.1"
def _has_required_moto():
"""
    Returns True or False depending on whether ``moto`` is installed at the
    version required for the Python interpreter running these tests (PY2 vs. PY3).
"""
if not HAS_MOTO:
return False
else:
moto_version = salt.utils.versions.LooseVersion(
pkg_resources.get_distribution("moto").version
)
if moto_version < salt.utils.versions.LooseVersion(required_moto):
return False
elif moto_version < salt.utils.versions.LooseVersion(required_moto_py3):
return False
return True
@skipIf(HAS_BOTO is False, "The boto module must be installed.")
@skipIf(HAS_MOTO is False, "The moto module must be installed.")
@skipIf(
_has_required_moto() is False,
"The moto module must be >= to {} for "
"PY2 or {} for PY3.".format(required_moto, required_moto_py3),
)
class BotoElbTestCase(TestCase, LoaderModuleMockMixin):
"""
TestCase for salt.modules.boto_elb module
"""
def setup_loader_modules(self):
opts = salt.config.DEFAULT_MASTER_OPTS.copy()
utils = salt.loader.utils(
opts, whitelist=["boto", "args", "systemd", "path", "platform"]
)
funcs = salt.loader.minion_mods(opts, utils=utils)
return {boto_elb: {"__opts__": opts, "__utils__": utils, "__salt__": funcs}}
def setUp(self):
TestCase.setUp(self)
        # __virtual__ must be called in order for _get_conn to be injected
boto_elb.__virtual__()
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_result_true(self):
"""
        tests that, given a valid instance id and a valid ELB,
        register_instances returns True.
"""
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestRegisterInstancesValidIdResult"
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, "http")])
reservations = conn_ec2.run_instances("ami-08389d60")
register_result = boto_elb.register_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
self.assertEqual(True, register_result)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_string(self):
"""
        tests that, given a string containing an instance id and a valid ELB,
        register_instances adds the given instance to the ELB
"""
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestRegisterInstancesValidIdResult"
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, "http")])
reservations = conn_ec2.run_instances("ami-08389d60")
boto_elb.register_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
registered_instance_ids = [
instance.id for instance in load_balancer_refreshed.instances
]
log.debug(load_balancer_refreshed.instances)
self.assertEqual([reservations.instances[0].id], registered_instance_ids)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_result_true(self):
"""
        tests that, given a valid id and a registered instance, the boto_elb
        deregister_instances method returns True
"""
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdResult"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60")
load_balancer.register_instances(reservations.instances[0].id)
deregister_result = boto_elb.deregister_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
self.assertEqual(True, deregister_result)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_string(self):
"""
        tests that, given a valid id, the boto_elb deregister_instances method
        removes exactly one of a number of ELB-registered instances
"""
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdString"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60", min_count=2)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
boto_elb.deregister_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
expected_instances = deepcopy(all_instance_ids)
expected_instances.remove(reservations.instances[0].id)
actual_instances = [
instance.id for instance in load_balancer_refreshed.instances
]
self.assertEqual(actual_instances, expected_instances)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_list(self):
"""
        tests that, given valid ids in the form of a list, the boto_elb
        deregister_instances method removes all members of the given list
"""
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdList"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60", min_count=3)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
        # reservations.instances[:-1] refers to all instances except the last
        # instance
deregister_instances = [instance.id for instance in reservations.instances[:-1]]
expected_instances = [reservations.instances[-1].id]
boto_elb.deregister_instances(elb_name, deregister_instances, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
actual_instances = [
instance.id for instance in load_balancer_refreshed.instances
]
self.assertEqual(actual_instances, expected_instances)
| 38.279352
| 91
| 0.696563
|
import logging
import os.path
from copy import deepcopy
import pkg_resources
import salt.config
import salt.loader
import salt.modules.boto_elb as boto_elb
import salt.utils.versions
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
try:
import boto
boto.ENDPOINTS_PATH = os.path.join(
RUNTIME_VARS.TESTS_DIR, "unit/files/endpoints.json"
)
import boto.ec2.elb
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
from moto import mock_ec2_deprecated, mock_elb_deprecated
HAS_MOTO = True
except ImportError:
HAS_MOTO = False
def mock_ec2_deprecated(self):
"""
    If the mock_ec2_deprecated decorator is not available due to an import
    failure, this fallback replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator
without a "NameError: name 'mock_ec2_deprecated' is not defined" error.
"""
def stub_function(self):
pass
return stub_function
def mock_elb_deprecated(self):
"""
    If the mock_elb_deprecated decorator is not available due to an import
    failure, this fallback replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_elb_deprecated decorator
without a "NameError: name 'mock_elb_deprecated' is not defined" error.
"""
def stub_function(self):
pass
return stub_function
log = logging.getLogger(__name__)
region = "us-east-1"
access_key = "GKTADJGHEIQSXMKKRBJ08H"
secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
conn_parameters = {
"region": region,
"key": access_key,
"keyid": secret_key,
"profile": {},
}
boto_conn_parameters = {
"aws_access_key_id": access_key,
"aws_secret_access_key": secret_key,
}
instance_parameters = {"instance_type": "t1.micro"}
required_moto = "0.3.7"
required_moto_py3 = "1.0.1"
def _has_required_moto():
if not HAS_MOTO:
return False
else:
moto_version = salt.utils.versions.LooseVersion(
pkg_resources.get_distribution("moto").version
)
if moto_version < salt.utils.versions.LooseVersion(required_moto):
return False
elif moto_version < salt.utils.versions.LooseVersion(required_moto_py3):
return False
return True
@skipIf(HAS_BOTO is False, "The boto module must be installed.")
@skipIf(HAS_MOTO is False, "The moto module must be installed.")
@skipIf(
_has_required_moto() is False,
"The moto module must be >= to {} for "
"PY2 or {} for PY3.".format(required_moto, required_moto_py3),
)
class BotoElbTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
opts = salt.config.DEFAULT_MASTER_OPTS.copy()
utils = salt.loader.utils(
opts, whitelist=["boto", "args", "systemd", "path", "platform"]
)
funcs = salt.loader.minion_mods(opts, utils=utils)
return {boto_elb: {"__opts__": opts, "__utils__": utils, "__salt__": funcs}}
def setUp(self):
TestCase.setUp(self)
boto_elb.__virtual__()
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_result_true(self):
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestRegisterInstancesValidIdResult"
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, "http")])
reservations = conn_ec2.run_instances("ami-08389d60")
register_result = boto_elb.register_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
self.assertEqual(True, register_result)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_string(self):
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestRegisterInstancesValidIdResult"
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, "http")])
reservations = conn_ec2.run_instances("ami-08389d60")
boto_elb.register_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
registered_instance_ids = [
instance.id for instance in load_balancer_refreshed.instances
]
log.debug(load_balancer_refreshed.instances)
self.assertEqual([reservations.instances[0].id], registered_instance_ids)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_result_true(self):
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdResult"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60")
load_balancer.register_instances(reservations.instances[0].id)
deregister_result = boto_elb.deregister_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
self.assertEqual(True, deregister_result)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_string(self):
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdString"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60", min_count=2)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
boto_elb.deregister_instances(
elb_name, reservations.instances[0].id, **conn_parameters
)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
expected_instances = deepcopy(all_instance_ids)
expected_instances.remove(reservations.instances[0].id)
actual_instances = [
instance.id for instance in load_balancer_refreshed.instances
]
self.assertEqual(actual_instances, expected_instances)
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_list(self):
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = "TestDeregisterInstancesValidIdList"
load_balancer = conn_elb.create_load_balancer(
elb_name, zones, [(80, 80, "http")]
)
reservations = conn_ec2.run_instances("ami-08389d60", min_count=3)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
deregister_instances = [instance.id for instance in reservations.instances[:-1]]
expected_instances = [reservations.instances[-1].id]
boto_elb.deregister_instances(elb_name, deregister_instances, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
actual_instances = [
instance.id for instance in load_balancer_refreshed.instances
]
self.assertEqual(actual_instances, expected_instances)
| true
| true
|
790d374b8e55abde416cd25922fb73cbf1bcc3be
| 174
|
py
|
Python
|
bookworm/__init__.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 36
|
2020-11-15T03:21:39.000Z
|
2022-03-05T01:11:26.000Z
|
bookworm/__init__.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 90
|
2020-10-06T14:46:07.000Z
|
2022-03-31T03:03:34.000Z
|
bookworm/__init__.py
|
xingkong0113/bookworm
|
7214067f48e7a951198806a1f9170e3fd8fc0cce
|
[
"MIT"
] | 20
|
2020-09-30T17:40:44.000Z
|
2022-03-17T19:59:53.000Z
|
# coding: utf-8
import gettext
# Make the gettext function _() available in the global namespace, even if no i18n is in use
gettext.install("bookworm", names=["ngettext"])
| 24.857143
| 92
| 0.741379
|
import gettext
gettext.install("bookworm", names=["ngettext"])
| true
| true
|
790d383757ed9f0cd5efae16d455b27a87b825d9
| 1,368
|
py
|
Python
|
farmblr/blog/migrations/0001_initial.py
|
Nemwel-Boniface/Farmblr
|
ca755e08a6510ef421bb6fd898b489a963831b56
|
[
"MIT"
] | 3
|
2022-02-25T09:12:47.000Z
|
2022-03-11T09:02:35.000Z
|
farmblr/blog/migrations/0001_initial.py
|
Nemwel-Boniface/Farmblr
|
ca755e08a6510ef421bb6fd898b489a963831b56
|
[
"MIT"
] | null | null | null |
farmblr/blog/migrations/0001_initial.py
|
Nemwel-Boniface/Farmblr
|
ca755e08a6510ef421bb6fd898b489a963831b56
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2021-10-12 22:38
import blog.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('summary', models.TextField(max_length=250)),
('content', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
('cover_image', models.ImageField(blank=True, null=True, upload_to=blog.models.get_unique_path)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to='auth.user')),
],
options={
'ordering': ['-created_on'],
},
),
]
| 38
| 134
| 0.590643
|
import blog.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('summary', models.TextField(max_length=250)),
('content', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
('cover_image', models.ImageField(blank=True, null=True, upload_to=blog.models.get_unique_path)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to='auth.user')),
],
options={
'ordering': ['-created_on'],
},
),
]
| true
| true
|
790d3874f140ed423064f140796a7dd6ee92cf5a
| 5,769
|
py
|
Python
|
mitsubishi_central_controller/CentralController.py
|
adgelbfish/mitsubishi-central-controller2
|
8767a3b24023d7c8b4148350139f78c91760b0cd
|
[
"MIT"
] | null | null | null |
mitsubishi_central_controller/CentralController.py
|
adgelbfish/mitsubishi-central-controller2
|
8767a3b24023d7c8b4148350139f78c91760b0cd
|
[
"MIT"
] | null | null | null |
mitsubishi_central_controller/CentralController.py
|
adgelbfish/mitsubishi-central-controller2
|
8767a3b24023d7c8b4148350139f78c91760b0cd
|
[
"MIT"
] | null | null | null |
from mitsubishi_central_controller.util.ControllerDictBuilder import ControllerDictBuilder
import aiohttp
import asyncio
from mitsubishi_central_controller.util.dict_utils import get_group_list_from_dict, get_system_data_from_dict, \
get_single_bulk_from_dict, get_single_racsw_from_dict, get_single_energycontrol_from_dict, get_lcd_name_from_dict, \
get_group_info_list_from_dict
from mitsubishi_central_controller.util.temperature_utils import f_to_c
from mitsubishi_central_controller.util.xml_utils import parse_xml
class CentralController:
def __init__(self, url):
self.url = url
self.full_url = url + "/servlet/MIMEReceiveServlet"
self.session = None
self.groups = None
self.system_data = None
self.semaphore = None
def print(self):
print(self.__dict__)
async def get_session(self):
if self.session is None:
self.session = aiohttp.ClientSession()
self.semaphore = asyncio.Semaphore(value=7)
return self.session
else:
return self.session
async def initialize_group(self, group):
await self.async_update_single_group_bulk(group)
group.update_from_bulk()
print(group.__dict__)
async def initialize_all(self):
await self.async_initialize_system_data()
await self.async_initialize_group_list()
await asyncio.wait([self.initialize_group(group) for group in self.groups])
async def async_send_command(self, command):
session = await self.get_session()
await self.semaphore.acquire()
resp = await session.post(self.full_url, data=command, headers={'Content-Type': 'text/xml'})
self.semaphore.release()
return await resp.text()
async def async_initialize_system_data(self):
xml = ControllerDictBuilder().get_system_data().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
self.system_data = get_system_data_from_dict(parsed)
async def async_initialize_group_list(self):
xml = ControllerDictBuilder().get_mnet_group_list().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
self.groups = get_group_list_from_dict(parsed)
await self.async_update_group_list_with_names()
async def async_update_group_list_with_names(self):
xml = ControllerDictBuilder().get_mnet_list().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
groups_info = get_group_info_list_from_dict(parsed)
for group in self.groups:
group.web_name = groups_info[group.group_id]["web_name"]
group.lcd_name = groups_info[group.group_id]["lcd_name"]
async def async_update_single_group_bulk(self, group):
xml = ControllerDictBuilder().get_single_bulk_data(group.group_id).to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
group.bulk_string = get_single_bulk_from_dict(parsed)
group.rac_sw = get_single_racsw_from_dict(parsed)
group.energy_control = get_single_energycontrol_from_dict(parsed)
return group
async def update_lcd_name_for_group(self, group):
xml = ControllerDictBuilder().get_mnet(group.group_id, lcd_name=True).to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
group.lcd_name = get_lcd_name_from_dict(parsed)
async def set_drive_for_group(self, group, drive_string):
xml = ControllerDictBuilder().set_mnet(group.group_id, drive=drive_string).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_mode_for_group(self, group, mode):
xml = ControllerDictBuilder().set_mnet(group.group_id, mode=mode).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_temperature_fahrenheit_for_group(self, group, temperature):
xml = ControllerDictBuilder().set_mnet(group.group_id, set_temp=f_to_c(int(temperature))).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_air_direction_for_group(self, group, air_direction):
xml = ControllerDictBuilder().set_mnet(group.group_id, air_direction=air_direction).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_fan_speed_for_group(self, group, fan_speed):
xml = ControllerDictBuilder().set_mnet(group.group_id, fan_speed=fan_speed).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_remote_controller_for_group(self, group, remote_controller):
xml = ControllerDictBuilder().set_mnet(group.group_id, remote_controller=remote_controller).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def reset_filter_for_group(self, group):
xml = ControllerDictBuilder().set_mnet(group.group_id, filter_sign="RESET").to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def reset_error_for_group(self, group):
xml = ControllerDictBuilder().set_mnet(group.group_id, error_sign="RESET").to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def close_connection(self):
s = await self.get_session()
await s.close()
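# --- Illustrative usage sketch (not part of the original module) ---
# Drives the async API above from a single coroutine; asyncio is already
# imported at the top of this module. The controller address below is a
# made-up placeholder, not a known default.
async def _demo():
    controller = CentralController("http://192.0.2.10")
    await controller.async_initialize_system_data()    # populates controller.system_data
    await controller.async_initialize_group_list()     # populates controller.groups (with names)
    for group in controller.groups:
        await controller.async_update_single_group_bulk(group)   # refresh per-group state
    await controller.close_connection()
if __name__ == "__main__":
    asyncio.run(_demo())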
| 44.72093
| 120
| 0.729243
|
from mitsubishi_central_controller.util.ControllerDictBuilder import ControllerDictBuilder
import aiohttp
import asyncio
from mitsubishi_central_controller.util.dict_utils import get_group_list_from_dict, get_system_data_from_dict, \
get_single_bulk_from_dict, get_single_racsw_from_dict, get_single_energycontrol_from_dict, get_lcd_name_from_dict, \
get_group_info_list_from_dict
from mitsubishi_central_controller.util.temperature_utils import f_to_c
from mitsubishi_central_controller.util.xml_utils import parse_xml
class CentralController:
def __init__(self, url):
self.url = url
self.full_url = url + "/servlet/MIMEReceiveServlet"
self.session = None
self.groups = None
self.system_data = None
self.semaphore = None
def print(self):
print(self.__dict__)
async def get_session(self):
if self.session is None:
self.session = aiohttp.ClientSession()
self.semaphore = asyncio.Semaphore(value=7)
return self.session
else:
return self.session
async def initialize_group(self, group):
await self.async_update_single_group_bulk(group)
group.update_from_bulk()
print(group.__dict__)
async def initialize_all(self):
await self.async_initialize_system_data()
await self.async_initialize_group_list()
await asyncio.wait([self.initialize_group(group) for group in self.groups])
async def async_send_command(self, command):
session = await self.get_session()
await self.semaphore.acquire()
resp = await session.post(self.full_url, data=command, headers={'Content-Type': 'text/xml'})
self.semaphore.release()
return await resp.text()
async def async_initialize_system_data(self):
xml = ControllerDictBuilder().get_system_data().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
self.system_data = get_system_data_from_dict(parsed)
async def async_initialize_group_list(self):
xml = ControllerDictBuilder().get_mnet_group_list().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
self.groups = get_group_list_from_dict(parsed)
await self.async_update_group_list_with_names()
async def async_update_group_list_with_names(self):
xml = ControllerDictBuilder().get_mnet_list().to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
groups_info = get_group_info_list_from_dict(parsed)
for group in self.groups:
group.web_name = groups_info[group.group_id]["web_name"]
group.lcd_name = groups_info[group.group_id]["lcd_name"]
async def async_update_single_group_bulk(self, group):
xml = ControllerDictBuilder().get_single_bulk_data(group.group_id).to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
group.bulk_string = get_single_bulk_from_dict(parsed)
group.rac_sw = get_single_racsw_from_dict(parsed)
group.energy_control = get_single_energycontrol_from_dict(parsed)
return group
async def update_lcd_name_for_group(self, group):
xml = ControllerDictBuilder().get_mnet(group.group_id, lcd_name=True).to_xml()
xml_response = await self.async_send_command(xml)
parsed = parse_xml(xml_response)
group.lcd_name = get_lcd_name_from_dict(parsed)
async def set_drive_for_group(self, group, drive_string):
xml = ControllerDictBuilder().set_mnet(group.group_id, drive=drive_string).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_mode_for_group(self, group, mode):
xml = ControllerDictBuilder().set_mnet(group.group_id, mode=mode).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_temperature_fahrenheit_for_group(self, group, temperature):
xml = ControllerDictBuilder().set_mnet(group.group_id, set_temp=f_to_c(int(temperature))).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_air_direction_for_group(self, group, air_direction):
xml = ControllerDictBuilder().set_mnet(group.group_id, air_direction=air_direction).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_fan_speed_for_group(self, group, fan_speed):
xml = ControllerDictBuilder().set_mnet(group.group_id, fan_speed=fan_speed).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def set_remote_controller_for_group(self, group, remote_controller):
xml = ControllerDictBuilder().set_mnet(group.group_id, remote_controller=remote_controller).to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def reset_filter_for_group(self, group):
xml = ControllerDictBuilder().set_mnet(group.group_id, filter_sign="RESET").to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def reset_error_for_group(self, group):
xml = ControllerDictBuilder().set_mnet(group.group_id, error_sign="RESET").to_xml()
await self.async_send_command(xml)
await self.async_update_single_group_bulk(group)
async def close_connection(self):
s = await self.get_session()
await s.close()
| true
| true
|
790d388f4c350aca4588a6316cae497aee15325b
| 1,004
|
py
|
Python
|
shop/cascade/settings.py
|
haitwang-cloud/django-shop
|
8ac767a42022d66d226c0bb342f16ac3df3ca30b
|
[
"BSD-3-Clause"
] | 2
|
2019-10-17T09:03:40.000Z
|
2019-10-17T09:08:54.000Z
|
shop/cascade/settings.py
|
haitwang-cloud/django-shop
|
8ac767a42022d66d226c0bb342f16ac3df3ca30b
|
[
"BSD-3-Clause"
] | 10
|
2020-06-05T19:26:54.000Z
|
2022-03-11T23:33:14.000Z
|
shop/cascade/settings.py
|
haitwang-cloud/django-shop
|
8ac767a42022d66d226c0bb342f16ac3df3ca30b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-18T18:03:17.000Z
|
2022-02-18T18:03:17.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
CASCADE_PLUGINS = getattr(settings, 'SHOP_CASCADE_PLUGINS',
('auth', 'breadcrumb', 'catalog', 'cart', 'checkout', 'extensions', 'order', 'processbar', 'search',))
def set_defaults(config):
config.setdefault('plugins_with_extra_fields', {})
config['plugins_with_extra_fields'].setdefault('ShopReorderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
config['plugins_with_extra_fields'].setdefault('ShopCancelOrderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
| 38.615385
| 106
| 0.685259
|
from __future__ import unicode_literals
from django.conf import settings
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
CASCADE_PLUGINS = getattr(settings, 'SHOP_CASCADE_PLUGINS',
('auth', 'breadcrumb', 'catalog', 'cart', 'checkout', 'extensions', 'order', 'processbar', 'search',))
def set_defaults(config):
config.setdefault('plugins_with_extra_fields', {})
config['plugins_with_extra_fields'].setdefault('ShopReorderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
config['plugins_with_extra_fields'].setdefault('ShopCancelOrderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
| true
| true
|
790d389f0491ab6f0cebe2e02717a1e3689b0a1b
| 2,368
|
py
|
Python
|
csm_test_utils/message.py
|
opentelekomcloud-infra/csm-test-utils
|
ec3c4a6bf4d4806e76d0d8dfcfe024c39c9a0e36
|
[
"Apache-2.0"
] | 1
|
2021-02-08T08:53:01.000Z
|
2021-02-08T08:53:01.000Z
|
csm_test_utils/message.py
|
opentelekomcloud-infra/csm-test-utils
|
ec3c4a6bf4d4806e76d0d8dfcfe024c39c9a0e36
|
[
"Apache-2.0"
] | 22
|
2019-10-21T15:10:14.000Z
|
2021-04-07T07:27:20.000Z
|
csm_test_utils/message.py
|
opentelekomcloud-infra/csm-test-utils
|
ec3c4a6bf4d4806e76d0d8dfcfe024c39c9a0e36
|
[
"Apache-2.0"
] | 1
|
2021-02-08T08:53:07.000Z
|
2021-02-08T08:53:07.000Z
|
import datetime
import json
import logging
import socket
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
class Base(dict):
"""Base metric class"""
def __init__(
self,
name: str,
environment: str,
zone: str,
timestamp: str = None
):
super().__init__()
self['name'] = name
self['environment'] = environment
self['zone'] = zone
if timestamp:
self['timestamp'] = timestamp
else:
self['timestamp'] = datetime.datetime.now().isoformat()
def serialize(self) -> str:
"""Serialize data as json string"""
try:
return json.dumps(self, separators=(',', ':'))
except json.JSONDecodeError as err:
return err.msg
def __bytes__(self) -> bytes:
"""Returns bytes interpretation of data"""
data = self.serialize()
return ('%s\n' % data).encode('utf8')
class Metric(Base):
"""Base metric"""
def __init__(
self,
name: str,
value: int,
environment: str = None,
zone: str = None,
**kwargs
):
super().__init__(
name=name,
environment=environment,
zone=zone,
)
self['__type'] = 'metric'
self['metric_type'] = kwargs.get('metric_type', 'ms')
self['value'] = value
self.update(**kwargs)
def get_message(msg):
"""Get metric instance from dictionary or string"""
if not isinstance(msg, dict):
try:
msg = json.loads(msg, encoding='utf-8')
except json.JSONDecodeError:
return None
typ = msg.pop('__type')
if typ == 'metric':
return Metric(**msg)
return None
def push_metric(data: Metric, message_socket_address):
"""push metrics to socket"""
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as _socket:
try:
_socket.connect(message_socket_address)
msg = '%s\n' % data.serialize()
_socket.sendall(msg.encode('utf8'))
return 'success'
except socket.error as err:
LOGGER.exception('Error establishing connection to socket')
raise err
except Exception as ex:
LOGGER.exception('Error writing message to socket')
raise ex
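# --- Illustrative sketch (not part of the original module) ---
# Shows the payload a Metric produces; the name/environment/zone values are
# invented for the example.
if __name__ == '__main__':
    sample = Metric(name='lb_timing', value=42, environment='demo', zone='eu-de-01')
    print(sample.serialize())   # the JSON line push_metric() would write to the socket
    print(bytes(sample))        # same payload, newline-terminated and utf-8 encoded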
| 26.021978
| 71
| 0.559966
|
import datetime
import json
import logging
import socket
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
class Base(dict):
def __init__(
self,
name: str,
environment: str,
zone: str,
timestamp: str = None
):
super().__init__()
self['name'] = name
self['environment'] = environment
self['zone'] = zone
if timestamp:
self['timestamp'] = timestamp
else:
self['timestamp'] = datetime.datetime.now().isoformat()
def serialize(self) -> str:
try:
return json.dumps(self, separators=(',', ':'))
except json.JSONDecodeError as err:
return err.msg
def __bytes__(self) -> bytes:
data = self.serialize()
return ('%s\n' % data).encode('utf8')
class Metric(Base):
def __init__(
self,
name: str,
value: int,
environment: str = None,
zone: str = None,
**kwargs
):
super().__init__(
name=name,
environment=environment,
zone=zone,
)
self['__type'] = 'metric'
self['metric_type'] = kwargs.get('metric_type', 'ms')
self['value'] = value
self.update(**kwargs)
def get_message(msg):
if not isinstance(msg, dict):
try:
msg = json.loads(msg, encoding='utf-8')
except json.JSONDecodeError:
return None
typ = msg.pop('__type')
if typ == 'metric':
return Metric(**msg)
return None
def push_metric(data: Metric, message_socket_address):
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as _socket:
try:
_socket.connect(message_socket_address)
msg = '%s\n' % data.serialize()
_socket.sendall(msg.encode('utf8'))
return 'success'
except socket.error as err:
LOGGER.exception('Error establishing connection to socket')
raise err
except Exception as ex:
LOGGER.exception('Error writing message to socket')
raise ex
| true
| true
|
790d3924b5c67002e392f6b99b27e9fb61c158bd
| 2,089
|
py
|
Python
|
test/ProbePlacement_multi/parallel/optimization_setup.py
|
kant/GlennOPT
|
ca816c3708a2db5b98f8f1a7885305a8e18e179e
|
[
"NASA-1.3"
] | null | null | null |
test/ProbePlacement_multi/parallel/optimization_setup.py
|
kant/GlennOPT
|
ca816c3708a2db5b98f8f1a7885305a8e18e179e
|
[
"NASA-1.3"
] | null | null | null |
test/ProbePlacement_multi/parallel/optimization_setup.py
|
kant/GlennOPT
|
ca816c3708a2db5b98f8f1a7885305a8e18e179e
|
[
"NASA-1.3"
] | null | null | null |
"""
Simple, non parallel optimization set up example.
"""
import sys,os
sys.path.insert(0,'../../../')
from glennopt.base import Parameter
from glennopt.helpers import mutation_parameters, de_mutation_type
from glennopt.optimizers import NSGA3
from glennopt.DOE import Default,CCD,FullFactorial,LatinHyperCube
import numpy as np
import os
# Initialize the DOE
doe = LatinHyperCube(samples=128,levels=4) # 128 random samples of the design space
# These are also available for use
# doe = FullFactorial(levels=2)
# doe = Default(15) # Default
# doe = CCD()
eval_parameters = list()
# Define evaluation parameters
nProbes = 10
minSpacing = 3
probeSpacing = 360/nProbes
tLo = np.zeros(nProbes)
tHi = np.zeros(nProbes)
for i in range(nProbes):
tLo[i] = probeSpacing*i
if i != nProbes-1:
tHi[i] = probeSpacing*(i+1) - minSpacing
else:
tHi[-1] = probeSpacing*(i+1)
doe.add_parameter(name="x"+str(i+1),min_value=tLo[i],max_value=tHi[i])
constraints = (tLo,tHi)
doe.add_objectives(name='objective1')
doe.add_objectives(name='objective2')
# Define any performance parameters you want to keep track of (tracking only)
doe.add_perf_parameter(name='PearsonR')
doe.add_perf_parameter(name='RMS_Error')
# Set up the optimizer
current_dir = os.getcwd()
pop_size = 48
ns = NSGA3(eval_command = "python evaluation.py", eval_folder="Evaluation",pop_size=pop_size,optimization_folder=current_dir)
ns.add_eval_parameters(eval_params=doe.eval_parameters)
ns.add_objectives(objectives=doe.objectives)
ns.add_performance_parameters(performance_params= doe.perf_parameters)
# Parallel Settings (You don't need to run this block if you only want serial execution)
ns.parallel_settings.concurrent_executions = 8 # Change to 1 for serial
ns.parallel_settings.cores_per_execution= 1
ns.parallel_settings.execution_timeout = 0.2 # minutes
# Start the optimizer
ns.mutation_params.mutation_type = de_mutation_type.de_rand_1_bin
ns.mutation_params.F = 0.6
ns.mutation_params.C = 0.7
# Start the Design of Experiments
ns.start_doe(doe.generate_doe())
| 33.693548
| 125
| 0.760651
|
import sys,os
sys.path.insert(0,'../../../')
from glennopt.base import Parameter
from glennopt.helpers import mutation_parameters, de_mutation_type
from glennopt.optimizers import NSGA3
from glennopt.DOE import Default,CCD,FullFactorial,LatinHyperCube
import numpy as np
import os
doe = LatinHyperCube(samples=128,levels=4)
eval_parameters = list()
nProbes = 10
minSpacing = 3
probeSpacing = 360/nProbes
tLo = np.zeros(nProbes)
tHi = np.zeros(nProbes)
for i in range(nProbes):
tLo[i] = probeSpacing*i
if i != nProbes-1:
tHi[i] = probeSpacing*(i+1) - minSpacing
else:
tHi[-1] = probeSpacing*(i+1)
doe.add_parameter(name="x"+str(i+1),min_value=tLo[i],max_value=tHi[i])
constraints = (tLo,tHi)
doe.add_objectives(name='objective1')
doe.add_objectives(name='objective2')
doe.add_perf_parameter(name='PearsonR')
doe.add_perf_parameter(name='RMS_Error')
current_dir = os.getcwd()
pop_size = 48
ns = NSGA3(eval_command = "python evaluation.py", eval_folder="Evaluation",pop_size=pop_size,optimization_folder=current_dir)
ns.add_eval_parameters(eval_params=doe.eval_parameters)
ns.add_objectives(objectives=doe.objectives)
ns.add_performance_parameters(performance_params= doe.perf_parameters)
ns.parallel_settings.concurrent_executions = 8 # Change to 1 for serial
ns.parallel_settings.cores_per_execution= 1
ns.parallel_settings.execution_timeout = 0.2 # minutes
# Start the optimizer
ns.mutation_params.mutation_type = de_mutation_type.de_rand_1_bin
ns.mutation_params.F = 0.6
ns.mutation_params.C = 0.7
# Start the Design of Experiments
ns.start_doe(doe.generate_doe())
| true
| true
|
790d39801728b257bdacbd05420a76bd70f11934
| 932
|
py
|
Python
|
game.py
|
sabdllah/03-Text-adventure
|
ec6f6cdab29811dd77daff064a2748d9638a2667
|
[
"MIT"
] | null | null | null |
game.py
|
sabdllah/03-Text-adventure
|
ec6f6cdab29811dd77daff064a2748d9638a2667
|
[
"MIT"
] | null | null | null |
game.py
|
sabdllah/03-Text-adventure
|
ec6f6cdab29811dd77daff064a2748d9638a2667
|
[
"MIT"
] | null | null | null |
answer = input ("Would you like to play?")
if answer.lower().strip() == "yes":
print ("Yay! Let's get started.")
answer = input ("You have reached an apple tree, would you like to pick an apple?").lower ().strip()
if answer == "yes":
answer = input ("would you like to eat the apple?")
if answer == "yes":
print ("That was not a great idea!")
else:
print ("good choice, you made it out safely.")
answer = input ("you encounter the apple tree owner and are accussed of stealing. would you like to? (run/apologize)")
if answer == "run":
print ("you have been arressted! Game Over!")
else:
print ("you have won! Congratulations!")
elif answer == "no":
print ("congratulations you have won!")
else:
print ("Invalid choice, you lost!")
else:
print ("Aww that's so sad")
| 33.285714
| 131
| 0.562232
|
answer = input ("Would you like to play?")
if answer.lower().strip() == "yes":
print ("Yay! Let's get started.")
answer = input ("You have reached an apple tree, would you like to pick an apple?").lower ().strip()
if answer == "yes":
answer = input ("would you like to eat the apple?")
if answer == "yes":
print ("That was not a great idea!")
else:
print ("good choice, you made it out safely.")
answer = input ("you encounter the apple tree owner and are accussed of stealing. would you like to? (run/apologize)")
if answer == "run":
print ("you have been arressted! Game Over!")
else:
print ("you have won! Congratulations!")
elif answer == "no":
print ("congratulations you have won!")
else:
print ("Invalid choice, you lost!")
else:
print ("Aww that's so sad")
| false
| true
|
790d39c6d35aec6c26cc29f9915c24a0804dafe8
| 409
|
py
|
Python
|
Lib/test/test_distutils.py
|
cyyever/nogil
|
2607880dd93de52cf34045f1b7e850639a06c137
|
[
"0BSD"
] | 953
|
2021-10-08T17:12:34.000Z
|
2022-03-31T18:31:50.000Z
|
Lib/test/test_distutils.py
|
cyyever/nogil
|
2607880dd93de52cf34045f1b7e850639a06c137
|
[
"0BSD"
] | 27
|
2021-10-13T20:54:09.000Z
|
2022-03-27T14:41:13.000Z
|
Lib/test/test_distutils.py
|
cyyever/nogil
|
2607880dd93de52cf34045f1b7e850639a06c137
|
[
"0BSD"
] | 42
|
2021-10-08T16:05:57.000Z
|
2022-03-18T13:06:12.000Z
|
"""Tests for distutils.
The tests for distutils are defined in the distutils.tests package;
the test_suite() function there returns a test suite that's ready to
be run.
"""
import distutils.tests
import test.support
import unittest
def load_tests(*_):
# used by unittest
return distutils.tests.test_suite()
def tearDownModule():
test.support.reap_children()
if __name__ == "__main__":
unittest.main()
| 17.782609
| 68
| 0.731051
|
import distutils.tests
import test.support
import unittest
def load_tests(*_):
return distutils.tests.test_suite()
def tearDownModule():
test.support.reap_children()
if __name__ == "__main__":
unittest.main()
| true
| true
|
790d3a5e0044f9cab6dfde7bd011d2a2d79f547a
| 3,135
|
py
|
Python
|
Django/session_words/session_words/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
Django/session_words/session_words/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | 8
|
2020-06-06T01:02:06.000Z
|
2022-03-12T00:24:13.000Z
|
Django/session_words/session_words/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
"""
Django settings for session_words project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u@cj5-77l85mz0t186p6@1c(d607sgv(0t5lm!4h$ok8to&h@v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'session_words.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'session_words.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.696721
| 91
| 0.698565
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'u@cj5-77l85mz0t186p6@1c(d607sgv(0t5lm!4h$ok8to&h@v'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'session_words.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'session_words.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
790d3c676315e9ecbb552fc0aaff117b49326325
| 1,350
|
py
|
Python
|
output/models/ms_data/complex_type/ct_i040_xsd/ct_i040.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/element/elem_t003_xsd/elem_t003.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/element/elem_t007_xsd/elem_t007.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Dict, Optional
@dataclass
class FooType:
class Meta:
name = "fooType"
foo_ele1: Optional[str] = field(
default=None,
metadata={
"name": "fooEle1",
"type": "Element",
"namespace": "",
"required": True,
}
)
foo_ele2: Optional[int] = field(
default=None,
metadata={
"name": "fooEle2",
"type": "Element",
"namespace": "",
"required": True,
}
)
foo_ele3: Optional[bool] = field(
default=None,
metadata={
"name": "fooEle3",
"type": "Element",
"namespace": "",
}
)
other_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class FooTest(FooType):
class Meta:
name = "fooTest"
@dataclass
class MyType(FooType):
class Meta:
name = "myType"
@dataclass
class Root:
class Meta:
name = "root"
foo_test: Optional[FooTest] = field(
default=None,
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
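# --- Illustrative sketch (not part of the generated module) ---
# Constructs the dataclasses by hand; the field values are invented for the
# example.
if __name__ == "__main__":
    doc = Root(foo_test=FooTest(foo_ele1="abc", foo_ele2=3, foo_ele3=True,
                                other_attributes={"lang": "en"}))
    print(doc)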
| 19.285714
| 45
| 0.479259
|
from dataclasses import dataclass, field
from typing import Dict, Optional
@dataclass
class FooType:
class Meta:
name = "fooType"
foo_ele1: Optional[str] = field(
default=None,
metadata={
"name": "fooEle1",
"type": "Element",
"namespace": "",
"required": True,
}
)
foo_ele2: Optional[int] = field(
default=None,
metadata={
"name": "fooEle2",
"type": "Element",
"namespace": "",
"required": True,
}
)
foo_ele3: Optional[bool] = field(
default=None,
metadata={
"name": "fooEle3",
"type": "Element",
"namespace": "",
}
)
other_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class FooTest(FooType):
class Meta:
name = "fooTest"
@dataclass
class MyType(FooType):
class Meta:
name = "myType"
@dataclass
class Root:
class Meta:
name = "root"
foo_test: Optional[FooTest] = field(
default=None,
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
| true
| true
|
790d3cc8be6c56fb8f97a283ca95855e6ca2c466
| 405
|
py
|
Python
|
blog/migrations/0002_auto_20190209_0235.py
|
muntakim1/mblog
|
dd3104220ce77f63e362d157e62e3ce93b0e7cea
|
[
"MIT"
] | 2
|
2019-05-06T13:57:44.000Z
|
2020-02-19T04:12:33.000Z
|
blog/migrations/0002_auto_20190209_0235.py
|
muntakim1/mblog
|
dd3104220ce77f63e362d157e62e3ce93b0e7cea
|
[
"MIT"
] | 2
|
2019-10-21T19:54:44.000Z
|
2019-12-29T14:56:47.000Z
|
blog/migrations/0002_auto_20190209_0235.py
|
muntakim1/mblog
|
dd3104220ce77f63e362d157e62e3ce93b0e7cea
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2019-02-08 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='post/%Y/%m/%d'),
),
]
| 21.315789
| 86
| 0.582716
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='post/%Y/%m/%d'),
),
]
| true
| true
|
790d3dcf0c2c3850b605f2b50d45d01b72f40000
| 2,260
|
py
|
Python
|
serpent_server/server.py
|
ChrisCalderon/SerpentServer
|
da6e17e468bd93197183cad16a01cb6f233f7344
|
[
"MIT"
] | null | null | null |
serpent_server/server.py
|
ChrisCalderon/SerpentServer
|
da6e17e468bd93197183cad16a01cb6f233f7344
|
[
"MIT"
] | null | null | null |
serpent_server/server.py
|
ChrisCalderon/SerpentServer
|
da6e17e468bd93197183cad16a01cb6f233f7344
|
[
"MIT"
] | null | null | null |
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from .redirect import RedirectHandler
import threading
import ssl
__all__ = ['ThreadedServer', 'SecureServer']
class ThreadedServer(ThreadingMixIn, HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self,
host: str,
port: int,
RequestHandlerClass: BaseHTTPRequestHandler,
bind_and_activate: bool=True):
self._serve_forever_thread = None # type: threading.Thread
super().__init__((host, port), RequestHandlerClass, bind_and_activate)
def serve_forever(self, poll_interval=0.5):
self._serve_forever_thread = threading.Thread(
target=super().serve_forever,
args=(poll_interval,)
)
self._serve_forever_thread.start()
class SecureServer(ThreadedServer):
def __init__(self,
certfile: str,
keyfile: str,
host: str,
port: int,
RequestHandlerClass: BaseHTTPRequestHandler,
bind_and_activate: bool = True):
self._certfile = certfile
self._keyfile = keyfile
self._redirect = ThreadedServer(host,
80,
RedirectHandler,
bind_and_activate)
super().__init__(host, port, RequestHandlerClass, bind_and_activate)
def server_bind(self):
super().server_bind()
self._redirect.server_bind()
self.socket = ssl.wrap_socket(self.socket,
server_side=True,
certfile=self._certfile,
keyfile=self._keyfile,
do_handshake_on_connect=False)
def get_request(self):
sock, addr = super().get_request()
sock.do_handshake()
return sock, addr
def serve_forever(self, poll_interval=0.5):
super().serve_forever(poll_interval)
self._redirect.serve_forever(poll_interval)
def shutdown(self):
super().shutdown()
self._redirect.shutdown()
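# --- Illustrative sketch (not part of the original module) ---
# Serves a trivial handler on a helper thread via ThreadedServer; host and
# port are arbitrary example values.
if __name__ == '__main__':
    class _OkHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            body = b'ok'
            self.send_response(200)
            self.send_header('Content-Length', str(len(body)))
            self.end_headers()
            self.wfile.write(body)
    server = ThreadedServer('127.0.0.1', 8080, _OkHandler)
    server.serve_forever()   # returns immediately; requests run on the background thread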
| 34.242424
| 78
| 0.574779
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from .redirect import RedirectHandler
import threading
import ssl
__all__ = ['ThreadedServer', 'SecureServer']
class ThreadedServer(ThreadingMixIn, HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self,
host: str,
port: int,
RequestHandlerClass: BaseHTTPRequestHandler,
bind_and_activate: bool=True):
self._serve_forever_thread = None
super().__init__((host, port), RequestHandlerClass, bind_and_activate)
def serve_forever(self, poll_interval=0.5):
self._serve_forever_thread = threading.Thread(
target=super().serve_forever,
args=(poll_interval,)
)
self._serve_forever_thread.start()
class SecureServer(ThreadedServer):
def __init__(self,
certfile: str,
keyfile: str,
host: str,
port: int,
RequestHandlerClass: BaseHTTPRequestHandler,
bind_and_activate: bool = True):
self._certfile = certfile
self._keyfile = keyfile
self._redirect = ThreadedServer(host,
80,
RedirectHandler,
bind_and_activate)
super().__init__(host, port, RequestHandlerClass, bind_and_activate)
def server_bind(self):
super().server_bind()
self._redirect.server_bind()
self.socket = ssl.wrap_socket(self.socket,
server_side=True,
certfile=self._certfile,
keyfile=self._keyfile,
do_handshake_on_connect=False)
def get_request(self):
sock, addr = super().get_request()
sock.do_handshake()
return sock, addr
def serve_forever(self, poll_interval=0.5):
super().serve_forever(poll_interval)
self._redirect.serve_forever(poll_interval)
def shutdown(self):
super().shutdown()
self._redirect.shutdown()
| true
| true
|
790d3ee93addca429835b9a703e987f4b06b772c
| 1,244
|
py
|
Python
|
faces.py
|
clevtech/core_control_zhuldyz
|
68e62a363e874692b5cc54ff651b63a00b58f1cf
|
[
"MIT"
] | null | null | null |
faces.py
|
clevtech/core_control_zhuldyz
|
68e62a363e874692b5cc54ff651b63a00b58f1cf
|
[
"MIT"
] | null | null | null |
faces.py
|
clevtech/core_control_zhuldyz
|
68e62a363e874692b5cc54ff651b63a00b58f1cf
|
[
"MIT"
] | null | null | null |
import cv2
cap = cv2.VideoCapture(1)
cap.set(3, 640) #WIDTH
cap.set(4, 480) #HEIGHT
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
# while True:
# ret, frame = cap.read()
#
# # Our operations on the frame come here
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# try:
# number = len(faces)
# size = [faces[0][2], faces[0][3]]
# position = [faces[0][0], faces[0][1]]
# break
# except:
# a = 1
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# print(number)
# print(size)
# print(position)
#print(len(faces))
# Display the resulting frame
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 26.468085
| 75
| 0.578778
|
import cv2
cap = cv2.VideoCapture(1)
cap.set(3, 640)
cap.set(4, 480)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| true
| true
|
790d3f268dc6c97ef74323a266cd51779316c412
| 633
|
py
|
Python
|
test_batched_inv_mp.py
|
eldrin/wmf
|
7a4d72e47034f4289ea3c73d28886eabd6ab5762
|
[
"MIT"
] | 79
|
2015-01-27T00:11:03.000Z
|
2021-08-21T14:48:33.000Z
|
test_batched_inv_mp.py
|
eldrin/wmf
|
7a4d72e47034f4289ea3c73d28886eabd6ab5762
|
[
"MIT"
] | 4
|
2017-05-01T21:30:08.000Z
|
2018-07-26T09:30:08.000Z
|
test_batched_inv_mp.py
|
eldrin/wmf
|
7a4d72e47034f4289ea3c73d28886eabd6ab5762
|
[
"MIT"
] | 23
|
2015-04-29T01:41:53.000Z
|
2020-03-25T01:54:30.000Z
|
import numpy as np
import wmf
import batched_inv
import batched_inv_mp
import solve_mp
import solve_gpu
np.random.seed(123)
B = np.load("test_matrix.pkl")
S = wmf.log_surplus_confidence_matrix(B, alpha=2.0, epsilon=1e-6)
num_factors = 40 + 1
num_iterations = 1
batch_size = 1000
solve = batched_inv.solve_sequential
# solve = solve_mp.solve_mp
# solve = solve_gpu.solve_gpu
U, V = wmf.factorize(S, num_factors=num_factors, lambda_reg=1e-5, num_iterations=num_iterations, init_std=0.01, verbose=True, dtype='float32',
recompute_factors=batched_inv_mp.recompute_factors_bias_batched_mp, batch_size=batch_size, solve=solve)
| 24.346154
| 142
| 0.793049
|
import numpy as np
import wmf
import batched_inv
import batched_inv_mp
import solve_mp
import solve_gpu
np.random.seed(123)
B = np.load("test_matrix.pkl")
S = wmf.log_surplus_confidence_matrix(B, alpha=2.0, epsilon=1e-6)
num_factors = 40 + 1
num_iterations = 1
batch_size = 1000
solve = batched_inv.solve_sequential
U, V = wmf.factorize(S, num_factors=num_factors, lambda_reg=1e-5, num_iterations=num_iterations, init_std=0.01, verbose=True, dtype='float32',
recompute_factors=batched_inv_mp.recompute_factors_bias_batched_mp, batch_size=batch_size, solve=solve)
| true
| true
|
790d406bc2c98191db89bdc4c097a150bf664082
| 205
|
py
|
Python
|
client/util/html/tooling/base/document/ScriptElement.py
|
vincihb/stock-price-predictor
|
17f46bed7360817835a160ea4f1a6e057de4032d
|
[
"MIT"
] | null | null | null |
client/util/html/tooling/base/document/ScriptElement.py
|
vincihb/stock-price-predictor
|
17f46bed7360817835a160ea4f1a6e057de4032d
|
[
"MIT"
] | 1
|
2021-06-02T03:12:17.000Z
|
2021-06-02T03:12:17.000Z
|
client/util/html/tooling/base/document/ScriptElement.py
|
vincihb/stock-price-predictor
|
17f46bed7360817835a160ea4f1a6e057de4032d
|
[
"MIT"
] | null | null | null |
from client.util.html.tooling.base.HTMLElement import HTMLElement
class ScriptElement(HTMLElement):
def __init__(self, src):
super().__init__('script')
self.set_attribute('src', src)
| 25.625
| 65
| 0.712195
|
from client.util.html.tooling.base.HTMLElement import HTMLElement
class ScriptElement(HTMLElement):
def __init__(self, src):
super().__init__('script')
self.set_attribute('src', src)
| true
| true
|
790d40ac4ba26b892211d776e213de7ad820a9e8
| 2,429
|
py
|
Python
|
src/convert_to_wav.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 5
|
2020-02-06T05:44:08.000Z
|
2021-07-21T07:16:49.000Z
|
src/convert_to_wav.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 2
|
2021-06-21T11:09:30.000Z
|
2021-07-12T07:35:09.000Z
|
src/convert_to_wav.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 1
|
2021-06-05T03:13:12.000Z
|
2021-06-05T03:13:12.000Z
|
"""Convert MUSDB18 dataset to .wav format.
Output .wav files contain 5 channels
- `0` - The mixture,
- `1` - The drums,
- `2` - The bass,
- `3` - The rest of the accompaniment,
- `4` - The vocals.
"""
import argparse
import os
import subprocess
import tempfile
import librosa
import numpy as np
import soundfile as sf
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('origin_dataset_dir',
help='Path of the original dataset (.mp4)',
type=str)
parser.add_argument('new_dataset_dir',
help='Output path of .wav dataset',
type=str)
parser.add_argument('--sr',
help='Sample rate. (Default: 22050) ',
type=int, default=22050)
args = parser.parse_args()
origin_dataset_dir = args.origin_dataset_dir
new_dataset_dir = args.new_dataset_dir
if os.path.isdir(new_dataset_dir):
raise FileExistsError(f'{new_dataset_dir} already exists.')
else:
os.mkdir(new_dataset_dir)
os.mkdir(os.path.join(new_dataset_dir, 'train'))
os.mkdir(os.path.join(new_dataset_dir, 'test'))
with tempfile.TemporaryDirectory() as tmpdir:
for subdir in ('train', 'test'):
origin_dir = os.path.join(origin_dataset_dir, subdir)
files = [f for f in os.listdir(origin_dir)
if os.path.splitext(f)[1] == '.mp4']
for file in files:
path = os.path.join(origin_dir, file)
name = os.path.splitext(file)[0]
wav_data = []
# Extract & save the sound of `ch` channel to a temp directory
# and then concatenate all channels to a single .wav file
for ch in range(5):
temp_fn = f'{name}.{ch}.wav'
out_path = os.path.join(tmpdir, temp_fn)
subprocess.run(['ffmpeg', '-i', path,
'-map', f'0:{ch}', out_path])
sound, _ = librosa.load(out_path, sr=args.sr, mono=True)
wav_data.append(sound)
wav_data = np.stack(wav_data, axis=1)
out_path = os.path.join(
new_dataset_dir, subdir, f'{name}.wav')
sf.write(out_path, wav_data, args.sr)
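# --- Illustrative helper (not part of the original script) ---
# Reads one stem back out of a converted 5-channel .wav file; per the module
# docstring, channel 0 is the mixture and channel 4 the vocals.
def load_stem(path, channel):
    data, sr = sf.read(path)     # data shape: (n_samples, 5)
    return data[:, channel], sr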
if __name__ == '__main__':
main()
| 35.202899
| 78
| 0.558666
|
import argparse
import os
import subprocess
import tempfile
import librosa
import numpy as np
import soundfile as sf
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('origin_dataset_dir',
help='Path of the original dataset (.mp4)',
type=str)
parser.add_argument('new_dataset_dir',
help='Output path of .wav dataset',
type=str)
parser.add_argument('--sr',
help='Sample rate. (Default: 22050) ',
type=int, default=22050)
args = parser.parse_args()
origin_dataset_dir = args.origin_dataset_dir
new_dataset_dir = args.new_dataset_dir
if os.path.isdir(new_dataset_dir):
raise FileExistsError(f'{new_dataset_dir} already exists.')
else:
os.mkdir(new_dataset_dir)
os.mkdir(os.path.join(new_dataset_dir, 'train'))
os.mkdir(os.path.join(new_dataset_dir, 'test'))
with tempfile.TemporaryDirectory() as tmpdir:
for subdir in ('train', 'test'):
origin_dir = os.path.join(origin_dataset_dir, subdir)
files = [f for f in os.listdir(origin_dir)
if os.path.splitext(f)[1] == '.mp4']
for file in files:
path = os.path.join(origin_dir, file)
name = os.path.splitext(file)[0]
wav_data = []
for ch in range(5):
temp_fn = f'{name}.{ch}.wav'
out_path = os.path.join(tmpdir, temp_fn)
subprocess.run(['ffmpeg', '-i', path,
'-map', f'0:{ch}', out_path])
sound, _ = librosa.load(out_path, sr=args.sr, mono=True)
wav_data.append(sound)
wav_data = np.stack(wav_data, axis=1)
out_path = os.path.join(
new_dataset_dir, subdir, f'{name}.wav')
sf.write(out_path, wav_data, args.sr)
if __name__ == '__main__':
main()
| true
| true
|
790d40e3b0a8176ed05f1b4dbc9c9d5c3395aefe
| 6,373
|
gyp
|
Python
|
android_webview/native/webview_native.gyp
|
tmpsantos/chromium
|
802d4aeeb33af25c01ee5994037bbf14086d4ac0
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
android_webview/native/webview_native.gyp
|
tmpsantos/chromium
|
802d4aeeb33af25c01ee5994037bbf14086d4ac0
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
android_webview/native/webview_native.gyp
|
tmpsantos/chromium
|
802d4aeeb33af25c01ee5994037bbf14086d4ac0
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'webview_native',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base_static',
'../../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../../cc/cc.gyp:cc',
'../../components/components.gyp:autofill_content_browser',
'../../components/components.gyp:web_contents_delegate_android',
'../../content/content.gyp:content_common',
'../../media/media.gyp:player_android',
'../../net/net.gyp:net',
'../../skia/skia.gyp:skia',
'../../storage/storage_common.gyp:storage_common',
'../../ui/base/ui_base.gyp:ui_base',
'../../ui/gfx/gfx.gyp:gfx',
'../../ui/gfx/gfx.gyp:gfx_geometry',
'../../webkit/storage_browser.gyp:storage',
'../../third_party/boringssl/boringssl.gyp:boringssl',
'android_webview_native_jni',
],
'include_dirs': [
'../..',
'../../skia/config',
],
'sources': [
'android_protocol_handler.cc',
'android_protocol_handler.h',
'android_webview_jni_registrar.cc',
'android_webview_jni_registrar.h',
'aw_assets.cc',
'aw_assets.h',
'aw_autofill_client.cc',
'aw_autofill_client.h',
'aw_browser_dependency_factory.cc',
'aw_browser_dependency_factory.h',
'aw_contents.cc',
'aw_contents.h',
'aw_contents_client_bridge.cc',
'aw_contents_client_bridge.h',
'aw_contents_io_thread_client_impl.cc',
'aw_contents_io_thread_client_impl.h',
'aw_contents_statics.cc',
'aw_contents_statics.h',
'aw_dev_tools_server.cc',
'aw_dev_tools_server.h',
'aw_form_database.cc',
'aw_form_database.h',
'aw_http_auth_handler.cc',
'aw_http_auth_handler.h',
'aw_media_url_interceptor.cc',
'aw_media_url_interceptor.h',
'aw_pdf_exporter.cc',
'aw_pdf_exporter.h',
'aw_picture.cc',
'aw_picture.h',
'aw_quota_manager_bridge_impl.cc',
'aw_quota_manager_bridge_impl.h',
'aw_resource.cc',
'aw_resource.h',
'aw_settings.cc',
'aw_settings.h',
'aw_web_contents_delegate.cc',
'aw_web_contents_delegate.h',
'aw_web_contents_view_delegate.cc',
'aw_web_contents_view_delegate.h',
'aw_web_preferences_populater_impl.cc',
'aw_web_preferences_populater_impl.h',
'aw_web_resource_response_impl.cc',
'aw_web_resource_response_impl.h',
'cookie_manager.cc',
'cookie_manager.h',
'input_stream_impl.cc',
'input_stream_impl.h',
'java_browser_view_renderer_helper.cc',
'java_browser_view_renderer_helper.h',
'net_init_native_callback.cc',
'permission/aw_permission_request.cc',
'permission/aw_permission_request.h',
'permission/aw_permission_request_delegate.cc',
'permission/aw_permission_request_delegate.h',
'permission/media_access_permission_request.cc',
'permission/media_access_permission_request.h',
'permission/permission_request_handler.cc',
'permission/permission_request_handler.h',
'permission/permission_request_handler_client.cc',
'permission/permission_request_handler_client.h',
'permission/simple_permission_request.cc',
'permission/simple_permission_request.h',
'state_serializer.cc',
'state_serializer.h',
],
'conditions': [
['video_hole==1', {
'sources': [
'external_video_surface_container_impl.cc',
'external_video_surface_container_impl.h',
],
}],
],
},
{
'target_name': 'cancellation_signal_android_jar_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'android_webview',
'input_java_class': 'android/os/CancellationSignal.class',
},
'includes': [ '../../build/jar_file_jni_generator.gypi' ],
},
{
'target_name': 'android_webview_native_jni',
'type': 'none',
'sources': [
'../java/src/org/chromium/android_webview/AndroidProtocolHandler.java',
'../java/src/org/chromium/android_webview/AwAssets.java',
'../java/src/org/chromium/android_webview/AwAutofillClient.java',
'../java/src/org/chromium/android_webview/AwContents.java',
'../java/src/org/chromium/android_webview/AwContentsClientBridge.java',
'../java/src/org/chromium/android_webview/AwContentsIoThreadClient.java',
'../java/src/org/chromium/android_webview/AwContentsStatics.java',
'../java/src/org/chromium/android_webview/AwCookieManager.java',
'../java/src/org/chromium/android_webview/AwDevToolsServer.java',
'../java/src/org/chromium/android_webview/AwFormDatabase.java',
'../java/src/org/chromium/android_webview/AwHttpAuthHandler.java',
'../java/src/org/chromium/android_webview/AwPdfExporter.java',
'../java/src/org/chromium/android_webview/AwPicture.java',
'../java/src/org/chromium/android_webview/AwQuotaManagerBridge.java',
'../java/src/org/chromium/android_webview/AwResource.java',
'../java/src/org/chromium/android_webview/AwSettings.java',
'../java/src/org/chromium/android_webview/AwWebContentsDelegate.java',
'../java/src/org/chromium/android_webview/AwWebResourceResponse.java',
'../java/src/org/chromium/android_webview/ExternalVideoSurfaceContainer.java',
'../java/src/org/chromium/android_webview/InputStreamUtil.java',
'../java/src/org/chromium/android_webview/JavaBrowserViewRendererHelper.java',
'../java/src/org/chromium/android_webview/permission/AwPermissionRequest.java',
],
'variables': {
'jni_gen_package': 'android_webview',
},
'includes': [ '../../build/jni_generator.gypi' ],
'dependencies': [
'cancellation_signal_android_jar_jni_headers',
],
},
],
}
| 40.852564
| 97
| 0.637376
|
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'webview_native',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base_static',
'../../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../../cc/cc.gyp:cc',
'../../components/components.gyp:autofill_content_browser',
'../../components/components.gyp:web_contents_delegate_android',
'../../content/content.gyp:content_common',
'../../media/media.gyp:player_android',
'../../net/net.gyp:net',
'../../skia/skia.gyp:skia',
'../../storage/storage_common.gyp:storage_common',
'../../ui/base/ui_base.gyp:ui_base',
'../../ui/gfx/gfx.gyp:gfx',
'../../ui/gfx/gfx.gyp:gfx_geometry',
'../../webkit/storage_browser.gyp:storage',
'../../third_party/boringssl/boringssl.gyp:boringssl',
'android_webview_native_jni',
],
'include_dirs': [
'../..',
'../../skia/config',
],
'sources': [
'android_protocol_handler.cc',
'android_protocol_handler.h',
'android_webview_jni_registrar.cc',
'android_webview_jni_registrar.h',
'aw_assets.cc',
'aw_assets.h',
'aw_autofill_client.cc',
'aw_autofill_client.h',
'aw_browser_dependency_factory.cc',
'aw_browser_dependency_factory.h',
'aw_contents.cc',
'aw_contents.h',
'aw_contents_client_bridge.cc',
'aw_contents_client_bridge.h',
'aw_contents_io_thread_client_impl.cc',
'aw_contents_io_thread_client_impl.h',
'aw_contents_statics.cc',
'aw_contents_statics.h',
'aw_dev_tools_server.cc',
'aw_dev_tools_server.h',
'aw_form_database.cc',
'aw_form_database.h',
'aw_http_auth_handler.cc',
'aw_http_auth_handler.h',
'aw_media_url_interceptor.cc',
'aw_media_url_interceptor.h',
'aw_pdf_exporter.cc',
'aw_pdf_exporter.h',
'aw_picture.cc',
'aw_picture.h',
'aw_quota_manager_bridge_impl.cc',
'aw_quota_manager_bridge_impl.h',
'aw_resource.cc',
'aw_resource.h',
'aw_settings.cc',
'aw_settings.h',
'aw_web_contents_delegate.cc',
'aw_web_contents_delegate.h',
'aw_web_contents_view_delegate.cc',
'aw_web_contents_view_delegate.h',
'aw_web_preferences_populater_impl.cc',
'aw_web_preferences_populater_impl.h',
'aw_web_resource_response_impl.cc',
'aw_web_resource_response_impl.h',
'cookie_manager.cc',
'cookie_manager.h',
'input_stream_impl.cc',
'input_stream_impl.h',
'java_browser_view_renderer_helper.cc',
'java_browser_view_renderer_helper.h',
'net_init_native_callback.cc',
'permission/aw_permission_request.cc',
'permission/aw_permission_request.h',
'permission/aw_permission_request_delegate.cc',
'permission/aw_permission_request_delegate.h',
'permission/media_access_permission_request.cc',
'permission/media_access_permission_request.h',
'permission/permission_request_handler.cc',
'permission/permission_request_handler.h',
'permission/permission_request_handler_client.cc',
'permission/permission_request_handler_client.h',
'permission/simple_permission_request.cc',
'permission/simple_permission_request.h',
'state_serializer.cc',
'state_serializer.h',
],
'conditions': [
['video_hole==1', {
'sources': [
'external_video_surface_container_impl.cc',
'external_video_surface_container_impl.h',
],
}],
],
},
{
'target_name': 'cancellation_signal_android_jar_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'android_webview',
'input_java_class': 'android/os/CancellationSignal.class',
},
'includes': [ '../../build/jar_file_jni_generator.gypi' ],
},
{
'target_name': 'android_webview_native_jni',
'type': 'none',
'sources': [
'../java/src/org/chromium/android_webview/AndroidProtocolHandler.java',
'../java/src/org/chromium/android_webview/AwAssets.java',
'../java/src/org/chromium/android_webview/AwAutofillClient.java',
'../java/src/org/chromium/android_webview/AwContents.java',
'../java/src/org/chromium/android_webview/AwContentsClientBridge.java',
'../java/src/org/chromium/android_webview/AwContentsIoThreadClient.java',
'../java/src/org/chromium/android_webview/AwContentsStatics.java',
'../java/src/org/chromium/android_webview/AwCookieManager.java',
'../java/src/org/chromium/android_webview/AwDevToolsServer.java',
'../java/src/org/chromium/android_webview/AwFormDatabase.java',
'../java/src/org/chromium/android_webview/AwHttpAuthHandler.java',
'../java/src/org/chromium/android_webview/AwPdfExporter.java',
'../java/src/org/chromium/android_webview/AwPicture.java',
'../java/src/org/chromium/android_webview/AwQuotaManagerBridge.java',
'../java/src/org/chromium/android_webview/AwResource.java',
'../java/src/org/chromium/android_webview/AwSettings.java',
'../java/src/org/chromium/android_webview/AwWebContentsDelegate.java',
'../java/src/org/chromium/android_webview/AwWebResourceResponse.java',
'../java/src/org/chromium/android_webview/ExternalVideoSurfaceContainer.java',
'../java/src/org/chromium/android_webview/InputStreamUtil.java',
'../java/src/org/chromium/android_webview/JavaBrowserViewRendererHelper.java',
'../java/src/org/chromium/android_webview/permission/AwPermissionRequest.java',
],
'variables': {
'jni_gen_package': 'android_webview',
},
'includes': [ '../../build/jni_generator.gypi' ],
'dependencies': [
'cancellation_signal_android_jar_jni_headers',
],
},
],
}
| true
| true
|
790d417c3a4c2e214fc4b7b647e87ecfc9d01f41
| 1,456
|
py
|
Python
|
tests/test_quom/test_source_directory.py
|
Chaoses-Ib/quom
|
8d13a41baea1a930d27a869ff468aa72fe25b100
|
[
"MIT"
] | 1
|
2021-07-31T18:29:24.000Z
|
2021-07-31T18:29:24.000Z
|
tests/test_quom/test_source_directory.py
|
Chaoses-Ib/quom
|
8d13a41baea1a930d27a869ff468aa72fe25b100
|
[
"MIT"
] | null | null | null |
tests/test_quom/test_source_directory.py
|
Chaoses-Ib/quom
|
8d13a41baea1a930d27a869ff468aa72fe25b100
|
[
"MIT"
] | null | null | null |
import os
from io import StringIO
from pathlib import Path
from quom import Quom
from quom.__main__ import main
FILE_MAIN_HPP = """
int foo = 3;
int foo();
"""
FILE_MAIN_CPP = """
int foo() { return 42; }
"""
RESULT = """
int foo = 3;
int foo();
int foo() { return 42; }
"""
def test_source_directory(fs):
os.makedirs('project/')
os.chdir('project/')
os.makedirs('include/')
os.makedirs('src/')
with open('include/main.hpp', 'w+') as file:
file.write(FILE_MAIN_HPP)
with open('src/main.cpp', 'w+') as file:
file.write(FILE_MAIN_CPP)
dst = StringIO()
Quom(Path('include/main.hpp'), dst)
assert dst.getvalue() != RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, relative_source_directories=[Path('../src')])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('src').resolve()])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('/project/src')])
assert dst.getvalue() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', './../src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', 'src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', '/project/src'])
assert Path('result.hpp').read_text() == RESULT
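# The '-S' flag exercised above is the CLI counterpart of the
# source_directories / relative_source_directories keyword arguments of Quom;
# from a shell the equivalent invocation would be (sketch):
#   quom include/main.hpp result.hpp -S src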
| 23.111111
| 85
| 0.618132
|
import os
from io import StringIO
from pathlib import Path
from quom import Quom
from quom.__main__ import main
FILE_MAIN_HPP = """
int foo = 3;
int foo();
"""
FILE_MAIN_CPP = """
int foo() { return 42; }
"""
RESULT = """
int foo = 3;
int foo();
int foo() { return 42; }
"""
def test_source_directory(fs):
os.makedirs('project/')
os.chdir('project/')
os.makedirs('include/')
os.makedirs('src/')
with open('include/main.hpp', 'w+') as file:
file.write(FILE_MAIN_HPP)
with open('src/main.cpp', 'w+') as file:
file.write(FILE_MAIN_CPP)
dst = StringIO()
Quom(Path('include/main.hpp'), dst)
assert dst.getvalue() != RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, relative_source_directories=[Path('../src')])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('src').resolve()])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('/project/src')])
assert dst.getvalue() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', './../src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', 'src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', '/project/src'])
assert Path('result.hpp').read_text() == RESULT
| true
| true
|
790d42b29d4886095215195ac111bf47a488281a
| 1,771
|
py
|
Python
|
examples/ad_manager/v201811/activity_service/get_all_activities.py
|
beamc83/python-googleads
|
6039d08e2d85850a46a70f24359d362ffde2f7ed
|
[
"Apache-2.0"
] | 2
|
2019-07-11T13:01:56.000Z
|
2019-07-11T13:01:58.000Z
|
examples/ad_manager/v201811/activity_service/get_all_activities.py
|
SoungMo/googleads-python-lib
|
fe86335c416e0571328c0a481c4b0cff863c01d9
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v201811/activity_service/get_all_activities.py
|
SoungMo/googleads-python-lib
|
fe86335c416e0571328c0a481c4b0cff863c01d9
|
[
"Apache-2.0"
] | 1
|
2020-07-19T14:24:05.000Z
|
2020-07-19T14:24:05.000Z
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all activities.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
activity_service = client.GetService('ActivityService', version='v201811')
# Create a statement to select activities.
statement = ad_manager.StatementBuilder(version='v201811')
  # Retrieve a small number of activities at a time, paging
# through until all activities have been retrieved.
while True:
response = activity_service.getActivitiesByStatement(statement.ToStatement(
))
if 'results' in response and len(response['results']):
for activity in response['results']:
# Print out some information for each activity.
print('Activity with ID "%d" and name "%s" was found.\n' %
(activity['id'], activity['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 34.72549
| 79
| 0.730661
|
"""This example gets all activities.
"""
from googleads import ad_manager
def main(client):
activity_service = client.GetService('ActivityService', version='v201811')
statement = ad_manager.StatementBuilder(version='v201811')
while True:
response = activity_service.getActivitiesByStatement(statement.ToStatement(
))
if 'results' in response and len(response['results']):
for activity in response['results']:
print('Activity with ID "%d" and name "%s" was found.\n' %
(activity['id'], activity['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| false
| true
|
790d455c9de574f94746f67e49a9c645288fbe6f
| 391
|
py
|
Python
|
script_2.py
|
bhattbhavesh91/docly-demo
|
78d1a412ff0dd9ed913b5890c7dee3defa96e59f
|
[
"Apache-2.0"
] | 2
|
2020-11-27T16:53:46.000Z
|
2020-11-30T18:29:45.000Z
|
script_2.py
|
bharathjinka09/docly-demo
|
abe4e31282855ba6349f3eafb790af7fd44b25ea
|
[
"Apache-2.0"
] | null | null | null |
script_2.py
|
bharathjinka09/docly-demo
|
abe4e31282855ba6349f3eafb790af7fd44b25ea
|
[
"Apache-2.0"
] | 1
|
2020-11-27T16:53:50.000Z
|
2020-11-27T16:53:50.000Z
|
from scipy import stats
import numpy as np
def calculate_critical_value(size: int, alpha: float) -> float:
t_dist = stats.t.ppf(1 - alpha / (2 * size), size - 2)
numerator = (size - 1) * np.sqrt(np.square(t_dist))
denominator = np.sqrt(size) * np.sqrt(size - 2 + np.square(t_dist))
critical_value = numerator / denominator
print("Grubbs Critical Value: {}".format(critical_value))
return critical_value
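# A minimal usage sketch (the sample below is hypothetical): flag the point
# with the largest deviation as an outlier when its Grubbs statistic
# G = max|x_i - mean| / s exceeds the critical value computed above.
data = np.array([12.1, 11.9, 12.3, 12.0, 35.7])
grubbs_statistic = np.max(np.abs(data - data.mean())) / data.std(ddof=1)
if grubbs_statistic > calculate_critical_value(len(data), alpha=0.05):
    print("Outlier detected (G = {:.3f})".format(grubbs_statistic))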
| 55.857143
| 72
| 0.659847
|
from scipy import stats
import numpy as np
def calculate_critical_value(size: int, alpha: float) -> float:
t_dist = stats.t.ppf(1 - alpha / (2 * size), size - 2)
numerator = (size - 1) * np.sqrt(np.square(t_dist))
denominator = np.sqrt(size) * np.sqrt(size - 2 + np.square(t_dist))
critical_value = numerator / denominator
print("Grubbs Critical Value: {}".format(critical_value))
return critical_value
| true
| true
|
790d471ac45976f00d4a6a1b21e1602503be07f2
| 2,903
|
py
|
Python
|
create_pdb_annotations.py
|
stephenshank/taed-pv
|
9b40f0abbe90312e50a1cf57a794609c3ebdf02b
|
[
"MIT"
] | null | null | null |
create_pdb_annotations.py
|
stephenshank/taed-pv
|
9b40f0abbe90312e50a1cf57a794609c3ebdf02b
|
[
"MIT"
] | null | null | null |
create_pdb_annotations.py
|
stephenshank/taed-pv
|
9b40f0abbe90312e50a1cf57a794609c3ebdf02b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 4 17:07:18 2016
@author: sshank
"""
# For now, print out the required annotations; later, change this to insert them into MySQL.
from Bio.Seq import Seq
from Bio import AlignIO
from Bio.SeqRecord import SeqRecord
from argparse import ArgumentParser
parser = ArgumentParser()
rst_help = 'Path to parsed RST file (created with parse_rst.py).'
parser.add_argument('-r', '--rst', metavar='RST', help=rst_help, dest='rst')
input_help = 'Path to input fasta file (aligned).'
parser.add_argument('-i', '--input', metavar='INPUT', help=input_help, dest='input')
args = parser.parse_args()
rst_filename = args.rst
input_filename = args.input
descendent_sequence = ''
ancestral_sequence = ''
descendent_annotations = []
descendent_changes = []
with open(rst_filename, 'r') as file:
for line in file:
split = line.split()
descendent_codon = split[6]
ancestral_codon = split[16]
if descendent_codon != '---':
descendent_amino_acid = Seq(descendent_codon).translate()
descendent_sequence += str(descendent_amino_acid)
if descendent_codon == ancestral_codon or ancestral_codon == '---':
# No change or missing information
descendent_annotations.append(0)
descendent_changes.append('-')
else:
ancestral_amino_acid = Seq(ancestral_codon).translate()
if descendent_amino_acid == ancestral_amino_acid:
# Synonymous change
descendent_annotations.append(1)
change = ancestral_codon + '->' + descendent_codon
descendent_changes.append(change)
else:
# Nonsynonymous change
descendent_annotations.append(2)
change = str(ancestral_amino_acid) + '->' + str(descendent_amino_acid)
descendent_changes.append(change)
taed_descendent = SeqRecord(descendent_sequence, id='taed_descendent')
pdb_annotations = []
pdb_changes = []
alignment = AlignIO.read(input_filename, 'fasta')
d_index = 0
p_index = 0
for k in range(alignment.get_alignment_length()):
descendent_amino_acid, pdb_amino_acid = alignment[:, k]
if pdb_amino_acid != '-' and descendent_amino_acid != '-':
# There is a chance that something happened... append and increment both
pdb_annotations.append(descendent_annotations[d_index])
pdb_changes.append(descendent_changes[d_index])
p_index += 1
d_index += 1
else:
if pdb_amino_acid != '-':
pdb_annotations.append(0)
pdb_changes.append('-')
p_index += 1
if descendent_amino_acid != '-':
d_index += 1
print(','.join([str(i) for i in pdb_annotations]))
print('\n')
print("'" + "','".join([str(i) for i in pdb_changes])+ "'")
| 36.2875
| 90
| 0.635549
|
from Bio.Seq import Seq
from Bio import AlignIO
from Bio.SeqRecord import SeqRecord
from argparse import ArgumentParser
parser = ArgumentParser()
rst_help = 'Path to parsed RST file (created with parse_rst.py).'
parser.add_argument('-r', '--rst', metavar='RST', help=rst_help, dest='rst')
input_help = 'Path to input fasta file (aligned).'
parser.add_argument('-i', '--input', metavar='INPUT', help=input_help, dest='input')
args = parser.parse_args()
rst_filename = args.rst
input_filename = args.input
descendent_sequence = ''
ancestral_sequence = ''
descendent_annotations = []
descendent_changes = []
with open(rst_filename, 'r') as file:
for line in file:
split = line.split()
descendent_codon = split[6]
ancestral_codon = split[16]
if descendent_codon != '---':
descendent_amino_acid = Seq(descendent_codon).translate()
descendent_sequence += str(descendent_amino_acid)
if descendent_codon == ancestral_codon or ancestral_codon == '---':
descendent_annotations.append(0)
descendent_changes.append('-')
else:
ancestral_amino_acid = Seq(ancestral_codon).translate()
if descendent_amino_acid == ancestral_amino_acid:
descendent_annotations.append(1)
change = ancestral_codon + '->' + descendent_codon
descendent_changes.append(change)
else:
descendent_annotations.append(2)
change = str(ancestral_amino_acid) + '->' + str(descendent_amino_acid)
descendent_changes.append(change)
taed_descendent = SeqRecord(descendent_sequence, id='taed_descendent')
pdb_annotations = []
pdb_changes = []
alignment = AlignIO.read(input_filename, 'fasta')
d_index = 0
p_index = 0
for k in range(alignment.get_alignment_length()):
descendent_amino_acid, pdb_amino_acid = alignment[:, k]
if pdb_amino_acid != '-' and descendent_amino_acid != '-':
pdb_annotations.append(descendent_annotations[d_index])
pdb_changes.append(descendent_changes[d_index])
p_index += 1
d_index += 1
else:
if pdb_amino_acid != '-':
pdb_annotations.append(0)
pdb_changes.append('-')
p_index += 1
if descendent_amino_acid != '-':
d_index += 1
print(','.join([str(i) for i in pdb_annotations]))
print('\n')
print("'" + "','".join([str(i) for i in pdb_changes])+ "'")
| true
| true
|
790d48fc9ee07093ca65a37b73e0a3c66616d9a7
| 13,802
|
py
|
Python
|
codebase/datasets/adres_dataset.py
|
petercuret/woonfraude
|
2602464f9b9a8bf901d89590b61205ba18fe697d
|
[
"MIT"
] | null | null | null |
codebase/datasets/adres_dataset.py
|
petercuret/woonfraude
|
2602464f9b9a8bf901d89590b61205ba18fe697d
|
[
"MIT"
] | null | null | null |
codebase/datasets/adres_dataset.py
|
petercuret/woonfraude
|
2602464f9b9a8bf901d89590b61205ba18fe697d
|
[
"MIT"
] | null | null | null |
####################################################################################################
"""
adres_dataset.py
This module implements several classes to perform dataset-specific downloading, saving and
data-transformation operations.
Written by Swaan Dekkers & Thomas Jongstra
"""
####################################################################################################
#############
## Imports ##
#############
from pathlib import Path
import pandas.io.sql as sqlio
import pandas as pd
import numpy as np
import requests
import psycopg2
import time
import os
import re
# Import own modules.
import datasets, clean
# Define HOME and DATA_PATH on a global level.
HOME = Path.home() # Home path for old VAO.
# USERNAME = os.path.basename(HOME)
# HOME = os.path.join('/data', USERNAME) # Set home for new VAO.
DATA_PATH = os.path.join(HOME, 'Documents/woonfraude/data/')
########################
## AdresDataset class ##
########################
class AdresDataset(datasets.MyDataset):
"""Create a dataset for the adres data."""
# Set the class attributes.
name = 'adres'
table_name = 'import_adres'
id_column = 'adres_id'
def extract_leegstand(self):
"""Create a column indicating leegstand (no inhabitants on the address)."""
self.data['leegstand'] = ~self.data.inwnrs.notnull()
self.version += '_leegstand'
self.save()
def enrich_with_woning_id(self):
"""Add woning ids to the adres dataframe."""
adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')
self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')
self.version += '_woningId'
self.save()
def prepare_bag(self, bag):
# To int
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].astype(int)
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].replace(0, -1)
# Fillna and replace ''
bag['huisletter_nummeraanduiding'] = bag['huisletter_nummeraanduiding'].replace('', 'None')
# bag['_openbare_ruimte_naam@bag'] = bag['_openbare_ruimte_naam@bag'].fillna('None')
bag['_openbare_ruimte_naam_nummeraanduiding'] = bag['_openbare_ruimte_naam_nummeraanduiding'].replace('', 'None')
# bag['_huisnummer_toevoeging@bag'] = bag['_huisnummer_toevoeging@bag'].fillna('None')
bag['huisnummer_toevoeging_nummeraanduiding'] = bag['huisnummer_toevoeging_nummeraanduiding'].replace('', 'None')
return bag
def prepare_adres(self, adres):
# To int
adres['hsnr'] = adres['hsnr'].astype(int)
adres['hsnr'] = adres['hsnr'].replace(0, -1)
return adres
def replace_string_nan_adres(self, adres):
adres['hsnr'] = adres['hsnr'].replace(-1, np.nan)
adres['sttnaam'] = adres['sttnaam'].replace('None', np.nan)
adres['hsltr'] = adres['hsltr'].replace('None', np.nan)
adres['toev'] = adres['toev'].replace('None', np.nan)
adres['huisnummer_nummeraanduiding'] = adres['huisnummer_nummeraanduiding'].replace(-1, np.nan)
adres['huisletter_nummeraanduiding'] = adres['huisletter_nummeraanduiding'].replace('None', np.nan)
adres['_openbare_ruimte_naam_nummeraanduiding'] = adres['_openbare_ruimte_naam_nummeraanduiding'].replace('None', np.nan)
adres['huisnummer_toevoeging_nummeraanduiding'] = adres['huisnummer_toevoeging_nummeraanduiding'].replace('None', np.nan)
return adres
def match_bwv_bag(self, adres, bag):
# Merge dataframes on adres dataframe.
new_df = pd.merge(adres, bag, how='left', left_on=['sttnaam','hsnr'], right_on = ['_openbare_ruimte_naam_nummeraanduiding', 'huisnummer_nummeraanduiding'])
# Find ids that have a direct match and ids that have multiple matches.
g = new_df.groupby('adres_id')
df_direct = g.filter(lambda x: len(x) == 1)
df_multiple = g.filter(lambda x: len(x) > 1)
# Make the multiple-match set more specific to construct a perfect match.
df_multiple = df_multiple[(df_multiple['hsltr'] == df_multiple['huisletter_nummeraanduiding']) & (df_multiple['toev'] == df_multiple['huisnummer_toevoeging_nummeraanduiding'])]
# Concat df_direct and df_multiple.
df_result = pd.concat([df_direct, df_multiple])
# Because of the separation of an object, there can be two matching objects. Keep the oldest object with a definitive point.
df_result = df_result.sort_values(['adres_id', 'status_coordinaat_code'])
df_result = df_result.drop_duplicates(subset='adres_id', keep='first')
# Add addresses without a match.
final_df = pd.merge(adres, df_result, how='left', on='adres_id', suffixes=('', '_y'))
final_df.drop(list(final_df.filter(regex='_y$')), axis=1, inplace=True)
# Set the name of the final adres dataframe again.
final_df.name = 'adres'
return final_df
def impute_values_for_bagless_addresses(self, adres):
"""Impute values for adresses where no BAG-match could be found."""
clean.impute_missing_values(adres)
# clean.impute_missing_values_mode(adres, ['status_coordinaat_code@bag'])
adres.fillna(value={'huisnummer_nummeraanduiding': 0,
'huisletter_nummeraanduiding': 'None',
'_openbare_ruimte_naam_nummeraanduiding': 'None',
'huisnummer_toevoeging_nummeraanduiding': 'None',
'type_woonobject_omschrijving': 'None',
'eigendomsverhouding_id': 'None',
'financieringswijze_id': -1,
'gebruik_id': -1,
'reden_opvoer_id': -1,
'status_id_verblijfsobject': -1,
'toegang_id': 'None'}, inplace=True)
return adres
def enrich_with_bag(self, bag):
"""Enrich the adres data with information from the BAG data. Uses the bag dataframe as input."""
bag = self.prepare_bag(bag)
self.data = self.prepare_adres(self.data)
self.data = self.match_bwv_bag(self.data, bag)
self.data = self.replace_string_nan_adres(self.data)
self.data = self.impute_values_for_bagless_addresses(self.data)
self.version += '_bag'
self.save()
print("The adres dataset is now enriched with BAG data.")
def enrich_with_personen_features(self, personen):
"""Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input."""
# Create simple handle to the adres data.
adres = self.data
# Compute age of people in years (float)
today = pd.to_datetime('today')
# Set all dates within range allowed by Pandas (584 years?)
personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')
# Get the most frequent birthdate (mode).
geboortedatum_mode = personen['geboortedatum'].mode()[0]
# Compute the age (result is a TimeDelta).
personen['leeftijd'] = today - personen['geboortedatum']
# Convert the age to an approximation in years ("smearing out" the leap years).
personen['leeftijd'] = personen['leeftijd'].apply(lambda x: x.days / 365.25)
# Find the matching address ids between the adres df and the personen df.
adres_ids = adres.adres_id
personen_adres_ids = personen.ads_id_wa
intersect = set(adres_ids).intersection(set(personen_adres_ids))
# Iterate over all matching address ids and find all people at each address.
inhabitant_locs = {}
print("Now looping over all address ids that have a link with one or more inhabitants...")
for i, adres_id in enumerate(intersect):
if i % 1000 == 0:
print(i)
inhabitant_locs[adres_id] = personen_adres_ids[personen_adres_ids == adres_id]
# Create a new column in the dataframe showing the amount of people at each address.
# TODO: this step currently takes a few minutes to complete, should still be optimized.
adres['aantal_personen'] = 0
adres['aantal_vertrokken_personen'] = -1
adres['aantal_overleden_personen'] = -1
adres['aantal_niet_uitgeschrevenen'] = -1
adres['leegstand'] = True
adres['leeftijd_jongste_persoon'] = -1.
adres['leeftijd_oudste_persoon'] = -1.
adres['aantal_kinderen'] = 0
adres['percentage_kinderen'] = -1.
adres['aantal_mannen'] = 0
adres['percentage_mannen'] = -1.
adres['gemiddelde_leeftijd'] = -1.
adres['stdev_leeftijd'] = -1.
adres['aantal_achternamen'] = 0
adres['percentage_achternamen'] = -1.
for i in range(1,8):
adres[f'gezinsverhouding_{i}'] = 0
adres[f'percentage_gezinsverhouding_{i}'] = 0.
print("Now looping over all rows in the adres dataframe in order to add person information...")
for i in adres.index:
if i % 1000 == 0:
print(i)
row = adres.iloc[i]
adres_id = row['adres_id']
try:
# Get the inhabitants for the current address.
inhab_locs = inhabitant_locs[adres_id].keys()
inhab = personen.loc[inhab_locs]
# Check whether any registered inhabitants have left Amsterdam or have passed away.
aantal_vertrokken_personen = sum(inhab["vertrekdatum_adam"].notnull())
aantal_overleden_personen = sum(inhab["overlijdensdatum"].notnull())
aantal_niet_uitgeschrevenen = len(inhab[inhab["vertrekdatum_adam"].notnull() | inhab["overlijdensdatum"].notnull()])
adres.at[i, 'aantal_vertrokken_personen'] = aantal_vertrokken_personen
adres.at[i, 'aantal_overleden_personen'] = aantal_overleden_personen
adres.at[i, 'aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen
# If there are more inhabitants than people that are incorrectly still registered, then there is no 'leegstand'.
if len(inhab) > aantal_niet_uitgeschrevenen:
adres.at[i, 'leegstand'] = False
# Total number of persons (int).
aantal_personen = len(inhab)
adres.at[i, 'aantal_personen'] = aantal_personen
# Age of the youngest person (float).
leeftijd_jongste_persoon = min(inhab['leeftijd'])
adres.at[i, 'leeftijd_jongste_persoon'] = leeftijd_jongste_persoon
# Age of the oldest person (float).
leeftijd_oudste_persoon = max(inhab['leeftijd'])
adres.at[i, 'leeftijd_oudste_persoon'] = leeftijd_oudste_persoon
# Number of children registered at the address (int/float).
aantal_kinderen = sum(inhab['leeftijd'] < 18)
adres.at[i, 'aantal_kinderen'] = aantal_kinderen
adres.at[i, 'percentage_kinderen'] = aantal_kinderen / aantal_personen
# Number of men (int/float).
aantal_mannen = sum(inhab.geslacht == 'M')
adres.at[i, 'aantal_mannen'] = aantal_mannen
adres.at[i, 'percentage_mannen'] = aantal_mannen / aantal_personen
# Mean age (float).
gemiddelde_leeftijd = inhab.leeftijd.mean()
adres.at[i, 'gemiddelde_leeftijd'] = gemiddelde_leeftijd
# Standard deviation of the ages (float). Set to 0 when the sample size is 1.
stdev_leeftijd = inhab.leeftijd.std()
adres.at[i, 'stdev_leeftijd'] = stdev_leeftijd if aantal_personen > 1 else 0
# Number of distinct surnames (int/float).
aantal_achternamen = inhab.naam.nunique()
adres.at[i, 'aantal_achternamen'] = aantal_achternamen
adres.at[i, 'percentage_achternamen'] = aantal_achternamen / aantal_personen
# Gezinsverhouding (family relation): frequency count per class (int/float).
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[i, f'gezinsverhouding_{key}'] = val
adres.at[i, f'percentage_gezinsverhouding_{key}'] = val / aantal_personen
except (KeyError, ValueError) as e:
pass
print("...done!")
self.data = adres
self.version += '_personen'
self.save()
print("The adres dataset is now enriched with personen data.")
def add_hotline_features(self, hotline):
"""Add the hotline features to the adres dataframe."""
# Create a temporary merged df using the adres and hotline dataframes.
merge = self.data.merge(hotline, on='wng_id', how='left')
# Create a group for each adres_id
adres_groups = merge.groupby(by='adres_id')
# Count the number of hotline meldingen per group/adres_id.
# 'id' should be the primary key of hotline df, so it is usable for hotline entry counting.
hotline_counts = adres_groups['id'].agg(['count'])
# Rename column
hotline_counts.columns = ['aantal_hotline_meldingen']
# Enrich the 'adres' dataframe with the computed hotline counts.
self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
self.version += '_hotline'
self.save()
print("The adres dataset is now enriched with hotline data.")
| 45.853821
| 184
| 0.624402
|
sonen
# Gezinsverhouding (family relation): frequency count per class (int/float).
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[i, f'gezinsverhouding_{key}'] = val
adres.at[i, f'percentage_gezinsverhouding_{key}'] = val / aantal_personen
except (KeyError, ValueError) as e:
pass
print("...done!")
self.data = adres
self.version += '_personen'
self.save()
print("The adres dataset is now enriched with personen data.")
def add_hotline_features(self, hotline):
# Create a temporary merged df using the adres and hotline dataframes.
merge = self.data.merge(hotline, on='wng_id', how='left')
# Create a group for each adres_id
adres_groups = merge.groupby(by='adres_id')
# Count the number of hotline meldingen per group/adres_id.
# 'id' should be the primary key of hotline df, so it is usable for hotline entry counting.
hotline_counts = adres_groups['id'].agg(['count'])
# Rename column
hotline_counts.columns = ['aantal_hotline_meldingen']
# Enrich the 'adres' dataframe with the computed hotline counts.
self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
self.version += '_hotline'
self.save()
print("The adres dataset is now enriched with hotline data.")
| true
| true
|
790d4981cc1ea2aea3a28da4dcf1fa9ebe8a2314
| 2,270
|
py
|
Python
|
scripts/framework-applications/export-framework-applications-at-close.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 1
|
2020-06-23T01:55:31.000Z
|
2020-06-23T01:55:31.000Z
|
scripts/framework-applications/export-framework-applications-at-close.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 267
|
2015-10-12T12:43:52.000Z
|
2021-08-19T10:38:55.000Z
|
scripts/framework-applications/export-framework-applications-at-close.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 7
|
2015-11-11T16:47:41.000Z
|
2021-04-10T18:03:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
!!! This generally needs to be run right after the close of applications for a framework, and passed to product
!!! managers & CCS.
Generate a CSV with per-lot draft statistics for each supplier who registered interest in the framework,
whether or not they made a complete application in the end.
Fields included:
* Supplier ID
* Supplier DM name
* Application / no_application
* The status of their declaration
* The number of services submitted and left in draft per lot
Usage:
scripts/framework-applications/export-framework-applications-at-close.py <framework_slug> <stage> <auth_token>
<output-dir> [-e <exclude_suppliers>]
Example:
scripts/framework-applications/export-framework-applications-at-close.py g-cloud-11 preview myToken path/to/myfolder
-e 123,456,789
"""
import os
import sys
from datetime import datetime
from dmapiclient import DataAPIClient
from docopt import docopt
sys.path.insert(0, '.')
from dmscripts.export_framework_applications_at_close import GenerateFrameworkApplicationsCSV
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == "__main__":
arguments = docopt(__doc__)
output_dir = arguments['<output-dir>']
stage = arguments['<stage>']
framework_slug = arguments['<framework_slug>']
filename = "{}-how-application-looked-at-close-{}-{}.csv".format(
framework_slug,
stage,
datetime.utcnow().strftime("%Y-%m-%d_%H.%M-")
)
# Create output directory if it doesn't already exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
client = DataAPIClient(
base_url=get_api_endpoint_from_stage(stage),
auth_token=arguments['<auth_token>'],
)
csv_builder = GenerateFrameworkApplicationsCSV(
client=client,
target_framework_slug=framework_slug
)
if arguments.get('<exclude_suppliers>') is not None: # updates the generator with any IDs the user wants excluded
csv_builder.excluded_supplier_ids = [int(n) for n in arguments['<exclude_suppliers>'].split(',')]
csv_builder.populate_output()
with open(os.path.join(output_dir, filename), 'w') as csvfile:
csv_builder.write_csv(outfile=csvfile)
| 33.382353
| 120
| 0.725551
|
import os
import sys
from datetime import datetime
from dmapiclient import DataAPIClient
from docopt import docopt
sys.path.insert(0, '.')
from dmscripts.export_framework_applications_at_close import GenerateFrameworkApplicationsCSV
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == "__main__":
arguments = docopt(__doc__)
output_dir = arguments['<output-dir>']
stage = arguments['<stage>']
framework_slug = arguments['<framework_slug>']
filename = "{}-how-application-looked-at-close-{}-{}.csv".format(
framework_slug,
stage,
datetime.utcnow().strftime("%Y-%m-%d_%H.%M-")
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
client = DataAPIClient(
base_url=get_api_endpoint_from_stage(stage),
auth_token=arguments['<auth_token>'],
)
csv_builder = GenerateFrameworkApplicationsCSV(
client=client,
target_framework_slug=framework_slug
)
if arguments.get('<exclude_suppliers>') is not None: # updates the generator with any IDs the user wants excluded
csv_builder.excluded_supplier_ids = [int(n) for n in arguments['<exclude_suppliers>'].split(',')]
csv_builder.populate_output()
with open(os.path.join(output_dir, filename), 'w') as csvfile:
csv_builder.write_csv(outfile=csvfile)
| true
| true
|
790d49c171e041ae3b34c4e3c77a0cc920b3f778
| 99,352
|
py
|
Python
|
jax/experimental/jax2tf/jax2tf.py
|
ho-oto/jax
|
e0f285fd218aa704fa65c47ab6e7695f4a38ddbd
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/jax2tf.py
|
ho-oto/jax
|
e0f285fd218aa704fa65c47ab6e7695f4a38ddbd
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/jax2tf.py
|
ho-oto/jax
|
e0f285fd218aa704fa65c47ab6e7695f4a38ddbd
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental module transforms JAX functions to be executed by TensorFlow."""
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api_util, config
from jax._src import api
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
import jax._src.random
from jax.api_util import flatten_fun
from jax.interpreters import ad
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax.lib import xla_client
from . import shape_poly
import numpy as np
import tensorflow as tf # type: ignore[import]
# These don't have public equivalents.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
PolyShape = shape_poly.PolyShape
# The scope name need to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
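# For instance, given the regexes above (illustrative inputs):
#   _sanitize_scope_name("jit(f)")  == "jit_f_"   # '(' and ')' are replaced
#   _sanitize_scope_name("_inner")  == "._inner"  # must not start with '_'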
# A value suitable for use in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
DType = Any
PrecisionType = int # Enum xla_data.PrecisionConfig.Precision
def _is_tfval(v: TfVal) -> bool:
if isinstance(v, (tf.Tensor, tf.Variable)):
return True
try:
# Note: this conversion is overkill and just intended as a type check; this
# code is in principle only run if config.jax_enable_checks is True.
# TODO: it is not true that this code is run only with jax_enable_checks.
_safe_convert_to_tensor(v)
return True
except ValueError:
return False
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
dtype = dtype if dtype else (val.dtype if hasattr(val, "dtype") else None)
conversion_type = to_tf_dtype(dtype) if dtype else None
# The float0 type is not known to TF.
if dtype and dtype == dtypes.float0:
val = np.zeros(np.shape(val), conversion_type.as_numpy_dtype)
return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive, Callable[..., Any]] = {}
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract value are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive, if this
# variable is disabled, we try harder to use only standard TF ops if they are
# applicable to the concrete use case; if the resulting conversion path ends up
# requiring a TFXLA operation, an exception is thrown instead.
_enable_xla = True
def _xla_disabled_error(primitive_name: str,
extra_msg: Optional[str] = None) -> Exception:
assert not _enable_xla
msg = f"Call to {primitive_name} cannot be converted with enable_xla=False."
if extra_msg:
msg += f" {extra_msg}"
return NotImplementedError(msg)
@functools.partial(api_util.api_hook, tag="jax2tf_convert")
def convert(fun: Callable,
*,
polymorphic_shapes: Optional[Sequence[Any]] = None,
with_gradient=True,
enable_xla=True) -> Callable:
"""Transforms `fun` to be executed by TensorFlow.
See
[README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or nested standard Python containers (tuple/list/dict) thereof
(pytrees).
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
.. warning:: The shape-polymorphic conversion is an experimental feature.
It is meant to be sound, but it is known to reject some JAX programs
that are shape polymorphic. The details of this feature can change. It
should be a Python object with the same pytree structure as, or a prefix
of, the tuple of arguments to the function, but with a shape
specification corresponding to each argument. The default value is
`None`, which is a shortcut for a tuple of `None` one for each argument,
denoting that all shapes are monomorphic.
See [how optional parameters are matched to
arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification for an array argument should be an object
`PolyShape(dim0, dim1, ..., dimn)`
where each `dim` is a dimension specification: a positive integer denoting
a monomorphic dimension of the given size, or a string denoting a
dimension variable assumed to range over non-zero dimension sizes, or
the special placeholder string "_" denoting a monomorphic dimension
whose size is given by the actual argument. As a shortcut, an Ellipsis
suffix in the list of dimension specifications stands for a list of "_"
placeholders. For convenience, a shape specification can also be given
as a string
representation, e.g.: "batch, ...", "batch, height, width, _", possibly
with surrounding parentheses: "(batch, ...)".
The conversion fails if it cannot ensure that it would produce the same
sequence of TF ops for any non-zero values of the dimension variables.
polymorphic_shapes are only supported for positional arguments; shape
polymorphism is not supported for keyword arguments.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
in_shapes: DEPRECATED in favor of `polymorphic_shapes`.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is saved
in a SavedModel, the custom gradients are currently lost and an error will
be raised if a gradient computation is attempted. This is due to a current
bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
"""
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations." +
f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"),
a) # type: ignore
for i, a in enumerate(args))
kwargs = {k: tf.identity(v, f"jax2tf_arg_{k}") for k, v in kwargs.items()}
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, kwargs))
if polymorphic_shapes is None:
polymorphic_shapes_ = (None,) * len(args)
else:
if not isinstance(polymorphic_shapes, Sequence) or len(args) != len(polymorphic_shapes):
msg = ("polymorphic_shapes must be a sequence with the same length as the positional argument list "
f"({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.")
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
# Expand the polymorphic_shapes to match the argument pytree
polymorphic_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert polymorphic_shapes",
in_tree.children()[0],
polymorphic_shapes_))
# Add kwargs shapes.
polymorphic_shapes_flat = polymorphic_shapes_flat + tuple(
(None,) * (len(args_flat) - len(polymorphic_shapes_flat)))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the polymorphic_shapes if given. May create new shape
# variables.
args_avals_flat, shapeenv = _args_to_avals_and_env(args_flat,
polymorphic_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError(
"Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if polymorphic_shapes is None:
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(
in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape)
for out_aval in _out_cts_avals)) # type: ignore
vjp_polymorphic_shapes = [
args_polymorphic_shapes, out_cts_polymorphic_shapes
]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(
fun_vjp_jax,
with_gradient=False,
polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(
converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [
tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw
]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
# Internals
def _interpret_fun(
fun: lu.WrappedFun, in_vals: Sequence[TfVal],
in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
with core.new_base_main(TensorFlowTrace) as main: # type: ignore
fun = _interpret_subtrace(fun, main, in_avals)
with core.new_sublevel():
out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = \
fun.call_wrapped(*in_vals)
del main
return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
"""Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)
-> Sequence[TfVal]`.
"""
def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
# We wrap the jax_impl under _interpret_fun to abstract the TF values
# from jax_impl and turn them into JAX abstract values.
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return jax_results if multiple_results else [jax_results]
tf_results_with_avals = _interpret_fun(
lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
tf_results, _ = util.unzip2(tf_results_with_avals)
return tf_results if multiple_results else tf_results[0]
return wrapped
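# A registration sketch (hypothetical primitive `some_p` and JAX impl rule):
#   tf_impl_with_avals[some_p] = _convert_jax_impl(_some_jax_impl,
#                                                  multiple_results=False)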
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
in_avals: Sequence[core.AbstractValue],
*in_vals: TfVal):
trace = TensorFlowTrace(main, core.cur_sublevel())
in_tracers = tuple(
TensorFlowTracer(trace, val, aval)
for val, aval in util.safe_zip(in_vals, in_avals))
# The outs may be core.unit, see comment in TensorFlowTrace.pure.
outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]
out_tracers: Iterable[TensorFlowTracer] = (
map(trace.full_raise, outs)) # type: ignore
out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
tuple((t.val, t.aval) for t in out_tracers))
yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
"""Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
"""
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple(v for v, _ in out_with_avals)
### tracer
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
"""Generate a TF shape, possibly containing None for polymorphic dimensions."""
return tuple(
map(lambda d: None if isinstance(d, shape_poly.DimVar) else d,
aval.shape)) # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
"""Called for constants that occur in the program, or for input values to the converted function.
The returned shape may have unknown components, but only when called for
inputs.
"""
if isinstance(val, (tf.Tensor, tf.Variable)):
# May be partially known
return tuple(val.shape), to_jax_dtype(val.dtype)
else: # Must be a numeric value
assert not config.jax_enable_checks or _is_tfval(val), f"Non TfVal: {val}"
raw_aval = xla.abstractify(val)
return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]
# A dimension environment maps dimension variables to TF expressions that
# compute the value of the dimension. These expressions refer to the TF
# function arguments.
_ShapeEnv = Dict[shape_poly.DimVar, TfVal]
def _args_to_avals_and_env(args: Sequence[TfVal],
polymorphic_shapes: Sequence[Optional[Union[str, PolyShape]]]) -> \
Tuple[Sequence[core.AbstractValue], _ShapeEnv]:
"""Computes abstract values and a dimension environment for arguments.
Args:
args: the arguments, TF inputs.
polymorphic_shapes: the polymorphic specifications for the arguments.
Returns: a tuple of a sequence of abstract values corresponding to the
arguments and a dimension environment.
"""
shapeenv: _ShapeEnv = {}
def input_aval(arg: TfVal,
polymorphic_shape: Optional[str]) -> core.AbstractValue:
"""The abstract value for an input."""
raw_shape, dtype = _tfval_shape_dtype(arg)
aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
for i, d in enumerate(aval_shape):
if type(d) is int:
assert d == np.shape(arg)[i]
elif type(d) is shape_poly.DimVar and d not in shapeenv:
# Even if the shape of `arg` is known, we still use `tf.shape` for
# safety, because the promise is that we will convert the function
# to work for any value of the dimension.
shapeenv[d] = tf.shape(arg)[i] # type: ignore[index]
else:
# TODO: add an assertion tf.shape(arg)[i] == env[d]
pass
return core.ShapedArray(aval_shape, dtype)
avals = tuple(map(input_aval, args, polymorphic_shapes)) # type: ignore
return avals, shapeenv
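# Example (sketch): for an argument with TF shape (None, 4) and polymorphic
# shape spec "(batch, _)", this returns ShapedArray((batch, 4), dtype) and a
# shapeenv mapping the `batch` dimension variable to tf.shape(arg)[0].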
# A shape environment maps shape variables to TfVal.
_shape_env = {} # type: _ShapeEnv
def _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:
assert all(map(
lambda x: x is not None,
shape)), (f"Argument shape should be a valid JAX shape but got {shape}")
return tuple(_shape_env[d] # type: ignore[index]
if type(d) is shape_poly.DimVar else d
for d in shape)
def shape_as_value(x):
"""Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
"""
# return shape_as_value_p.bind(x)
raise NotImplementedError("shape_as_value is deprecated")
# # TODO: move this to masking or to some common library, if approved
# shape_as_value_p = core.Primitive("shape_as_value")
# shape_as_value_p.multiple_results = True
# def _shape_as_value_impl(x):
# x_shape = np.shape(x)
# def dim_to_int(dim: shape_poly.DimSize) -> int:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is None:
# msg = ("shape_as_value is not implemented for non-constant shapes "
# "except for masking and jax2tf. "
# f"Has shape: {x_shape}")
# raise TypeError(msg)
# else:
# return dim_int
# return tuple(map(dim_to_int, x_shape))
#
# shape_as_value_p.def_impl(_shape_as_value_impl)
#
# def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
# rank = len(x_aval.shape) # type: ignore[attr-defined]
# return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
#
# shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
#
# def _shape_as_value_translation(comp, x):
# return xla_client._xla.ops.Tuple(comp,
# tuple(xb.constant(comp, d)
# for d in comp.GetShape(x).dimensions()))
#
# xla.translations[shape_as_value_p] = _shape_as_value_translation
#
# def _shape_as_value_jvp_rule(primals, tangents):
# # The shape does not depend on the contents of the input
# x, = primals
# zero = ad.Zero.from_value(0.)
# return shape_as_value(x), (zero,) * len(x.shape)
#
# ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
#
# def _shape_as_value__batching_rule(batched_args, batch_dims):
# xv, = batched_args
# batch_dim, = batch_dims
# batch_size = xv.shape[batch_dim]
# batched_shape = shape_as_value(xv)
# one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
# res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
# return res, (0,) * len(one_shape)
#
# batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
#
# def _shape_as_value_masking_rule(operands, operands_logical_shapes):
# x_logical_shape, = operands_logical_shapes
# return tuple(x_logical_shape)
#
# masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
#
# def _shape_as_value_tf(x: TfVal,
# _in_avals: Sequence[core.AbstractValue],
# _out_aval: core.AbstractValue) -> TfVal:
# x_aval = _in_avals[0]
# def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is not None:
# return tf.convert_to_tensor(dim_int)
# else:
# return tf.shape(x)[dim_idx]
# return tuple(dim_to_tfval(dim, dim_idx)
# for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
#
# tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
# pylint: disable=assigning-non-slot
class TensorFlowTracer(core.Tracer):
"""Tracer class that boxes a TF value and a JAX abstract value.
In addition to the TF value we carry the JAX abstract value because there are
two cases when it cannot be recovered from the value: (a) when the abstract
value is core.abstract_unit, in which case the value is tf.nan; (b) when we
are converting with polymorphic shapes, in which case the shape of the value
may have dimensions set to `None`, which the JAX abstract value may contain
more precise information.
When the value has a partially-known shape, the dimensions marked as `None`
must correspond to non-constant dimensions in the abstract value.
See README.md for details.
"""
# val: TfVal
# _aval: core.AbstractValue
__slots__ = ["val", "_aval"]
def __init__(self, trace: "TensorFlowTrace", val: TfVal,
aval: core.AbstractValue):
self._trace = trace
self._aval = aval
if aval is core.abstract_unit:
self.val = val
elif isinstance(val, (tf.Tensor, tf.Variable)):
val_shape, val_dtype = _tfval_shape_dtype(val)
aval_dtype = np.dtype(self._aval.dtype) # type: ignore[attr-defined]
if (val_dtype != aval_dtype and not config.x64_enabled and
(val_dtype == tf.int32 and aval_dtype == jnp.int64 or
val_dtype == tf.int64 and aval_dtype == jnp.int32 or
val_dtype == tf.float32 and aval_dtype == jnp.float64 or
val_dtype == tf.float64 and aval_dtype == jnp.float32 or
val_dtype == tf.complex128 and aval_dtype == jnp.complex64)):
# If JAX does not have x64 bit mode enabled, it will force the 64-bit
# values to use 32-bit precision. In order to make the TF conversion
# follow JAX's rules, we cast the TF values down to 32-bit mode.
val = tf.cast(val, dtype=aval_dtype)
val_dtype = aval_dtype
if config.jax_enable_checks:
assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
for aval_dim, val_dim in util.safe_zip(
self._aval.shape, val_shape): # type: ignore[attr-defined]
if val_dim is None:
assert isinstance(
aval_dim, shape_poly.DimVar
), f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
elif not isinstance(aval_dim, shape_poly.DimVar):
assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
else:
# We have a TF value with known shape, and the abstract shape is a shape variable.
try:
aval_int = int(_eval_shape([aval_dim])) # type: ignore
except TypeError:
continue
assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}." # type: ignore
self.val = val
else: # Must be a numeric value
self.val = _safe_convert_to_tensor(
val, dtype=self._aval.dtype) # type: ignore[attr-defined]
@property
def aval(self):
return self._aval
def full_lower(self):
return self
class TensorFlowTrace(core.Trace):
"""Trace class that underlies the jax2tf transformation.
We are going to ensure that jax2tf.convert is never nested inside other
transformations. This is sufficient for intended use cases (converting
fully-transformed JAX code). It also simplifies our job because we do not have
to handle situations where we apply primitives on a mix of TF values and
  JAX tracers from an outer transformation. E.g., for addition both the TF
  values and the JAX tracers have an override, and they get confused if they
  see values from the other world.
  Hence a TFT trace does not interact with non-TFT traces at a lower level. For
  higher-order control-flow primitives we recursively invoke _interpret_fun on
  the body of the conditional, which will create a nested TFT.
We do want to allow transformations nested inside a TensorFlowTrace (TFT), but
those will introduce their own MainTrace, and any operations involving those
will be done on those traces, i.e., not a concern for TFT.
"""
def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
"""Lifts a non-Tracer into the TensorFlowTracer.
This function may be called by way of trace.full_raise.
The value may be a core.unit. During JAX transformations we sometimes
produce a Jaxpr that has arguments of abstract value core.abstract_unit
and results equal to core.unit. These are arguments and results that are
not used in the computation.
In TF world, we represent core.unit as NaN. This is safe, as these values
should never be used.
"""
if val is core.unit:
return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),
core.abstract_unit)
else:
shape, dtype = _tfval_shape_dtype(val)
return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))
def lift(self, val: core.Tracer) -> TensorFlowTracer:
# This would be called when we need to raise a tracer from a lower-level
# main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
# inside another transform, there are no lower-level main traces.
assert False
def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
    # This is called when we need to raise a tracer from the same main trace,
# but a lower sublevel. This could come from a nested jit.
return TensorFlowTracer(self, val.val, val._aval)
def process_primitive(self, primitive: core.Primitive,
tracers: Sequence[TensorFlowTracer],
params) -> TensorFlowTracer:
impl, impl_needs_avals = self.get_primitive_impl(primitive)
args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
out_aval = primitive.abstract_eval(*args_avals, **params)
args_tf: Sequence[TfVal] = [t.val for t in tracers]
if impl_needs_avals:
val_out: TfVal = impl(
*args_tf,
_in_avals=args_avals, # type: ignore
_out_aval=out_aval,
**params)
else:
val_out = impl(*args_tf, **params)
if primitive.multiple_results:
out = [
TensorFlowTracer(self, v, a)
for v, a in util.safe_zip(val_out, out_aval)
] # type: ignore
else:
out = TensorFlowTracer(self, val_out, out_aval) # type: ignore
# Check that the impl rule returned a value of expected shape and dtype
# TODO: adapt this to match polymorphic shapes
if config.jax_enable_checks:
if primitive.multiple_results:
for o, expected_aval in zip(out, out_aval): # type: ignore
assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
else:
assert out.aval == out_aval, ( # type: ignore
f"{primitive}: out.aval = {out.aval}; expected {out_aval}"
) # type: ignore
return out # type: ignore
def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
tracers: Sequence[TensorFlowTracer], params):
assert call_primitive.multiple_results
vals: Sequence[TfVal] = [t.val for t in tracers]
f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
with core.new_sublevel():
if call_primitive == core.named_call_p:
with tf.name_scope(_sanitize_scope_name(params["name"])):
vals_out: Sequence[Tuple[TfVal, core.AbstractValue]] = \
f.call_wrapped(*vals)
elif call_primitive == sharded_jit.sharded_call_p:
vals_out = _sharded_call(f, vals, **params)
else:
vals_out = f.call_wrapped(*vals)
return [TensorFlowTracer(self, v, a) for v, a in vals_out]
def post_process_call(self, call_primitive: core.Primitive,
out_tracers: Sequence[TensorFlowTracer], params):
# We encountered a call primitive, e.g., remat_call_p, whose result
# (out_tracers) include TensorFlowTracer that were not passed through
# its arguments (captured from the environment).
vals = tuple(t.val for t in out_tracers)
main = self.main
def todo(vals: Sequence[TfVal]):
trace = TensorFlowTrace(main, core.cur_sublevel())
return [
TensorFlowTracer(trace, v, out_tracer.aval)
for v, out_tracer in util.safe_zip(vals, out_tracers)
]
return vals, todo
def process_map(self, map_primitive, f, tracers, params):
raise NotImplementedError("process_map")
def post_process_map(self, map_primitive, out_tracers, params):
raise NotImplementedError("post_process_map")
def process_custom_jvp_call(self, prim, fun, jvp, tracers):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del jvp # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_jvp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del fwd, bwd, out_trees # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
# Returns the primitive implementation and whether the implementation
# takes abstract values (see definition of tf_impl_with_avals)
try:
return tf_impl[p], False
except KeyError:
try:
return tf_impl_with_avals[p], True
except KeyError as err:
msg = "TensorFlow interpretation rule for '{}' not implemented"
raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
if jax_dtype == dtypes.float0:
jax_dtype = dtypes.bfloat16
return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
return tf_dtype.as_numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
assert False, f"Encountered unexpected primitive {p}"
for unexpected in xla.call_translations: # Call primitives are inlined
tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
"reduce",
"rng_uniform",
"clz",
"igamma_grad_a",
"random_gamma_grad",
"reduce_precision",
# Not high priority?
"after_all",
"all_to_all",
"create_token",
"infeed",
"outfeed",
"pmax_p",
"pmin",
"ppermute",
"psum",
"pmax",
"pgather",
"axis_index",
"pdot",
"all_gather",
"lu_pivots_to_permutation",
"rng_bit_generator",
"xla_pmap",
"call_tf",
]
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like
def _add(x: TfVal, y: TfVal) -> TfVal:
return tf.raw_ops.AddV2(x=x, y=y)
tf_impl[ad_util.add_jaxvals_p] = _add
tf_impl[xla.device_put_p] = lambda x, device=None: x
tf_impl[lax.neg_p] = tf.math.negative
def _sign(x: TfVal) -> TfVal:
if x.dtype.is_unsigned:
# TF and XLA do not support tf.math.sign for unsigned types.
return tf.where(
tf.math.equal(x, 0), np.array(0, dtype=x.dtype),
np.array(1, dtype=x.dtype))
else:
return tf.math.sign(x)
tf_impl[lax.sign_p] = _sign
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
sign = _sign(operand)
operand *= sign
floor = tf.math.floor(operand)
operand -= floor
cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
return sign * (
tf.where(cond, tf.constant(np.array(1), operand.dtype),
tf.math.round(operand)) + floor)
else:
return tf.math.round(operand)
tf_impl[lax.round_p] = _round
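# A minimal reference for the AWAY_FROM_ZERO branch above (an illustrative
# sketch, not used by the converter; the name is made up): for real inputs,
# rounding half away from zero is sign(x) * floor(|x| + 0.5), up to
# floating-point edge cases very close to .5, e.g.
#   _round_half_away_reference(np.array([0.5, -0.5, 1.5])) -> [1., -1., 2.]
def _round_half_away_reference(x):
  return np.sign(x) * np.floor(np.abs(x) + 0.5)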
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
orig_dtype = x.dtype
return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)
tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
def _abs(x: TfVal) -> TfVal:
# TF and XLA do not support tf.math.abs for unsigned types.
return tf.math.abs(x) if not x.dtype.is_unsigned else x
tf_impl[lax.abs_p] = _abs
tf_impl[lax.pow_p] = tf.math.pow
def _integer_pow(x, *, y: int, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Follows the implementation in lax._integer_pow_translation_rule
if y == 0:
return tf.broadcast_to(
tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else tf.math.multiply(acc, x)
y >>= 1
if y > 0:
x = tf.math.multiply(x, x)
return tf.math.reciprocal(acc) if is_reciprocal else acc
tf_impl_with_avals[lax.integer_pow_p] = _integer_pow
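# Illustrative sketch (not part of the conversion tables; the name is made up):
# the same square-and-multiply loop as _integer_pow, written for plain Python
# ints with y >= 0, to make the bit manipulation easier to follow.
def _integer_pow_reference(x: int, y: int) -> int:
  acc = 1
  while y > 0:
    if y & 1:      # multiply the accumulator in for every set bit of y
      acc *= x
    y >>= 1
    if y > 0:
      x *= x       # square the base before consuming the next bit
  return acc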
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e
tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
# The only dtypes that are allowed are: float32, float64, complex64, and
# complex128.
if x.dtype == tf.float32:
return tf.cast(x, tf.complex64)
elif x.dtype == tf.float64:
return tf.cast(x, tf.complex128)
else:
return tf.math.conj(x)
tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
dtype = to_tf_dtype(dtype)
# Some dtypes are unsupported, like uint32, so we just fall back to int32.
# TODO(mattjj, necula): improve tf.range dtype handling
shape_tf = _eval_shape(shape)
vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)
tf_impl[lax.iota_p] = _iota
def _div(lhs, rhs):
  if lhs.dtype.is_integer:
    # lax.div rounds integer quotients toward zero, whereas tf.math.floordiv
    # rounds toward negative infinity; add 1 back when the signs differ and
    # the division is not exact.
    quotient = tf.math.floordiv(lhs, rhs)
    select = tf.math.logical_and(
        tf.not_equal(_sign(lhs), _sign(rhs)),
        tf.not_equal(tf.math.floormod(lhs, rhs), 0))
    return tf.where(select, quotient + 1, quotient)
  else:
    return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
  # lax.rem follows the sign of the dividend (C semantics), unlike
  # tf.math.floormod, hence the sign/abs combination.
  return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))
tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
tf.int8: tf.uint8,
tf.int16: tf.uint16,
tf.int32: tf.uint32,
tf.int64: tf.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}
# Note: Bitwise operations only yield identical results on unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
if x.dtype.is_unsigned:
assert x.dtype == y.dtype
orig_dtype = x.dtype
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
x = tf.cast(x, signed_dtype)
y = tf.cast(y, signed_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
else:
return tf.bitwise.right_shift(x, y)
def _shift_right_arithmetic(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA
# semantics to return the shift by the max value (x_bits - 1).
# TODO: it is likely better to add XlaOps for shifts
x_bits = 8 * x.dtype.size
clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
return _shift_right_arithmetic_raw(x, clamp_y)
tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
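# Illustrative sketch (an assumption for exposition only; not used by the
# converter): the XLA semantics implemented by _shift_right_arithmetic, for a
# Python int treated as a w-bit signed value. Out-of-range shift amounts
# behave like a shift by w - 1, so the result is 0 for non-negative inputs
# and -1 for negative ones.
def _ashr_reference(x: int, y: int, w: int = 8) -> int:
  y = y if 0 <= y < w else w - 1
  return x >> y  # Python's >> on ints is an arithmetic (sign-preserving) shift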
def _shift_right_logical_raw(x, y):
if x.dtype.is_unsigned:
return tf.bitwise.right_shift(x, y)
else:
assert x.dtype == y.dtype
orig_dtype = x.dtype
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
x = tf.cast(x, unsigned_dtype)
y = tf.cast(y, unsigned_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))
tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))
tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
# Return the TF expression for when y is within bounds (0 <= y < |x|)
x_bits = 8 * x.dtype.size
# TF does not have comparisons for uint16 and uint32 (despite what the
# documentation says)
y_comp = tf.cast(
y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
y_lt_x_bits = tf.math.less(y_comp, x_bits)
y_ge_0 = tf.math.greater_equal(y_comp, 0)
return tf.logical_and(y_lt_x_bits, y_ge_0)
def _not(x):
"""Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
  This means that applying bitwise_not yields an unexpected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
  if you assume that booleans are simply cast to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
"""
if x.dtype == tf.bool:
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
"""Computes bool valued functions using int8."""
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if not any(args[i].dtype == tf.bool for i in argnums):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
for i, a in enumerate(args)]
if "_in_avals" in kwargs:
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [
cast_aval(aval) if i in argnums else aval
for i, aval in enumerate(kwargs["_in_avals"])
]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
kwargs = dict(
kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)
return wrapper
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))
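# Illustrative sketch (numpy, an assumption for exposition; the name is made
# up): what the bool_to_int8 wrapper effectively does for a boolean bitwise_or.
def _bool_or_reference(a, b):
  a_i8 = np.asarray(a, dtype=np.int8)    # cast the boolean operands to int8
  b_i8 = np.asarray(b, dtype=np.int8)
  return (a_i8 | b_i8).astype(np.bool_)  # run the integer op, cast back to bool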
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less
tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
old_dtype = operand.dtype.as_numpy_dtype
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = tf.math.real(operand)
if (dtypes.issubdtype(old_dtype, np.floating) and
not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(
new_dtype, np.complexfloating) or new_dtype == np.bool_)):
    # Float-to-integer conversion truncates toward zero (XLA semantics);
    # sign * floor(sign * x) implements exactly that truncation.
    sign = _sign(operand)
    operand = sign * tf.math.floor(sign * operand)
return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
def _bitcast_convert_type(operand, new_dtype):
return tf.bitcast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval, *, _in_avals, _out_aval):
# The below permits mirroring the behavior of JAX when maxval < minval
op_shape_tf_val = _eval_shape(_in_avals[1].shape)
maxval = tf.broadcast_to(maxval, op_shape_tf_val)
minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)
return tf.clip_by_value(operand, minval, maxval)
tf_impl_with_avals[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
return tf.concat(operands, axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
"""Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers."""
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _precision_config_proto(precision: Optional[Tuple[PrecisionType,
PrecisionType]]):
"""Convert an integer to an XLA.PrecisionConfig."""
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision[0]))
proto.operand_precision.append(int(precision[1]))
return proto
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type: Optional[DType],
out_shape) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"convolutions can be converted without XLA.")
return _xla_disabled_error("conv_general_dilated", f"{msg} - {suffix}")
# TODO(bchetioui): this function is not exhaustive wrt which convolution cases
# can be translated into TF primitives. Further investigation is needed to
# fully flesh it out.
if lhs.dtype not in [tf.float16, tf.float32, tf.float64]:
raise error(f"tf.nn.convolution is not supported for dtype {lhs.dtype}")
if feature_group_count != 1:
raise error("tf.nn.convolution does not support grouped convolutions")
# TODO(bchetioui): is there something to do with batch_group_count?
if batch_group_count != 1:
raise error("Unimplemented support for batch_group_count != 1")
nb_spatial_dimensions = len(lhs.shape) - 2
# TF can only deal with 1D, 2D and 3D convolution
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle convolutions with 1, 2, or 3 "
"spatial dimensions")
# TODO(bchetioui): handle different stride cases
if list(window_strides) != [1] * nb_spatial_dimensions:
raise error("Unimplemented support for window_strides != "
f"{tuple([1] * nb_spatial_dimensions)}")
if preferred_element_type is not None and preferred_element_type != lhs.dtype:
raise error("Unimplemented support for preferred_element_type")
def convert_padding() -> str:
# TODO(bchetioui): in this instance, we can not use padtype_to_pads as
# string padding is not implemented for transposed convolution.
if list(lhs_dilation) != [1] * nb_spatial_dimensions:
raise error("Padding conversion is not supported for transposed "
"convolution.")
lhs_perm, rhs_perm, _ = dimension_numbers
effective_rhs_shape = [
(k - 1) * r + 1
for k, r in zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)
]
lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
# TF only allows 'VALID' and 'SAME' padding
for pad_str in ["VALID", "SAME"]:
gen_padding = lax.padtype_to_pads(
lhs_shape, effective_rhs_shape, window_strides, pad_str)
if list(gen_padding) == list(padding):
return pad_str
raise error("Input padding not supported in TensorFlow.")
def convert_dim_nums() -> str:
lhs_spec, rhs_spec, out_spec = dimension_numbers
# TF only allows filters with shape:
# spatial_filter_shape + [in_channels, out_channels]. In JAX however,
# rhs_spec is represented as a tuple containing the following:
# [out_channels, in_channels] + spatial_filter_shape.
supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
list(range(nb_spatial_dimensions)))
if list(rhs_spec) != supported_rhs_shape:
raise error("Input filter (RHS) shape format not supported in "
"TensorFlow.")
# TF only supports same LHS and output data format
if lhs_spec != out_spec:
raise error("TensorFlow requires the same data format for LHS and "
"output.")
# Alphabet extracted from the documentation of tf.conv{1,2,3}d
spatial_dim_alphabet = "DHW"[-nb_spatial_dimensions:]
# TF only supports the following data formats:
# - [batch_size, in_channels] + input_spatial_shape
# TODO(bchetioui): TF currently does not support the above on CPU. To avoid
# failing on this platform, this path is commented out for now.
# if list(lhs_spec) == list(range(len(lhs_spec))):
# return "NC" + spatial_dim_alphabet
# - [batch_size] + input_spatial_shape + [in_channels]
if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
list(range(1,
len(lhs_spec) - 1))):
return "N" + spatial_dim_alphabet + "C"
raise error("Data format is unsupported by TensorFlow.")
def convert_dilation_and_compute_result(tf_padding: str,
tf_dim_nums: str) -> TfVal:
no_dilation = [1] * nb_spatial_dimensions
# TODO(bchetioui): is there a generic way to do a transposed atrous
# convolution in TensorFlow?
if not (list(lhs_dilation) == no_dilation or
list(rhs_dilation) == no_dilation):
raise error("Both LHS and RHS dilations are set.")
# This is a non-dilated or atrous convolution
if list(lhs_dilation) == no_dilation:
return tf.nn.convolution(
lhs,
rhs,
strides=window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=rhs_dilation)
# TODO(bchetioui): the below path is unreachable for now, as passing a lhs
# dilation to this function will result in convert_padding returning None
# systematically. This must be investigated further.
# Dilation of the LHS is transposed convolution
return tf.nn.conv_transpose(
lhs,
rhs,
out_shape,
window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=lhs_dilation)
tf_padding = convert_padding()
tf_dim_nums = convert_dim_nums()
return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, *,
window_strides, padding, lhs_dilation,
rhs_dilation,
dimension_numbers: lax.ConvDimensionNumbers,
feature_group_count: int,
batch_group_count: int,
lhs_shape: Sequence[int],
rhs_shape: Sequence[int],
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
"""Implementation of lax.conv_general_dilated_p using XlaConv."""
out_tf_shape = _aval_to_tf_shape(_out_aval)
if not _enable_xla:
return _try_tf_conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type, out_tf_shape)
dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
precision_config_proto = _precision_config_proto(precision)
assert batch_group_count == 1 # TODO(necula): implement batch_group_count
def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
out = tfxla.conv(
lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dnums_proto,
feature_group_count=feature_group_count,
precision_config=precision_config_proto,
preferred_element_type=preferred_element_type)
# TODO: implement shape inference for XlaConv
out.set_shape(out_tf_shape)
return out
# Follow the lowering for complex convolutions from
# lax._conv_general_dilated_translation. We can use the same conversion on all
# platforms because on XLA:TPU the compiler does the same as a rewrite.
if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
if preferred_element_type is not None:
# Convert complex dtype to types used for real and imaginary parts
assert np.issubdtype(preferred_element_type, np.complexfloating)
preferred_float_et = (
np.float64 if preferred_element_type == np.complex128 else np.float32)
else:
preferred_float_et = None
lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)
rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)
k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),
preferred_float_et)
k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
else:
return gen_conv(lhs, rhs, preferred_element_type)
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, *, dimension_numbers,
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
"""Implementation of lax.dot_general_p in terms of tf.linalg.einsum."""
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_ndim, rhs_ndim = len(lhs.shape), len(rhs.shape)
if _enable_xla:
dnums_proto = xla_data_pb2.DotDimensionNumbers()
dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
precision_config_proto = _precision_config_proto(precision)
res = tfxla.dot_general(
lhs,
rhs,
dnums_proto,
precision_config_proto,
preferred_element_type=preferred_element_type)
# TODO: in presence of None dimensions, XlaDot shape inference returns
# unknown shape.
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
# This condition ensures that:
  # 1) the batch dimensions are ordered in the same way in lhs and rhs (this is
  #    not strictly necessary, but we would have to reshape the array if that
  #    were not the case);
# 2) lhs and rhs have the same number of dimensions +/- 1
# 3) the number of non-batch dimensions in both tensors is either 1 or 2
# 4) the contracting dimensions are consistent with those of a classic
# matrix/matrix, vector/matrix or matrix/vector multiplication.
if (lhs_batch == rhs_batch == tuple(range(len(lhs_batch))) and
lhs_ndim - rhs_ndim in [-1, 0, 1] and
1 <= lhs_ndim - len(lhs_batch) <= 2 and
1 <= rhs_ndim - len(rhs_batch) <= 2 and
lhs_contracting == (len(lhs.shape) - 1,) and
rhs_contracting == (len(lhs_batch),)):
# All the inputs to tf.linalg.matmul must have 2 inner dimensions,
# after their batch dimensions, so we need to expand the dimensions
# appropriately. We can get to this branch with three combinations of
# inner shapes:
# - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
# - in this case, the resulting inner shape is [a, c];
# - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]
# - in this case, we need to expand lhs to [1, b], and the resulting
# shape is [c]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [1, c];
# - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
# - in this case, we need to expand rhs to [b, 1], and the resulting
# shape is [a]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [a, 1];
# - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]
# - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
# and the resulting shape is (). We need to squeeze the result of
# tf.linalg.matmul as it will have shape [1, 1].
squeeze_idxs = []
if lhs_ndim - len(lhs_batch) == 1:
lhs = tf.expand_dims(lhs, lhs_ndim - 1)
squeeze_idxs.append(len(lhs.shape) - 2)
if rhs_ndim - len(rhs_batch) == 1:
rhs = tf.expand_dims(rhs, rhs_ndim)
squeeze_idxs.append(len(rhs.shape) - 1)
result = tf.linalg.matmul(lhs, rhs)
if len(squeeze_idxs) != 0:
assert all([result.shape[i] == 1 for i in squeeze_idxs])
result = tf.squeeze(result, squeeze_idxs)
return result
new_id = iter(string.ascii_letters)
lhs_axis_ids = [next(new_id) for _ in lhs.shape]
rhs_axis_ids = [next(new_id) for _ in rhs.shape]
lhs_out_axis_ids = lhs_axis_ids[:]
rhs_out_axis_ids = rhs_axis_ids[:]
for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids = []
for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids.append(shared_id)
not_none = lambda x: x is not None
out_axis_ids = list(
filter(not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
assert lhs.dtype == rhs.dtype
spec = "{},{}->{}".format("".join(lhs_axis_ids), "".join(rhs_axis_ids),
"".join(out_axis_ids))
return tf.linalg.einsum(spec, lhs, rhs)
tf_impl_with_avals[lax.dot_general_p] = _dot_general
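# Worked example for the einsum fallback above (illustrative, with made-up
# shapes): for lhs of shape (m, k) and rhs of shape (k, n) with
# dimension_numbers (((0,), (0,)), ((), ())) -- i.e. contracting over the
# *first* axis of lhs, so the tf.linalg.matmul branch does not apply -- the
# loop assigns a shared letter to the contracted axes and produces a spec of
# the form "ab,ac->bc", which computes transpose(lhs) @ rhs.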
def _broadcast(operand, *, sizes):
result_shape = tf.TensorShape(sizes).concatenate(operand.shape)
return tf.broadcast_to(operand, result_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
inshape = [1] * len(shape)
for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):
if orig_shape_i != 1:
inshape[broadcast_dim_i] = shape[broadcast_dim_i]
inshape_tf = _eval_shape(inshape)
shape_tf = _eval_shape(shape)
return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
def _reshape(operand, *, new_sizes, dimensions):
if dimensions is None:
dimensions = tf.range(tf.rank(operand))
new_sizes_tf = _eval_shape(new_sizes)
return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
op_shape = _in_avals[0].shape
new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)
new_shape_tf = _eval_shape(new_shape)
return tf.reshape(operand, new_shape_tf)
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del _in_avals
low, high, interior = util.unzip3(padding_config)
if _enable_xla:
out = tfxla.pad(operand, padding_value, low, high, interior)
return out
if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
return tf.pad(
operand,
util.safe_zip(low, high),
mode="CONSTANT",
constant_values=padding_value)
raise _xla_disabled_error("pad", "Only use cases without interior or negative padding can be converted without XLA.")
tf_impl_with_avals[lax.pad_p] = _pad
def _rev(operand, *, dimensions):
return tf.reverse(operand, dimensions)
tf_impl[lax.rev_p] = _rev
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
return tf.transpose(operand, perm=permutation)
tf_impl[lax.transpose_p] = _transpose
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
tf_impl[lax.reduce_sum_p] = (
bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
axis, = axes
output_type = tf.int32
if dtypes.iinfo(index_dtype).bits > 32:
output_type = tf.int64
# TODO(phawkins): handle axes larger than 2^31.
result = fn(operand, axis=axis, output_type=output_type)
return tf.cast(result, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(
tangents: TfVal, operand: TfVal, select_prim: core.Primitive,
window_dimensions: Sequence[int], window_strides: Sequence[int],
base_dilation: Sequence[int], window_dilation: Sequence[int],
padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Note: this function follows the pattern in
# jax.lax._select_and_gather_add_translation.
dtype = operand.dtype
nbits = dtypes.finfo(dtype.as_numpy_dtype).bits
  # Specializing the function for 64 bits. Only up to 32 bits are supported on TPU,
  # so we intend to let the code throw a different exception on this platform.
max_bits = 64
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda dtype, x: tf.constant(np.array(x), dtype)
if double_word_reduction:
word_dtype = lax._UINT_DTYPES[nbits]
double_word_dtype = lax._UINT_DTYPES[nbits * 2]
# Packs two values into a tuple.
def pack(a, b):
a = _bitcast_convert_type(a, word_dtype)
b = _bitcast_convert_type(b, word_dtype)
a = _convert_element_type(a, new_dtype=double_word_dtype)
b = _convert_element_type(b, new_dtype=double_word_dtype)
a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
return tf.bitwise.bitwise_or(a, b)
# Unpacks the first element of a tuple.
def fst(t):
assert t.dtype == double_word_dtype
st = _shift_right_logical(t, const(double_word_dtype, nbits))
return _bitcast_convert_type(
_convert_element_type(st, new_dtype=word_dtype), dtype)
# Unpacks the second element of a tuple.
def snd(t):
return _bitcast_convert_type(
_convert_element_type(t, new_dtype=word_dtype), dtype)
else:
raise NotImplementedError(
f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits."
)
assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim
def reducer(x, y):
which = tf_impl[select_prim]
return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)
init = -np.inf if select_prim is lax.ge_p else np.inf
init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))
out = _specialized_reduce_window(
reducer,
init_identity,
pack(operand, tangents),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation,
_in_avals=_in_avals,
_out_aval=_out_aval)
return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
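# Illustrative sketch (plain Python ints, an assumption for exposition; the
# names are made up): the double-word packing used by _select_and_gather_add,
# for w-bit unsigned words.
def _pack_reference(a: int, b: int, w: int = 32) -> int:
  return (a << w) | b           # a in the high word, b in the low word
def _fst_reference(t: int, w: int = 32) -> int:
  return t >> w                 # recover the high word
def _snd_reference(t: int, w: int = 32) -> int:
  return t & ((1 << w) - 1)     # recover the low word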
def _get_shape_from_tensor_or_array(x):
if isinstance(x.shape, tf.TensorShape):
return tuple(x.shape.as_list())
return tuple(x.shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval):
o_spec = tf.TensorSpec((), dtype=operand.dtype)
reducer_fn = tf.function(
reducer, autograph=False).get_concrete_function(o_spec, o_spec)
if not isinstance(init_val, tf.Tensor):
assert not config.jax_enable_checks or _is_tfval(
init_val), f"Non TfVal: {init_val}"
init_val = tf.constant(init_val, operand.dtype)
out = tfxla.reduce_window(
operand,
init_val,
reducer_fn,
window_dimensions,
window_strides,
base_dilations=base_dilation,
window_dilations=window_dilation,
padding=padding)
# TODO: implement shape inference for XlaReduceWindow
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation,
_in_avals, _out_aval):
"""TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
"""
assert len(consts) == 0, "Reduction computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("reduce_window")
def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
return _common_reduce_window(operand, init_value, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval)
# _try_tf_pool currently only supports reduce_window_max and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"reduce_window can be converted without XLA.")
return _xla_disabled_error("reduce_window", f"{msg} - {suffix}")
dtype = operand.dtype
  # Contrary to the main path, tf.int8 is actually a valid type for
# tf.nn.max_pool.
if op_name == "reduce_window_max" and dtype in [
tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
]:
raise error(f"tf.nn.max_pool does not support operands of type {dtype}")
if op_name == "reduce_window_sum" and operand.dtype not in [
tf.float16, tf.float32, tf.float64
]:
raise error(f"tf.nn.avg_pool does not support operands of type {dtype}")
has_batch_dim = window_dimensions[0] == 1
has_channel_dim = window_dimensions[-1] == 1
nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle pooling for arrays with 1, 2, or "
"3 spatial dimensions")
# TODO(bchetioui): does a simple conversion with another base dilation exist?
if list(base_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for base dilation")
# TODO(bchetioui): does a simple conversion with another window_dilation
# exist? The whole story seems similar to convolution.
if list(window_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for window dilation")
if list(padding) != [(0, 0)] * len(operand.shape):
raise error("Unimplemented support for padding")
# ReduceWindow in XLA takes an array of rank N as a parameter, but
# tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
# shape of the form [batch_size] + input_spatial_shape + [num_channels]
tf_operand = operand
tf_window_dimensions = list(window_dimensions)
tf_window_strides = list(window_strides)
if not has_batch_dim:
tf_operand = tf.expand_dims(tf_operand, 0)
tf_window_dimensions = [1] + tf_window_dimensions
tf_window_strides = [1] + tf_window_strides
if not has_channel_dim:
tf_operand = tf.expand_dims(tf_operand, -1)
tf_window_dimensions.append(1)
tf_window_strides.append(1)
tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
tf_padding = "VALID"
if op_name == "reduce_window_max":
result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
elif op_name == "reduce_window_sum":
avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
result = avg * np.prod(tf_window_dimensions)
else:
raise error(f"Unimplemented support for {op_name}")
if not has_batch_dim:
result = tf.squeeze(result, 0)
if not has_channel_dim:
result = tf.squeeze(result, -1)
return result
def _specialized_reduce_window(reducer,
identity,
operand,
*,
window_dimensions,
window_strides,
padding,
base_dilation,
window_dilation,
_in_avals,
_out_aval,
name=None):
"""Wraps the TensorFlow reduce window operation based on a reducer and an
identity function defining the initial value of the reduction depending on
the dtype of the operand.
Args:
reducer: reduction function of type TfVal -> TfVal -> TfVal
identity: function that takes a TensorFlow dtype as a parameter and returns
the starting value of the reduction.
operand: N dimensional array containing elements of type T
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
name: the name of the specialized reduce window primitive for which this
conversion function is called. This information may help to choose a
different conversion path (optional)
Returns:
The reduced operand.
"""
if not _enable_xla and name in ["reduce_window_max", "reduce_window_sum"]:
return _try_tf_pool(name, operand, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
return _common_reduce_window(operand, identity(operand.dtype), reducer,
window_dimensions, window_strides, padding,
base_dilation, window_dilation, _in_avals,
_out_aval)
def _get_max_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(-np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).min
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined max identity")
return False
def _get_min_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).max
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined min identity")
return True
# pylint: disable=protected-access
tf_impl_with_avals[lax.reduce_window_sum_p] = (
functools.partial(
_specialized_reduce_window, _add, lambda x: 0,
name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.minimum,
_get_min_identity,
name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.maximum,
_get_max_identity,
name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access
# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is
# O(n^2) on other backends. This may be implemented using associative_scan
# instead to favor different backends.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_min),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_max),
multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_sum),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_prod),
multiple_results=False)
def _select_and_scatter(operand, source, init_value, select_jaxpr,
select_consts, scatter_jaxpr, scatter_consts,
window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
window_strides, padding, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_disabled_error("select_and_scatter_add")
init_value = tf.zeros((), operand.dtype)
select_fn = (
tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(
init_value, init_value))
scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
padding, source, init_value, select_fn,
scatter_fn)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
res = _convert_jax_impl(
functools.partial(
jax._src.random._threefry2x32_lowering, use_rolled_loops=False),
multiple_results=True)(
*args, _in_avals=_in_avals, _out_aval=_out_aval)
return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap implementation, otherwise on TPU the performance is really bad.
# With use_vmap=True we get about the same performance for JAX and jax2tf.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
functools.partial(jax._src.random._gamma_impl, use_vmap=True),
multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
_in_avals, _out_aval):
"""Tensorflow implementation of gather."""
del _in_avals
if not _enable_xla:
raise _xla_disabled_error("gather")
proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
slice_sizes_tf = _eval_shape(slice_sizes)
out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.gather_p] = _gather
def _slice(operand, start_indices, limit_indices, strides, _in_avals,
_out_aval):
if strides is None:
strides = [1] * len(start_indices)
slices = tuple(
map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),
_eval_shape(strides)))
out = operand[slices]
# TODO(b/184503314): improve shape inference for __getitem__
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
# Here we could use tf.slice. Similarly, for lax.gather we can sometimes use
# tf.gather. But those have different semantics for index-out-of-bounds than
# JAX (and XLA). We have tried to force compilation, by wrapping into
# tf.xla.experimental.compile, or tf.function(jit_compile=True), but
# those solutions are brittle because they do not work when nested into an
# outer compilation (see b/162814494 and b/163006262). They also do not
# survive well being put in a SavedModel. Hence, we now use TFXLA slicing
# and gather ops.
if not _enable_xla:
raise _xla_disabled_error("dynamic_slice")
res = tfxla.dynamic_slice(
operand, tf.stack(start_indices), size_indices=_eval_shape(slice_sizes))
# TODO: implement shape inference for XlaDynamicSlice
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
tf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
def _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del unique_indices, _in_avals
assert len(update_consts) == 0, "Update computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("scatter")
proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
o_spec = tf.TensorSpec((), dtype=operand.dtype)
xla_update_computation = (
tf.function(update_computation,
autograph=False).get_concrete_function(o_spec, o_spec))
out = tfxla.scatter(
operand,
scatter_indices,
updates,
xla_update_computation,
proto,
indices_are_sorted=indices_are_sorted)
# TODO: implement shape analysis for XlaScatter
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
if not _enable_xla:
raise _xla_disabled_error("dynamic_update_slice")
return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],
linear: Sequence[bool]) -> Sequence[TfVal]:
del linear
# tf.cond needs lambdas with no arguments.
branches_tf = [
functools.partial(_interpret_jaxpr, jaxpr, *operands)
for jaxpr in branches
]
return tf.switch_case(index, branches_tf)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]
    # The conditional is not a scalar, so this must be a batched while
return _batched_cond_while(
*args,
cond_nconsts=cond_nconsts,
cond_jaxpr=cond_jaxpr,
body_nconsts=body_nconsts,
body_jaxpr=body_jaxpr)
# The conditional must return a single value to TF
def cond_tf_func(*args: TfVal) -> TfVal:
pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)
return pred
body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
return tf.while_loop(cond_tf_func, body_tf_func, init_carry)
def _batched_cond_while(*args: TfVal, cond_nconsts: int,
cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,
body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
"""Interprets a while_loop with a batched condition.
A batched while has a conditional that returns a tensor of booleans, and
a body that returns a list of tensors whose leading dimensions match those
of the conditional tensor.
We need to turn it into a while with scalar boolean conditional. We will
expand the loop carry to include a prefix with the current tensor boolean
condition. We prepend to the loop the first calculation of the tensor boolean
condition. The loop condition will use a "reduce_any" to calculate a scalar
boolean from the tensor boolean condition. The end of the loop body will
compute the new carry using a "tf.where", and we compute the new tensor
boolean condition.
"""
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
# Initial computation of batched condition
init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
assert init_pred_b is not core.unit
def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
return pred
def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,
*carry)
def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
pred_b_bcast = _broadcast_in_dim(
pred_b,
shape=new_c.shape,
broadcast_dimensions=list(range(len(pred_b.shape))))
return tf.where(pred_b_bcast, new_c, c)
selected_carry: Sequence[TfVal] = list(
util.safe_map(select_one_carry, new_carry, carry))
next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
return (next_pred_b, *selected_carry)
_, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
(init_pred_b, *init_carry))
return res_carry
tf_impl[lax_control_flow.while_p] = _while
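# Illustrative sketch (pure Python, an assumption for exposition; the name is
# made up): the shape of the batched-while rewrite in _batched_cond_while, for
# a carry that is a list with one entry per batch element and a cond_fn that
# returns one boolean per entry.
def _batched_while_reference(cond_fn, body_fn, init_carry):
  carry = list(init_carry)
  pred_b = cond_fn(carry)                  # initial batched condition
  while any(pred_b):                       # scalar condition, like reduce_any
    new_carry = body_fn(carry)
    # elements whose condition is already False keep their previous value
    carry = [new if p else old
             for p, new, old in zip(pred_b, new_carry, carry)]
    pred_b = cond_fn(carry)                # recompute the batched condition
  return carry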
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(
lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
# Some types originally incompatible with tf.math.top_k can be promoted
# to a compatible type without loss of precision.
def promote_tf_dtype(tf_dtype):
if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:
return tf.uint32
if tf_dtype in [tf.int8, tf.int16]:
return tf.int32
if tf_dtype is tf.float16:
return tf.float32
return None
conversion_dtype = promote_tf_dtype(operand.dtype)
if conversion_dtype:
values, indices = tf.math.top_k(
tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)
return tf.dtypes.cast(values, operand.dtype), indices
else:
return tf.math.top_k(operand, k=k, sorted=True)
tf_impl[lax.top_k_p] = _top_k
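# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): per the promotion table above, a uint8 operand is cast to
# uint32 for tf.math.top_k, and only the values are cast back.
def _example_top_k_promotion():
  x = tf.constant([3, 1, 2], dtype=tf.uint8)
  values, indices = _top_k(x, k=2)
  return values, indices  # values [3, 2] as uint8; indices [0, 2] as int32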
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
num_keys: int) -> Tuple[TfVal, ...]:
if not _enable_xla:
raise _xla_disabled_error("sort")
assert 1 <= num_keys <= len(operands)
  assert 0 <= dimension < len(
      operands[0].shape
  ), f"Invalid dimension {dimension} for ndim {len(operands[0].shape)}"
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
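# Note: lexicographic_comparator_old below is unused; _sort uses the
# XLA-matching lexicographic_comparator defined further down.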
def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:
assert len(tf_args) == 2 * len(operands)
# We build a comparison:
# arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))
# all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]
inside_comparison = None
for key_idx in range(num_keys - 1, -1, -1):
a = tf_args[2 * key_idx]
b = tf_args[2 * key_idx + 1]
a_lt_b = tf.math.less(a, b)
if inside_comparison is None:
inside_comparison = a_lt_b
else:
inside_comparison = tf.math.logical_or(
a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))
return inside_comparison
comparator_spec: List[tf.TensorSpec] = []
comparator_jax_in_avals: List[core.AbstractValue] = []
for op in operands:
o_spec = tf.TensorSpec((), dtype=op.dtype)
comparator_spec.extend([o_spec, o_spec])
o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
comparator_jax_in_avals.extend([o_aval, o_aval])
# Use the same comparator that JAX uses when compiling to XLA, to get the
# proper NaN/Inf total order, and the lexicographic ordering.
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
return _convert_jax_impl(
lax._sort_lt_comparator, multiple_results=False)(
*tf_args,
_in_avals=comparator_jax_in_avals,
_out_aval=core.ShapedArray((), np.bool_),
num_keys=num_keys)
xla_comparator_computation = (
tf.function(lexicographic_comparator,
autograph=False).get_concrete_function(*comparator_spec))
results = tfxla.variadic_sort(
operands,
dimension=dimension,
is_stable=is_stable,
comparator=xla_comparator_computation)
return results
tf_impl[lax.sort_p] = _sort
def _fft(x, fft_type, fft_lengths):
FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
if fft_type == IRFFT:
expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
else:
expected_lengths = x.shape[-len(fft_lengths):]
if expected_lengths != fft_lengths:
raise NotImplementedError(
f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
f"array with shape={x.shape}.")
tf_funcs = {
FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]
}
return tf_funcs[fft_type][len(fft_lengths) - 1](x)
tf_impl[lax_fft.fft_p] = _fft
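# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): a rank-1 complex-to-complex FFT dispatches to tf.signal.fft.
def _example_fft():
  x = tf.constant([1, 2, 3, 4], dtype=tf.complex64)
  return _fft(x, xla_client.FftType.FFT, fft_lengths=(4,))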
def _qr(operand, full_matrices):
return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
result = tf.linalg.svd(operand, full_matrices, compute_uv)
if not compute_uv:
return result,
s, u, v = result
return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
compute_right_eigenvectors: bool):
if compute_left_eigenvectors and compute_right_eigenvectors:
# TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to
# sort the left eigenvectors in the right order. The jax.numpy.linalg API
# suggests to me that left eigenvectors are anyway seldom used, so I
# think it is acceptable to leave as unimplemented for now.
msg = ("Conversion of eig is not implemented when both "
"compute_left_eigenvectors and compute_right_eigenvectors are set "
"to True.")
raise NotImplementedError(msg)
elif not (compute_left_eigenvectors or compute_right_eigenvectors):
return tuple([tf.linalg.eigvals(operand)])
elif compute_right_eigenvectors:
return tuple(tf.linalg.eig(operand))
else: # compute_left_eigenvectors == True
wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
wHH = tf.math.conj(wH)
return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):
if operand.shape[-1] == 0:
v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))
else:
if not lower:
operand = tf.linalg.adjoint(operand)
w, v = tf.linalg.eigh(operand)
cast_type = {
tf.complex64: tf.float32,
tf.complex128: tf.float64
}.get(operand.dtype)
if cast_type is not None:
w = tf.cast(w, cast_type)
return v, w
tf_impl_with_avals[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
return _convert_jax_impl(lax_linalg._lu_python)(
operand, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
if unit_diagonal:
a_aval, _ = _in_avals
a_shape = _eval_shape(a_aval.shape)
a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))
if not left_side:
rank = len(a.shape)
transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
a = tf.transpose(a, transpose_dimensions)
b = tf.transpose(b, transpose_dimensions)
lower = not lower
# adjoint == transpose for real dtypes, so special care need only be taken
# for complex types.
if a.dtype in [tf.complex64, tf.complex128]:
if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):
a = tf.math.conj(a)
result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
if not left_side:
result = tf.transpose(result, transpose_dimensions)
return result
tf_impl_with_avals[lax_linalg.triangular_solve_p] = _triangular_solve
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
*args,
const_lengths=const_lengths,
jaxprs=jaxprs,
_in_avals=_in_avals,
_out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
jvp_jaxpr_thunk: Callable,
num_consts: int) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
**_) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(tensor: TfVal,
partition_dimensions: pxla.PartitionsOrReplicated):
"""Like TPUMPStrategy.experimental_split_to_logical_devices.
For jax2tf purposes we want to avoid needing to thread the `strategy` object
through the generated computation. It seems that the original function needs
the strategy object only for error checking, which we assume is done upstream
by JAX.
Args:
tensor: Input tensor to annotate.
partition_dimensions: A list of integers, with one integer per tensor
dimension, specifying in how many parts the dimension should be split. The
product of integers must equal the number of devices per replica.
use_sharding_op: whether to use a sharding op, or not.
Returns:
an annotated tensor.
"""
# This corresponds to the sharding annotations in
# xla_bridge._sharding_to_proto.
if partition_dimensions is None:
return xla_sharding.replicate(tensor, use_sharding_op=True)
num_partition_splits = np.prod(partition_dimensions)
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
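# For example (illustrative, not part of the original module):
# split_to_logical_devices(x, [2, 1]) on a tensor of shape [8, 128] tiles
# dimension 0 across two logical devices, with tile assignment
# np.arange(2).reshape([2, 1]).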
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
in_parts: Sequence[pxla.PartitionsOrReplicated],
out_parts_thunk,
**_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel
out_parts_flat = out_parts_thunk()
assert len(out_parts_flat) == len(
vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
sharded_vals_out = [
(split_to_logical_devices(val, val_part), val_aval)
for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)
]
return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
partitions: pxla.PartitionsOrReplicated):
return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
"""Registers TF custom container types as pytrees."""
m = tf.Module()
# The types here are automagically changed by TensorFlow's checkpointing
# infrastructure.
m.a = (tf.Module(), tf.Module())
m.b = [tf.Module(), tf.Module()]
m.c = {"a": tf.Module()}
tuple_wrapper = type(m.a)
list_wrapper = type(m.b)
dict_wrapper = type(m.c)
# TF AutoTrackable swaps container types out for wrappers.
assert tuple_wrapper is not tuple
assert list_wrapper is not list
assert dict_wrapper is not dict
jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:
(tuple(xs), None), lambda _, xs: tuple(xs))
jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),
lambda _, xs: list(xs))
jax.tree_util.register_pytree_node(
dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
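# Illustrative sketch (not part of the original module; the helper name is
# hypothetical) of why the registration above matters: TF's AutoTrackable
# machinery silently swaps container types for wrappers on attribute
# assignment.
def _example_checkpoint_pytree():
  m = tf.Module()
  m.pair = (tf.constant(1.0), tf.constant(2.0))
  assert type(m.pair) is not tuple  # TF's tuple wrapper, not a plain tuple
  # Thanks to the registration, tree_map still recurses into the wrapper.
  return jax.tree_util.tree_map(lambda t: t + 1.0, m.pair)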
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api_util, config
from jax._src import api
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
import jax._src.random
from jax.api_util import flatten_fun
from jax.interpreters import ad
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax.lib import xla_client
from . import shape_poly
import numpy as np
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
PolyShape = shape_poly.PolyShape
# The scope name needs to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
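# Illustrative examples (not part of the original module):
#   _sanitize_scope_name("my fun/x") == "my_fun/x"   # invalid char replaced
#   _sanitize_scope_name("_private") == "._private"  # invalid leading char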
# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
DType = Any
PrecisionType = int # Enum xla_data.PrecisionConfig.Precision
def _is_tfval(v: TfVal) -> bool:
if isinstance(v, (tf.Tensor, tf.Variable)):
return True
try:
    # Note: this conversion is overkill and just intended as a type check; this
    # code was meant to run only when config.jax_enable_checks is True.
    # TODO: in fact this code is also reached when jax_enable_checks is False.
_safe_convert_to_tensor(v)
return True
except ValueError:
return False
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
dtype = dtype if dtype else (val.dtype if hasattr(val, "dtype") else None)
conversion_type = to_tf_dtype(dtype) if dtype else None
# The float0 type is not known to TF.
if dtype and dtype == dtypes.float0:
val = np.zeros(np.shape(val), conversion_type.as_numpy_dtype)
return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive, Callable[..., Any]] = {}
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract values are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive with this
# flag set to False, we try harder to use only standard TF ops when they are
# applicable to the concrete use case; if the resulting conversion path still
# requires a TFXLA operation, an exception is raised instead.
_enable_xla = True
def _xla_disabled_error(primitive_name: str,
extra_msg: Optional[str] = None) -> Exception:
assert not _enable_xla
msg = f"Call to {primitive_name} cannot be converted with enable_xla=False."
if extra_msg:
msg += f" {extra_msg}"
return NotImplementedError(msg)
@functools.partial(api_util.api_hook, tag="jax2tf_convert")
def convert(fun: Callable,
*,
polymorphic_shapes: Optional[Sequence[Any]] = None,
with_gradient=True,
enable_xla=True) -> Callable:
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations." +
f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"),
a) # type: ignore
for i, a in enumerate(args))
kwargs = {k: tf.identity(v, f"jax2tf_arg_{k}") for k, v in kwargs.items()}
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, kwargs))
if polymorphic_shapes is None:
polymorphic_shapes_ = (None,) * len(args)
else:
if not isinstance(polymorphic_shapes, Sequence) or len(args) != len(polymorphic_shapes):
msg = ("polymorphic_shapes must be a sequence with the same length as the positional argument list "
f"({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.")
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
# Expand the polymorphic_shapes to match the argument pytree
polymorphic_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert polymorphic_shapes",
in_tree.children()[0],
polymorphic_shapes_))
# Add kwargs shapes.
polymorphic_shapes_flat = polymorphic_shapes_flat + tuple(
(None,) * (len(args_flat) - len(polymorphic_shapes_flat)))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the polymorphic_shapes if given. May create new shape
# variables.
args_avals_flat, shapeenv = _args_to_avals_and_env(args_flat,
polymorphic_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError(
"Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if polymorphic_shapes is None:
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(
in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape)
for out_aval in _out_cts_avals)) # type: ignore
vjp_polymorphic_shapes = [
args_polymorphic_shapes, out_cts_polymorphic_shapes
]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(
fun_vjp_jax,
with_gradient=False,
polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(
converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [
tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw
]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
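# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical): the converted function can be called on TF values, wrapped
# in tf.function, or saved as part of a SavedModel.
def _example_convert_usage():
  f_jax = lambda x: jnp.sin(jnp.cos(x))
  f_tf = convert(f_jax)
  return tf.function(f_tf, autograph=False)(tf.constant([1.0, 2.0]))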
# Internals
def _interpret_fun(
fun: lu.WrappedFun, in_vals: Sequence[TfVal],
in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
with core.new_base_main(TensorFlowTrace) as main: # type: ignore
fun = _interpret_subtrace(fun, main, in_avals)
with core.new_sublevel():
out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = \
fun.call_wrapped(*in_vals)
del main
return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
# We wrap the jax_impl under _interpret_fun to abstract the TF values
# from jax_impl and turn them into JAX abstract values.
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return jax_results if multiple_results else [jax_results]
tf_results_with_avals = _interpret_fun(
lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
tf_results, _ = util.unzip2(tf_results_with_avals)
return tf_results if multiple_results else tf_results[0]
return wrapped
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
in_avals: Sequence[core.AbstractValue],
*in_vals: TfVal):
trace = TensorFlowTrace(main, core.cur_sublevel())
in_tracers = tuple(
TensorFlowTracer(trace, val, aval)
for val, aval in util.safe_zip(in_vals, in_avals))
# The outs may be core.unit, see comment in TensorFlowTrace.pure.
outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]
out_tracers: Iterable[TensorFlowTracer] = (
map(trace.full_raise, outs)) # type: ignore
out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
tuple((t.val, t.aval) for t in out_tracers))
yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple(v for v, _ in out_with_avals)
### tracer
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
return tuple(
map(lambda d: None if isinstance(d, shape_poly.DimVar) else d,
aval.shape)) # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
if isinstance(val, (tf.Tensor, tf.Variable)):
# May be partially known
return tuple(val.shape), to_jax_dtype(val.dtype)
else: # Must be a numeric value
assert not config.jax_enable_checks or _is_tfval(val), f"Non TfVal: {val}"
raw_aval = xla.abstractify(val)
return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]
# A dimension environment maps dimension variables to TF expressions that
# compute the value of the dimension. These expressions refer to the TF
# function arguments.
_ShapeEnv = Dict[shape_poly.DimVar, TfVal]
def _args_to_avals_and_env(args: Sequence[TfVal],
polymorphic_shapes: Sequence[Optional[Union[str, PolyShape]]]) -> \
Tuple[Sequence[core.AbstractValue], _ShapeEnv]:
shapeenv: _ShapeEnv = {}
def input_aval(arg: TfVal,
polymorphic_shape: Optional[str]) -> core.AbstractValue:
raw_shape, dtype = _tfval_shape_dtype(arg)
aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
for i, d in enumerate(aval_shape):
if type(d) is int:
assert d == np.shape(arg)[i]
elif type(d) is shape_poly.DimVar and d not in shapeenv:
# Even if the shape of `arg` is known, we still use `tf.shape` for
# safety, because the promise is that we will convert the function
# to work for any value of the dimension.
shapeenv[d] = tf.shape(arg)[i] # type: ignore[index]
else:
# TODO: add an assertion tf.shape(arg)[i] == env[d]
pass
return core.ShapedArray(aval_shape, dtype)
avals = tuple(map(input_aval, args, polymorphic_shapes)) # type: ignore
return avals, shapeenv
# A shape environment maps shape variables to TfVal.
_shape_env = {} # type: _ShapeEnv
def _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:
assert all(map(
lambda x: x is not None,
shape)), (f"Argument shape should be a valid JAX shape but got {shape}")
return tuple(_shape_env[d] # type: ignore[index]
if type(d) is shape_poly.DimVar else d
for d in shape)
def shape_as_value(x):
  # return shape_as_value_p.bind(x)
  raise NotImplementedError("shape_as_value is deprecated")
# # TODO: move this to masking or to some common library, if approved
# shape_as_value_p = core.Primitive("shape_as_value")
# shape_as_value_p.multiple_results = True
# def _shape_as_value_impl(x):
# x_shape = np.shape(x)
# def dim_to_int(dim: shape_poly.DimSize) -> int:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is None:
# msg = ("shape_as_value is not implemented for non-constant shapes "
# "except for masking and jax2tf. "
# f"Has shape: {x_shape}")
# raise TypeError(msg)
# else:
# return dim_int
# return tuple(map(dim_to_int, x_shape))
#
# shape_as_value_p.def_impl(_shape_as_value_impl)
#
# def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
# rank = len(x_aval.shape) # type: ignore[attr-defined]
# return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
#
# shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
#
# def _shape_as_value_translation(comp, x):
# return xla_client._xla.ops.Tuple(comp,
# tuple(xb.constant(comp, d)
# for d in comp.GetShape(x).dimensions()))
#
# xla.translations[shape_as_value_p] = _shape_as_value_translation
#
# def _shape_as_value_jvp_rule(primals, tangents):
# # The shape does not depend on the contents of the input
# x, = primals
# zero = ad.Zero.from_value(0.)
# return shape_as_value(x), (zero,) * len(x.shape)
#
# ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
#
# def _shape_as_value__batching_rule(batched_args, batch_dims):
# xv, = batched_args
# batch_dim, = batch_dims
# batch_size = xv.shape[batch_dim]
# batched_shape = shape_as_value(xv)
# one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
# res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
# return res, (0,) * len(one_shape)
#
# batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
#
# def _shape_as_value_masking_rule(operands, operands_logical_shapes):
# x_logical_shape, = operands_logical_shapes
# return tuple(x_logical_shape)
#
# masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
#
# def _shape_as_value_tf(x: TfVal,
# _in_avals: Sequence[core.AbstractValue],
# _out_aval: core.AbstractValue) -> TfVal:
# x_aval = _in_avals[0]
# def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is not None:
# return tf.convert_to_tensor(dim_int)
# else:
# return tf.shape(x)[dim_idx]
# return tuple(dim_to_tfval(dim, dim_idx)
# for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
#
# tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
class TensorFlowTracer(core.Tracer):
__slots__ = ["val", "_aval"]
def __init__(self, trace: "TensorFlowTrace", val: TfVal,
aval: core.AbstractValue):
self._trace = trace
self._aval = aval
if aval is core.abstract_unit:
self.val = val
elif isinstance(val, (tf.Tensor, tf.Variable)):
val_shape, val_dtype = _tfval_shape_dtype(val)
aval_dtype = np.dtype(self._aval.dtype)
if (val_dtype != aval_dtype and not config.x64_enabled and
(val_dtype == tf.int32 and aval_dtype == jnp.int64 or
val_dtype == tf.int64 and aval_dtype == jnp.int32 or
val_dtype == tf.float32 and aval_dtype == jnp.float64 or
val_dtype == tf.float64 and aval_dtype == jnp.float32 or
val_dtype == tf.complex128 and aval_dtype == jnp.complex64)):
val = tf.cast(val, dtype=aval_dtype)
val_dtype = aval_dtype
if config.jax_enable_checks:
assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
for aval_dim, val_dim in util.safe_zip(
self._aval.shape, val_shape): # type: ignore[attr-defined]
if val_dim is None:
assert isinstance(
aval_dim, shape_poly.DimVar
), f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
elif not isinstance(aval_dim, shape_poly.DimVar):
assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
else:
# We have a TF value with known shape, and the abstract shape is a shape variable.
try:
aval_int = int(_eval_shape([aval_dim])) # type: ignore
except TypeError:
continue
assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}." # type: ignore
self.val = val
else: # Must be a numeric value
self.val = _safe_convert_to_tensor(
val, dtype=self._aval.dtype) # type: ignore[attr-defined]
@property
def aval(self):
return self._aval
def full_lower(self):
return self
class TensorFlowTrace(core.Trace):
def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
if val is core.unit:
return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),
core.abstract_unit)
else:
shape, dtype = _tfval_shape_dtype(val)
return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))
def lift(self, val: core.Tracer) -> TensorFlowTracer:
# This would be called when we need to raise a tracer from a lower-level
# main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
# inside another transform, there are no lower-level main traces.
assert False
def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
    # This is called when we need to raise a tracer from the same main trace,
    # but a lower sublevel. This could come from a nested jit.
return TensorFlowTracer(self, val.val, val._aval)
def process_primitive(self, primitive: core.Primitive,
tracers: Sequence[TensorFlowTracer],
params) -> TensorFlowTracer:
impl, impl_needs_avals = self.get_primitive_impl(primitive)
args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
out_aval = primitive.abstract_eval(*args_avals, **params)
args_tf: Sequence[TfVal] = [t.val for t in tracers]
if impl_needs_avals:
val_out: TfVal = impl(
*args_tf,
_in_avals=args_avals, # type: ignore
_out_aval=out_aval,
**params)
else:
val_out = impl(*args_tf, **params)
if primitive.multiple_results:
out = [
TensorFlowTracer(self, v, a)
for v, a in util.safe_zip(val_out, out_aval)
] # type: ignore
else:
out = TensorFlowTracer(self, val_out, out_aval) # type: ignore
# Check that the impl rule returned a value of expected shape and dtype
# TODO: adapt this to match polymorphic shapes
if config.jax_enable_checks:
if primitive.multiple_results:
for o, expected_aval in zip(out, out_aval): # type: ignore
assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
else:
assert out.aval == out_aval, ( # type: ignore
f"{primitive}: out.aval = {out.aval}; expected {out_aval}"
) # type: ignore
return out # type: ignore
def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
tracers: Sequence[TensorFlowTracer], params):
assert call_primitive.multiple_results
vals: Sequence[TfVal] = [t.val for t in tracers]
f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
with core.new_sublevel():
if call_primitive == core.named_call_p:
with tf.name_scope(_sanitize_scope_name(params["name"])):
vals_out: Sequence[Tuple[TfVal, core.AbstractValue]] = \
f.call_wrapped(*vals)
elif call_primitive == sharded_jit.sharded_call_p:
vals_out = _sharded_call(f, vals, **params)
else:
vals_out = f.call_wrapped(*vals)
return [TensorFlowTracer(self, v, a) for v, a in vals_out]
def post_process_call(self, call_primitive: core.Primitive,
out_tracers: Sequence[TensorFlowTracer], params):
    # We encountered a call primitive, e.g., remat_call_p, whose results
    # (out_tracers) include TensorFlowTracers that were not passed through its
    # arguments (they were captured from the environment).
vals = tuple(t.val for t in out_tracers)
main = self.main
def todo(vals: Sequence[TfVal]):
trace = TensorFlowTrace(main, core.cur_sublevel())
return [
TensorFlowTracer(trace, v, out_tracer.aval)
for v, out_tracer in util.safe_zip(vals, out_tracers)
]
return vals, todo
def process_map(self, map_primitive, f, tracers, params):
raise NotImplementedError("process_map")
def post_process_map(self, map_primitive, out_tracers, params):
raise NotImplementedError("post_process_map")
def process_custom_jvp_call(self, prim, fun, jvp, tracers):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del jvp # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_jvp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del fwd, bwd, out_trees # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
# Returns the primitive implementation and whether the implementation
# takes abstract values (see definition of tf_impl_with_avals)
try:
return tf_impl[p], False
except KeyError:
try:
return tf_impl_with_avals[p], True
except KeyError as err:
msg = "TensorFlow interpretation rule for '{}' not implemented"
raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
if jax_dtype == dtypes.float0:
jax_dtype = dtypes.bfloat16
return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
return tf_dtype.as_numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
assert False, f"Encountered unexpected primitive {p}"
for unexpected in xla.call_translations: # Call primitives are inlined
tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
"reduce",
"rng_uniform",
"clz",
"igamma_grad_a",
"random_gamma_grad",
"reduce_precision",
# Not high priority?
"after_all",
"all_to_all",
"create_token",
"infeed",
"outfeed",
"pmax_p",
"pmin",
"ppermute",
"psum",
"pmax",
"pgather",
"axis_index",
"pdot",
"all_gather",
"lu_pivots_to_permutation",
"rng_bit_generator",
"xla_pmap",
"call_tf",
]
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like
def _add(x: TfVal, y: TfVal) -> TfVal:
return tf.raw_ops.AddV2(x=x, y=y)
tf_impl[ad_util.add_jaxvals_p] = _add
tf_impl[xla.device_put_p] = lambda x, device=None: x
tf_impl[lax.neg_p] = tf.math.negative
def _sign(x: TfVal) -> TfVal:
if x.dtype.is_unsigned:
# TF and XLA do not support tf.math.sign for unsigned types.
return tf.where(
tf.math.equal(x, 0), np.array(0, dtype=x.dtype),
np.array(1, dtype=x.dtype))
else:
return tf.math.sign(x)
tf_impl[lax.sign_p] = _sign
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
sign = _sign(operand)
operand *= sign
floor = tf.math.floor(operand)
operand -= floor
cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
return sign * (
tf.where(cond, tf.constant(np.array(1), operand.dtype),
tf.math.round(operand)) + floor)
else:
return tf.math.round(operand)
tf_impl[lax.round_p] = _round
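# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): tf.math.round rounds halves to even, so AWAY_FROM_ZERO needs
# the special-casing above.
def _example_round():
  x = tf.constant([0.5, 1.5, -0.5])
  to_even = _round(x, rounding_method=lax.RoundingMethod.TO_NEAREST_EVEN)
  away = _round(x, rounding_method=lax.RoundingMethod.AWAY_FROM_ZERO)
  return to_even, away  # [0., 2., -0.] and [1., 2., -1.]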
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
orig_dtype = x.dtype
return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)
tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
def _abs(x: TfVal) -> TfVal:
# TF and XLA do not support tf.math.abs for unsigned types.
return tf.math.abs(x) if not x.dtype.is_unsigned else x
tf_impl[lax.abs_p] = _abs
tf_impl[lax.pow_p] = tf.math.pow
def _integer_pow(x, *, y: int, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Follows the implementation in lax._integer_pow_translation_rule
if y == 0:
return tf.broadcast_to(
tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else tf.math.multiply(acc, x)
y >>= 1
if y > 0:
x = tf.math.multiply(x, x)
return tf.math.reciprocal(acc) if is_reciprocal else acc
tf_impl_with_avals[lax.integer_pow_p] = _integer_pow
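# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): for y=5 (binary 101) the loop above computes x**5 as
# x * (x**2)**2, i.e. three multiplies instead of four.
def _example_integer_pow():
  aval = core.ShapedArray((2,), np.float32)
  x = tf.constant([2.0, 3.0])
  return _integer_pow(x, y=5, _in_avals=(aval,), _out_aval=aval)  # [32., 243.]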
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e
tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
# The only dtypes that are allowed are: float32, float64, complex64, and
# complex128.
if x.dtype == tf.float32:
return tf.cast(x, tf.complex64)
elif x.dtype == tf.float64:
return tf.cast(x, tf.complex128)
else:
return tf.math.conj(x)
tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
dtype = to_tf_dtype(dtype)
# Some dtypes are unsupported, like uint32, so we just fall back to int32.
# TODO(mattjj, necula): improve tf.range dtype handling
shape_tf = _eval_shape(shape)
vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)
tf_impl[lax.iota_p] = _iota
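# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): iota along dimension 1 broadcasts a reshaped tf.range.
def _example_iota():
  return _iota(dtype=np.int32, shape=(2, 3), dimension=1)
  # [[0, 1, 2],
  #  [0, 1, 2]]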
def _div(lhs, rhs):
if lhs.dtype.is_integer:
quotient = tf.math.floordiv(lhs, rhs)
select = tf.math.logical_and(
tf.not_equal(_sign(lhs), _sign(rhs)),
tf.not_equal(tf.math.floormod(lhs, rhs), 0))
return tf.where(select, quotient + 1, quotient)
else:
return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))
tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
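# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): lax.div truncates toward zero while tf.math.floordiv floors,
# hence the correction above for quotients with mixed signs.
def _example_div_rem():
  lhs, rhs = tf.constant([-7, 7]), tf.constant([2, -2])
  return _div(lhs, rhs), _rem(lhs, rhs)  # [-3, -3] and [-1, 1]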
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
tf.int8: tf.uint8,
tf.int16: tf.uint16,
tf.int32: tf.uint32,
tf.int64: tf.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}
# Note: TF's bitwise operations yield results identical to XLA's only on
# unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
if x.dtype.is_unsigned:
assert x.dtype == y.dtype
orig_dtype = x.dtype
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
x = tf.cast(x, signed_dtype)
y = tf.cast(y, signed_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
else:
return tf.bitwise.right_shift(x, y)
def _shift_right_arithmetic(x, y):
  # TF shift is "implementation defined" if the shift amount is negative or
  # greater than or equal to the bit width of the value. We implement the XLA
  # semantics, which clamp the shift amount to the maximum in-range value
  # (x_bits - 1).
# TODO: it is likely better to add XlaOps for shifts
x_bits = 8 * x.dtype.size
clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
return _shift_right_arithmetic_raw(x, clamp_y)
tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
def _shift_right_logical_raw(x, y):
if x.dtype.is_unsigned:
return tf.bitwise.right_shift(x, y)
else:
assert x.dtype == y.dtype
orig_dtype = x.dtype
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
x = tf.cast(x, unsigned_dtype)
y = tf.cast(y, unsigned_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
  # TF shift is "implementation defined" if the shift amount is negative or
  # greater than or equal to the bit width of the value. We implement the XLA
  # semantics, which return 0 in that case.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))
tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
  # TF shift is "implementation defined" if the shift amount is negative or
  # greater than or equal to the bit width of the value. We implement the XLA
  # semantics, which return 0 in that case.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))
tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
# Return the TF expression for when y is within bounds (0 <= y < |x|)
x_bits = 8 * x.dtype.size
# TF does not have comparisons for uint16 and uint32 (despite what the
# documentation says)
y_comp = tf.cast(
y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
y_lt_x_bits = tf.math.less(y_comp, x_bits)
y_ge_0 = tf.math.greater_equal(y_comp, 0)
return tf.logical_and(y_lt_x_bits, y_ge_0)
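# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): out-of-bounds shift amounts follow the XLA semantics
# implemented above.
def _example_out_of_bounds_shift():
  x = tf.constant([-8], tf.int8)
  y = tf.constant([9], tf.int8)  # 9 >= 8 bits: out of bounds
  return (_shift_right_arithmetic(x, y),  # [-1]: clamped to a shift by 7
          _shift_right_logical(x, y),     # [0]
          _shift_left(x, y))              # [0]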
def _not(x):
if x.dtype == tf.bool:
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if not any(args[i].dtype == tf.bool for i in argnums):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
for i, a in enumerate(args)]
if "_in_avals" in kwargs:
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [
cast_aval(aval) if i in argnums else aval
for i, aval in enumerate(kwargs["_in_avals"])
]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
kwargs = dict(
kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)
return wrapper
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))
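# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): boolean and/or/xor round-trip through int8.
def _example_bool_or():
  x = tf.constant([True, False])
  y = tf.constant([False, False])
  return tf_impl[lax.or_p](x, y)  # [True, False]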
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less
tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
old_dtype = operand.dtype.as_numpy_dtype
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = tf.math.real(operand)
if (dtypes.issubdtype(old_dtype, np.floating) and
not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(
new_dtype, np.complexfloating) or new_dtype == np.bool_)):
sign = _sign(operand)
operand = sign * tf.math.floor(sign * operand)
return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
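# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): the float-to-int path above truncates toward zero, matching
# XLA, rather than flooring.
def _example_convert_element_type():
  x = tf.constant([-1.7, 1.7])
  return _convert_element_type(x, new_dtype=np.int32)  # [-1, 1]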
def _bitcast_convert_type(operand, new_dtype):
return tf.bitcast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval, *, _in_avals, _out_aval):
  # The code below mirrors JAX's behavior when maxval < minval.
op_shape_tf_val = _eval_shape(_in_avals[1].shape)
maxval = tf.broadcast_to(maxval, op_shape_tf_val)
minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)
return tf.clip_by_value(operand, minval, maxval)
tf_impl_with_avals[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
return tf.concat(operands, axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _precision_config_proto(precision: Optional[Tuple[PrecisionType,
PrecisionType]]):
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision[0]))
proto.operand_precision.append(int(precision[1]))
return proto
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type: Optional[DType],
out_shape) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"convolutions can be converted without XLA.")
return _xla_disabled_error("conv_general_dilated", f"{msg} - {suffix}")
# TODO(bchetioui): this function is not exhaustive wrt which convolution cases
# can be translated into TF primitives. Further investigation is needed to
# fully flesh it out.
if lhs.dtype not in [tf.float16, tf.float32, tf.float64]:
raise error(f"tf.nn.convolution is not supported for dtype {lhs.dtype}")
if feature_group_count != 1:
raise error("tf.nn.convolution does not support grouped convolutions")
# TODO(bchetioui): is there something to do with batch_group_count?
if batch_group_count != 1:
raise error("Unimplemented support for batch_group_count != 1")
nb_spatial_dimensions = len(lhs.shape) - 2
# TF can only deal with 1D, 2D and 3D convolution
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle convolutions with 1, 2, or 3 "
"spatial dimensions")
# TODO(bchetioui): handle different stride cases
if list(window_strides) != [1] * nb_spatial_dimensions:
raise error("Unimplemented support for window_strides != "
f"{tuple([1] * nb_spatial_dimensions)}")
if preferred_element_type is not None and preferred_element_type != lhs.dtype:
raise error("Unimplemented support for preferred_element_type")
def convert_padding() -> str:
    # TODO(bchetioui): in this instance, we cannot use padtype_to_pads, as
    # string padding is not implemented for transposed convolution.
if list(lhs_dilation) != [1] * nb_spatial_dimensions:
raise error("Padding conversion is not supported for transposed "
"convolution.")
lhs_perm, rhs_perm, _ = dimension_numbers
effective_rhs_shape = [
(k - 1) * r + 1
for k, r in zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)
]
lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
# TF only allows 'VALID' and 'SAME' padding
for pad_str in ["VALID", "SAME"]:
gen_padding = lax.padtype_to_pads(
lhs_shape, effective_rhs_shape, window_strides, pad_str)
if list(gen_padding) == list(padding):
return pad_str
raise error("Input padding not supported in TensorFlow.")
def convert_dim_nums() -> str:
lhs_spec, rhs_spec, out_spec = dimension_numbers
# TF only allows filters with shape:
# spatial_filter_shape + [in_channels, out_channels]. In JAX however,
# rhs_spec is represented as a tuple containing the following:
# [out_channels, in_channels] + spatial_filter_shape.
supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
list(range(nb_spatial_dimensions)))
if list(rhs_spec) != supported_rhs_shape:
raise error("Input filter (RHS) shape format not supported in "
"TensorFlow.")
# TF only supports same LHS and output data format
if lhs_spec != out_spec:
raise error("TensorFlow requires the same data format for LHS and "
"output.")
# Alphabet extracted from the documentation of tf.conv{1,2,3}d
spatial_dim_alphabet = "DHW"[-nb_spatial_dimensions:]
# TF only supports the following data formats:
# - [batch_size, in_channels] + input_spatial_shape
# TODO(bchetioui): TF currently does not support the above on CPU. To avoid
# failing on this platform, this path is commented out for now.
# if list(lhs_spec) == list(range(len(lhs_spec))):
# return "NC" + spatial_dim_alphabet
# - [batch_size] + input_spatial_shape + [in_channels]
if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
list(range(1,
len(lhs_spec) - 1))):
return "N" + spatial_dim_alphabet + "C"
raise error("Data format is unsupported by TensorFlow.")
def convert_dilation_and_compute_result(tf_padding: str,
tf_dim_nums: str) -> TfVal:
no_dilation = [1] * nb_spatial_dimensions
# TODO(bchetioui): is there a generic way to do a transposed atrous
# convolution in TensorFlow?
if not (list(lhs_dilation) == no_dilation or
list(rhs_dilation) == no_dilation):
raise error("Both LHS and RHS dilations are set.")
# This is a non-dilated or atrous convolution
if list(lhs_dilation) == no_dilation:
return tf.nn.convolution(
lhs,
rhs,
strides=window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=rhs_dilation)
    # TODO(bchetioui): the below path is unreachable for now, as passing a lhs
    # dilation to this function makes convert_padding raise an error
    # systematically. This must be investigated further.
# Dilation of the LHS is transposed convolution
return tf.nn.conv_transpose(
lhs,
rhs,
out_shape,
window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=lhs_dilation)
tf_padding = convert_padding()
tf_dim_nums = convert_dim_nums()
return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, *,
window_strides, padding, lhs_dilation,
rhs_dilation,
dimension_numbers: lax.ConvDimensionNumbers,
feature_group_count: int,
batch_group_count: int,
lhs_shape: Sequence[int],
rhs_shape: Sequence[int],
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
out_tf_shape = _aval_to_tf_shape(_out_aval)
if not _enable_xla:
return _try_tf_conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type, out_tf_shape)
dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
precision_config_proto = _precision_config_proto(precision)
assert batch_group_count == 1 # TODO(necula): implement batch_group_count
def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
out = tfxla.conv(
lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dnums_proto,
feature_group_count=feature_group_count,
precision_config=precision_config_proto,
preferred_element_type=preferred_element_type)
# TODO: implement shape inference for XlaConv
out.set_shape(out_tf_shape)
return out
  # Follow the lowering for complex convolutions from
  # lax._conv_general_dilated_translation. We can use the same conversion on
  # all platforms because on XLA:TPU the compiler performs the same rewrite.
if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
if preferred_element_type is not None:
# Convert complex dtype to types used for real and imaginary parts
assert np.issubdtype(preferred_element_type, np.complexfloating)
preferred_float_et = (
np.float64 if preferred_element_type == np.complex128 else np.float32)
else:
preferred_float_et = None
lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)
rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)
k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),
preferred_float_et)
k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
else:
return gen_conv(lhs, rhs, preferred_element_type)
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, *, dimension_numbers,
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_ndim, rhs_ndim = len(lhs.shape), len(rhs.shape)
if _enable_xla:
dnums_proto = xla_data_pb2.DotDimensionNumbers()
dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
precision_config_proto = _precision_config_proto(precision)
res = tfxla.dot_general(
lhs,
rhs,
dnums_proto,
precision_config_proto,
preferred_element_type=preferred_element_type)
    # TODO: in the presence of None dimensions, XlaDot shape inference returns
    # an unknown shape.
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
# This condition ensures that:
  # 1) the batch dimensions are ordered in the same way in lhs and rhs (this is
  #    not strictly necessary, but we would have to reshape the array if that
  #    were not the case);
# 2) lhs and rhs have the same number of dimensions +/- 1
# 3) the number of non-batch dimensions in both tensors is either 1 or 2
# 4) the contracting dimensions are consistent with those of a classic
# matrix/matrix, vector/matrix or matrix/vector multiplication.
if (lhs_batch == rhs_batch == tuple(range(len(lhs_batch))) and
lhs_ndim - rhs_ndim in [-1, 0, 1] and
1 <= lhs_ndim - len(lhs_batch) <= 2 and
1 <= rhs_ndim - len(rhs_batch) <= 2 and
lhs_contracting == (len(lhs.shape) - 1,) and
rhs_contracting == (len(lhs_batch),)):
# All the inputs to tf.linalg.matmul must have 2 inner dimensions,
# after their batch dimensions, so we need to expand the dimensions
# appropriately. We can get to this branch with three combinations of
# inner shapes:
# - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
# - in this case, the resulting inner shape is [a, c];
# - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]
# - in this case, we need to expand lhs to [1, b], and the resulting
# shape is [c]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [1, c];
# - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
# - in this case, we need to expand rhs to [b, 1], and the resulting
# shape is [a]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [a, 1];
# - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]
# - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
# and the resulting shape is (). We need to squeeze the result of
# tf.linalg.matmul as it will have shape [1, 1].
squeeze_idxs = []
if lhs_ndim - len(lhs_batch) == 1:
lhs = tf.expand_dims(lhs, lhs_ndim - 1)
squeeze_idxs.append(len(lhs.shape) - 2)
if rhs_ndim - len(rhs_batch) == 1:
rhs = tf.expand_dims(rhs, rhs_ndim)
squeeze_idxs.append(len(rhs.shape) - 1)
result = tf.linalg.matmul(lhs, rhs)
if len(squeeze_idxs) != 0:
assert all([result.shape[i] == 1 for i in squeeze_idxs])
result = tf.squeeze(result, squeeze_idxs)
return result
new_id = iter(string.ascii_letters)
lhs_axis_ids = [next(new_id) for _ in lhs.shape]
rhs_axis_ids = [next(new_id) for _ in rhs.shape]
lhs_out_axis_ids = lhs_axis_ids[:]
rhs_out_axis_ids = rhs_axis_ids[:]
for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids = []
for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids.append(shared_id)
not_none = lambda x: x is not None
out_axis_ids = list(
filter(not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
assert lhs.dtype == rhs.dtype
spec = "{},{}->{}".format("".join(lhs_axis_ids), "".join(rhs_axis_ids),
"".join(out_axis_ids))
return tf.linalg.einsum(spec, lhs, rhs)
tf_impl_with_avals[lax.dot_general_p] = _dot_general
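# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): with XLA disabled, a batched matmul-shaped dot_general takes
# the tf.linalg.matmul branch above; anything else falls through to an einsum
# equivalent to:
def _example_dot_general_einsum():
  lhs = tf.ones((2, 3, 4))  # batch dims ((0,), (0,)),
  rhs = tf.ones((2, 4, 5))  # contracting dims ((2,), (1,))
  return tf.linalg.einsum("bik,bkj->bij", lhs, rhs)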
def _broadcast(operand, *, sizes):
result_shape = tf.TensorShape(sizes).concatenate(operand.shape)
return tf.broadcast_to(operand, result_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
inshape = [1] * len(shape)
for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):
if orig_shape_i != 1:
inshape[broadcast_dim_i] = shape[broadcast_dim_i]
inshape_tf = _eval_shape(inshape)
shape_tf = _eval_shape(shape)
return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
def _reshape(operand, *, new_sizes, dimensions):
if dimensions is None:
dimensions = tf.range(tf.rank(operand))
new_sizes_tf = _eval_shape(new_sizes)
return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
op_shape = _in_avals[0].shape
new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)
new_shape_tf = _eval_shape(new_shape)
return tf.reshape(operand, new_shape_tf)
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del _in_avals
low, high, interior = util.unzip3(padding_config)
if _enable_xla:
out = tfxla.pad(operand, padding_value, low, high, interior)
return out
if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
return tf.pad(
operand,
util.safe_zip(low, high),
mode="CONSTANT",
constant_values=padding_value)
raise _xla_disabled_error("pad", "Only use cases without interior or negative padding can be converted without XLA.")
tf_impl_with_avals[lax.pad_p] = _pad
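# Example of the non-XLA padding path above (illustrative, not from the
# original file): padding_config == [(1, 2, 0), (0, 0, 0)] unzips to
# low == (1, 0), high == (2, 0), interior == (0, 0) and becomes
#   tf.pad(operand, [(1, 2), (0, 0)], mode="CONSTANT",
#          constant_values=padding_value)
# Interior or negative padding has no tf.pad equivalent, hence tfxla.pad.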
def _rev(operand, *, dimensions):
return tf.reverse(operand, dimensions)
tf_impl[lax.rev_p] = _rev
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
return tf.transpose(operand, perm=permutation)
tf_impl[lax.transpose_p] = _transpose
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
tf_impl[lax.reduce_sum_p] = (
bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
axis, = axes
output_type = tf.int32
if dtypes.iinfo(index_dtype).bits > 32:
output_type = tf.int64
# TODO(phawkins): handle axes larger than 2^31.
result = fn(operand, axis=axis, output_type=output_type)
return tf.cast(result, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(
tangents: TfVal, operand: TfVal, select_prim: core.Primitive,
window_dimensions: Sequence[int], window_strides: Sequence[int],
base_dilation: Sequence[int], window_dilation: Sequence[int],
padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Note: this function follows the pattern in
# jax.lax._select_and_gather_add_translation.
dtype = operand.dtype
nbits = dtypes.finfo(dtype.as_numpy_dtype).bits
# Specialized for at most 64 bits of packed data. Only up to 32-bit operands
# are supported on TPU, so on that platform we let the code below raise instead.
max_bits = 64
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda dtype, x: tf.constant(np.array(x), dtype)
if double_word_reduction:
word_dtype = lax._UINT_DTYPES[nbits]
double_word_dtype = lax._UINT_DTYPES[nbits * 2]
# Packs two values into a tuple.
def pack(a, b):
a = _bitcast_convert_type(a, word_dtype)
b = _bitcast_convert_type(b, word_dtype)
a = _convert_element_type(a, new_dtype=double_word_dtype)
b = _convert_element_type(b, new_dtype=double_word_dtype)
a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
return tf.bitwise.bitwise_or(a, b)
# Unpacks the first element of a tuple.
def fst(t):
assert t.dtype == double_word_dtype
st = _shift_right_logical(t, const(double_word_dtype, nbits))
return _bitcast_convert_type(
_convert_element_type(st, new_dtype=word_dtype), dtype)
# Unpacks the second element of a tuple.
def snd(t):
return _bitcast_convert_type(
_convert_element_type(t, new_dtype=word_dtype), dtype)
else:
raise NotImplementedError(
f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits."
)
assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim
def reducer(x, y):
which = tf_impl[select_prim]
return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)
init = -np.inf if select_prim is lax.ge_p else np.inf
init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))
out = _specialized_reduce_window(
reducer,
init_identity,
pack(operand, tangents),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation,
_in_avals=_in_avals,
_out_aval=_out_aval)
return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
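# Summary of the packing scheme above (illustrative, not from the original
# file). For float32 operands (nbits == 32), each (operand, tangent) pair is
# packed into one uint64:
#   packed = (uint64(bitcast<uint32>(operand)) << 32)
#            | uint64(bitcast<uint32>(tangent))
# The window reduction compares packed words via their high half (fst(x) is
# the operand value) and snd() recovers the tangent of the selected element.
# float64 operands would need 128-bit words, hence the NotImplementedError.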
def _get_shape_from_tensor_or_array(x):
if isinstance(x.shape, tf.TensorShape):
return tuple(x.shape.as_list())
return tuple(x.shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval):
o_spec = tf.TensorSpec((), dtype=operand.dtype)
reducer_fn = tf.function(
reducer, autograph=False).get_concrete_function(o_spec, o_spec)
if not isinstance(init_val, tf.Tensor):
assert not config.jax_enable_checks or _is_tfval(
init_val), f"Non TfVal: {init_val}"
init_val = tf.constant(init_val, operand.dtype)
out = tfxla.reduce_window(
operand,
init_val,
reducer_fn,
window_dimensions,
window_strides,
base_dilations=base_dilation,
window_dilations=window_dilation,
padding=padding)
# TODO: implement shape inference for XlaReduceWindow
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation,
_in_avals, _out_aval):
assert len(consts) == 0, "Reduction computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("reduce_window")
def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
return _common_reduce_window(operand, init_value, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval)
# _try_tf_pool currently only supports reduce_window_max and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"reduce_window can be converted without XLA.")
return _xla_disabled_error("reduce_window", f"{msg} - {suffix}")
dtype = operand.dtype
# Contrary to the main path, tf.int8 is actually a valid type for
# tf.nn.max_pool.
if op_name == "reduce_window_max" and dtype in [
tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
]:
raise error(f"tf.nn.max_pool does not support operands of type {dtype}")
if op_name == "reduce_window_sum" and operand.dtype not in [
tf.float16, tf.float32, tf.float64
]:
raise error(f"tf.nn.avg_pool does not support operands of type {dtype}")
has_batch_dim = window_dimensions[0] == 1
has_channel_dim = window_dimensions[-1] == 1
nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle pooling for arrays with 1, 2, or "
"3 spatial dimensions")
# TODO(bchetioui): does a simple conversion with another base dilation exist?
if list(base_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for base dilation")
# TODO(bchetioui): does a simple conversion with another window_dilation
# exist? The whole story seems similar to convolution.
if list(window_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for window dilation")
if list(padding) != [(0, 0)] * len(operand.shape):
raise error("Unimplemented support for padding")
# ReduceWindow in XLA takes an array of rank N as a parameter, but
# tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
# shape of the form [batch_size] + input_spatial_shape + [num_channels]
tf_operand = operand
tf_window_dimensions = list(window_dimensions)
tf_window_strides = list(window_strides)
if not has_batch_dim:
tf_operand = tf.expand_dims(tf_operand, 0)
tf_window_dimensions = [1] + tf_window_dimensions
tf_window_strides = [1] + tf_window_strides
if not has_channel_dim:
tf_operand = tf.expand_dims(tf_operand, -1)
tf_window_dimensions.append(1)
tf_window_strides.append(1)
tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
tf_padding = "VALID"
if op_name == "reduce_window_max":
result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
elif op_name == "reduce_window_sum":
avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
result = avg * np.prod(tf_window_dimensions)
else:
raise error(f"Unimplemented support for {op_name}")
if not has_batch_dim:
result = tf.squeeze(result, 0)
if not has_channel_dim:
result = tf.squeeze(result, -1)
return result
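# Worked example for _try_tf_pool (illustrative, not from the original file):
# a reduce_window_sum over a float32 NHWC array with
# window_dimensions == (1, 2, 2, 1), window_strides == (1, 2, 2, 1), and no
# padding or dilation converts to
#   tf.nn.avg_pool(x, (1, 2, 2, 1), (1, 2, 2, 1), "VALID", "NHWC") * 4
# since multiplying the average by the window size (np.prod(...) == 4)
# recovers the sum.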
def _specialized_reduce_window(reducer,
identity,
operand,
*,
window_dimensions,
window_strides,
padding,
base_dilation,
window_dilation,
_in_avals,
_out_aval,
name=None):
if not _enable_xla and name in ["reduce_window_max", "reduce_window_sum"]:
return _try_tf_pool(name, operand, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
return _common_reduce_window(operand, identity(operand.dtype), reducer,
window_dimensions, window_strides, padding,
base_dilation, window_dilation, _in_avals,
_out_aval)
def _get_max_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(-np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).min
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined max identity")
return False
def _get_min_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).max
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined min identity")
return True
# pylint: disable=protected-access
tf_impl_with_avals[lax.reduce_window_sum_p] = (
functools.partial(
_specialized_reduce_window, _add, lambda x: 0,
name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.minimum,
_get_min_identity,
name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.maximum,
_get_max_identity,
name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access
# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but O(n^2) on other
# backends. An associative_scan-based implementation could serve those
# backends better.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_min),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_max),
multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_sum),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_prod),
multiple_results=False)
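# Sketch of the conversion above (illustrative, not from the original file;
# based on my reading of the referenced rule): a cumulative sum over an axis
# of length n becomes a reduce_window_sum with window n and low padding n - 1
# along that axis. For x == [1, 2, 3]:
#   windows: [0, 0, 1] -> 1, [0, 1, 2] -> 3, [1, 2, 3] -> 6
#   result:  [1, 3, 6] == cumsum(x)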
def _select_and_scatter(operand, source, init_value, select_jaxpr,
select_consts, scatter_jaxpr, scatter_consts,
window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
window_strides, padding, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_disabled_error("select_and_scatter_add")
init_value = tf.zeros((), operand.dtype)
select_fn = (
tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(
init_value, init_value))
scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
padding, source, init_value, select_fn,
scatter_fn)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
res = _convert_jax_impl(
functools.partial(
jax._src.random._threefry2x32_lowering, use_rolled_loops=False),
multiple_results=True)(
*args, _in_avals=_in_avals, _out_aval=_out_aval)
return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap-based implementation; otherwise performance on TPU is very poor.
# With use_vmap=True, JAX and jax2tf achieve roughly the same performance.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
functools.partial(jax._src.random._gamma_impl, use_vmap=True),
multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
_in_avals, _out_aval):
del _in_avals
if not _enable_xla:
raise _xla_disabled_error("gather")
proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
slice_sizes_tf = _eval_shape(slice_sizes)
out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.gather_p] = _gather
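# Worked example for _gather (illustrative, not from the original file):
# gathering rows x[idx] with operand.shape == (5, 3) and
# start_indices.shape == (2, 1) typically uses offset_dims == (1,),
# collapsed_slice_dims == (0,), start_index_map == (0,) and
# slice_sizes == (1, 3); index_vector_dim == len((2, 1)) - 1 == 1 and the
# output has shape (2, 3).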
def _slice(operand, start_indices, limit_indices, strides, _in_avals,
_out_aval):
if strides is None:
strides = [1] * len(start_indices)
slices = tuple(
map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),
_eval_shape(strides)))
out = operand[slices]
# TODO(b/184503314): improve shape inference for __getitem__
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
# Here we could use tf.slice. Similarly, for lax.gather we can sometimes use
# tf.gather. But those have different semantics for index-out-of-bounds than
# JAX (and XLA). We have tried to force compilation, by wrapping into
# tf.xla.experimental.compile, or tf.function(jit_compile=True), but
# those solutions are brittle because they do not work when nested into an
# outer compilation (see b/162814494 and b/163006262). They also do not
# survive well being put in a SavedModel. Hence, we now use TFXLA slicing
# and gather ops.
if not _enable_xla:
raise _xla_disabled_error("dynamic_slice")
res = tfxla.dynamic_slice(
operand, tf.stack(start_indices), size_indices=_eval_shape(slice_sizes))
# TODO: implement shape inference for XlaDynamicSlice
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
tf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
def _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del unique_indices, _in_avals
assert len(update_consts) == 0, "Update computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("scatter")
proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
o_spec = tf.TensorSpec((), dtype=operand.dtype)
xla_update_computation = (
tf.function(update_computation,
autograph=False).get_concrete_function(o_spec, o_spec))
out = tfxla.scatter(
operand,
scatter_indices,
updates,
xla_update_computation,
proto,
indices_are_sorted=indices_are_sorted)
# TODO: implement shape analysis for XlaScatter
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
if not _enable_xla:
raise _xla_disabled_error("dynamic_update_slice")
return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],
linear: Sequence[bool]) -> Sequence[TfVal]:
del linear
# tf.cond needs lambdas with no arguments.
branches_tf = [
functools.partial(_interpret_jaxpr, jaxpr, *operands)
for jaxpr in branches
]
return tf.switch_case(index, branches_tf)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]
# The conditional is not a scalar, so this must be a batched while
return _batched_cond_while(
*args,
cond_nconsts=cond_nconsts,
cond_jaxpr=cond_jaxpr,
body_nconsts=body_nconsts,
body_jaxpr=body_jaxpr)
# The conditional must return a single value to TF
def cond_tf_func(*args: TfVal) -> TfVal:
pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)
return pred
body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
return tf.while_loop(cond_tf_func, body_tf_func, init_carry)
def _batched_cond_while(*args: TfVal, cond_nconsts: int,
cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,
body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
# Initial computation of batched condition
init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
assert init_pred_b is not core.unit
def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
return pred
def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,
*carry)
def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
pred_b_bcast = _broadcast_in_dim(
pred_b,
shape=new_c.shape,
broadcast_dimensions=list(range(len(pred_b.shape))))
return tf.where(pred_b_bcast, new_c, c)
selected_carry: Sequence[TfVal] = list(
util.safe_map(select_one_carry, new_carry, carry))
next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
return (next_pred_b, *selected_carry)
_, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
(init_pred_b, *init_carry))
return res_carry
tf_impl[lax_control_flow.while_p] = _while
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(
lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
# Some types originally incompatible with tf.math.top_k can be promoted
# to a compatible type without loss of precision.
def promote_tf_dtype(tf_dtype):
if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:
return tf.uint32
if tf_dtype in [tf.int8, tf.int16]:
return tf.int32
if tf_dtype is tf.float16:
return tf.float32
return None
conversion_dtype = promote_tf_dtype(operand.dtype)
if conversion_dtype:
values, indices = tf.math.top_k(
tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)
return tf.dtypes.cast(values, operand.dtype), indices
else:
return tf.math.top_k(operand, k=k, sorted=True)
tf_impl[lax.top_k_p] = _top_k
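# Example of the dtype promotion above (illustrative, not from the original
# file): a float16 operand is cast to float32, run through tf.math.top_k,
# and the values are cast back to float16; the int32 indices pass through
# unchanged.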
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
num_keys: int) -> Tuple[TfVal, ...]:
if not _enable_xla:
raise _xla_disabled_error("sort")
assert 1 <= num_keys <= len(operands)
assert 0 <= dimension < len(
operands[0].shape
), f"Invalid {dimension} for ndim {len(operands[0].shape)}"
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:
assert len(tf_args) == 2 * len(operands)
# We build a comparison:
# arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))
# all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]
inside_comparison = None
for key_idx in range(num_keys - 1, -1, -1):
a = tf_args[2 * key_idx]
b = tf_args[2 * key_idx + 1]
a_lt_b = tf.math.less(a, b)
if inside_comparison is None:
inside_comparison = a_lt_b
else:
inside_comparison = tf.math.logical_or(
a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))
return inside_comparison
comparator_spec: List[tf.TensorSpec] = []
comparator_jax_in_avals: List[core.AbstractValue] = []
for op in operands:
o_spec = tf.TensorSpec((), dtype=op.dtype)
comparator_spec.extend([o_spec, o_spec])
o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
comparator_jax_in_avals.extend([o_aval, o_aval])
# Use the same comparator that JAX uses when compiling to XLA, to get the
# proper NaN/Inf total order, and the lexicographic ordering.
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
return _convert_jax_impl(
lax._sort_lt_comparator, multiple_results=False)(
*tf_args,
_in_avals=comparator_jax_in_avals,
_out_aval=core.ShapedArray((), np.bool_),
num_keys=num_keys)
xla_comparator_computation = (
tf.function(lexicographic_comparator,
autograph=False).get_concrete_function(*comparator_spec))
results = tfxla.variadic_sort(
operands,
dimension=dimension,
is_stable=is_stable,
comparator=xla_comparator_computation)
return results
tf_impl[lax.sort_p] = _sort
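# Worked example of the comparator shape (illustrative, not from the
# original file): with num_keys == 2 and args (a0, b0, a1, b1), the older
# comparator above builds
#   a0 < b0 or (a0 == b0 and a1 < b1)
# i.e. a lexicographic less-than. The comparator actually passed to
# tfxla.variadic_sort delegates to lax._sort_lt_comparator so NaN/Inf
# ordering matches what JAX produces under XLA.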
def _fft(x, fft_type, fft_lengths):
FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
if fft_type == IRFFT:
expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
else:
expected_lengths = x.shape[-len(fft_lengths):]
if expected_lengths != fft_lengths:
raise NotImplementedError(
f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
f"array with shape={x.shape}.")
tf_funcs = {
FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]
}
return tf_funcs[fft_type][len(fft_lengths) - 1](x)
tf_impl[lax_fft.fft_p] = _fft
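# Example of the dispatch above (illustrative, not from the original file):
# fft_type == RFFT with len(fft_lengths) == 2 selects tf.signal.rfft2d. The
# IRFFT shape check reflects that a real FFT of length n stores only
# n // 2 + 1 complex frequencies, so the expected last length is
# (x.shape[-1] - 1) * 2.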
def _qr(operand, full_matrices):
return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
result = tf.linalg.svd(operand, full_matrices, compute_uv)
if not compute_uv:
return result,
s, u, v = result
return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
compute_right_eigenvectors: bool):
if compute_left_eigenvectors and compute_right_eigenvectors:
# TODO(bchetioui): no fully reliable, easy and satisfying way to convert
# this case has been found yet.
msg = ("Conversion of eig is not implemented when both "
"compute_left_eigenvectors and compute_right_eigenvectors are set "
"to True.")
raise NotImplementedError(msg)
elif not (compute_left_eigenvectors or compute_right_eigenvectors):
return tuple([tf.linalg.eigvals(operand)])
elif compute_right_eigenvectors:
return tuple(tf.linalg.eig(operand))
else:
wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
wHH = tf.math.conj(wH)
return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):
if operand.shape[-1] == 0:
v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))
else:
if not lower:
operand = tf.linalg.adjoint(operand)
w, v = tf.linalg.eigh(operand)
cast_type = {
tf.complex64: tf.float32,
tf.complex128: tf.float64
}.get(operand.dtype)
if cast_type is not None:
w = tf.cast(w, cast_type)
return v, w
tf_impl_with_avals[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
return _convert_jax_impl(lax_linalg._lu_python)(
operand, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
if unit_diagonal:
a_aval, _ = _in_avals
a_shape = _eval_shape(a_aval.shape)
a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))
if not left_side:
rank = len(a.shape)
transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
a = tf.transpose(a, transpose_dimensions)
b = tf.transpose(b, transpose_dimensions)
lower = not lower
if a.dtype in [tf.complex64, tf.complex128]:
if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):
a = tf.math.conj(a)
result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
if not left_side:
result = tf.transpose(result, transpose_dimensions)
return result
tf_impl_with_avals[lax_linalg.triangular_solve_p] = _triangular_solve
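# Summary of the right-hand-side trick above (illustrative, not from the
# original file): to solve X @ A = B (left_side=False), the last two axes of
# A and B are swapped and `lower` is flipped, turning the problem into
# A^T @ X^T = B^T; the result is transposed back. For complex dtypes,
# tf.linalg.triangular_solve's adjoint=True means conjugate-transpose, so A
# is pre-conjugated when exactly one of transpose_a / conjugate_a is set.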
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
*args,
const_lengths=const_lengths,
jaxprs=jaxprs,
_in_avals=_in_avals,
_out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
jvp_jaxpr_thunk: Callable,
num_consts: int) -> Sequence[TfVal]:
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
**_) -> Sequence[TfVal]:
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(tensor: TfVal,
partition_dimensions: pxla.PartitionsOrReplicated):
# This corresponds to the sharding annotations in
# xla_bridge._sharding_to_proto.
if partition_dimensions is None:
return xla_sharding.replicate(tensor, use_sharding_op=True)
num_partition_splits = np.prod(partition_dimensions)
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
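# Small worked example (illustrative, not from the original file): with
# partition_dimensions == [2, 1], num_partition_splits == 2 and
# tile_assignment == [[0], [1]], so the first tensor axis is split across
# two logical devices while the second axis is left whole.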
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
in_parts: Sequence[pxla.PartitionsOrReplicated],
out_parts_thunk,
**_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel
out_parts_flat = out_parts_thunk()
assert len(out_parts_flat) == len(
vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
sharded_vals_out = [
(split_to_logical_devices(val, val_part), val_aval)
for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)
]
return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
partitions: pxla.PartitionsOrReplicated):
return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
m = tf.Module()
# The types here are automagically changed by TensorFlow's checkpointing
m.a = (tf.Module(), tf.Module())
m.b = [tf.Module(), tf.Module()]
m.c = {"a": tf.Module()}
tuple_wrapper = type(m.a)
list_wrapper = type(m.b)
dict_wrapper = type(m.c)
assert tuple_wrapper is not tuple
assert list_wrapper is not list
assert dict_wrapper is not dict
jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:
(tuple(xs), None), lambda _, xs: tuple(xs))
jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),
lambda _, xs: list(xs))
jax.tree_util.register_pytree_node(
dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
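# Why the registrations above are needed (illustrative sketch, not from the
# original file): assigning a plain list to a tf.Module attribute yields a
# TF tracking wrapper rather than a list, so without registration
# jax.tree_util would treat it as a single leaf:
#   m = tf.Module()
#   m.b = [1, 2]
#   type(m.b) is list                 # False: it is a tracking wrapper
#   jax.tree_util.tree_leaves(m.b)    # [1, 2] once the wrapper is registered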
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 790d4a9b5b3baf0348c6de1a41ad3f16a13f894d | size: 881 | ext: py | lang: Python | repo_path: tests/test_flare_io.py | repo_name: sh-divya/flare | repo_head_hexsha: 93219ff03df10528abb8f7a5309f15f7899a3f12 | licenses: ["MIT"] | stars: null | issues: null | forks: null
import pytest
pmgout = pytest.importorskip("pymatgen.io.vasp.outputs")
Vasprun = pmgout.Vasprun
import os
import numpy as np
from flare.struc import Structure, get_unique_species
from flare.dft_interface.vasp_util import md_trajectory_from_vasprun
from flare.utils.flare_io import md_trajectory_to_file, md_trajectory_from_file
pytestmark = pytest.mark.filterwarnings(
"ignore::UserWarning", "ignore::pymatgen.io.vasp.outputs.UnconvergedVASPWarning"
)
def test_read_write_trajectory():
structures = md_trajectory_from_vasprun("test_files/test_vasprun.xml")
fname = "tst_traj.json"
md_trajectory_to_file(fname, structures)
fstructures = md_trajectory_from_file(fname)
for s, f in zip(structures, fstructures):
assert np.isclose(s.forces, f.forces).all()
assert np.isclose(s.positions, f.positions).all()
os.system("rm tst_traj.json")
| avg_line_length: 35.24 | max_line_length: 84 | alphanum_fraction: 0.779796 |
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 790d4ab1943eea88064a1e0518a78d11f3258595 | size: 636 | ext: py | lang: Python | repo_path: run_tests.py | repo_name: djpetti/rhodopsin | repo_head_hexsha: 97bdb9a6ba3c29b1fe1dd1e60b0b41e5a247ccf1 | licenses: ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/python
""" Script to run the Python tests. """
import os
import sys
import unittest
def run_python_tests():
""" Runs the Python tests.
Returns:
True if the tests all succeed, False if there are failures. """
print("Starting tests...")
loader = unittest.TestLoader()
# Get the directory this module is in.
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = loader.discover("rhodopsin/tests", top_level_dir=dir_path)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if not test_result.wasSuccessful():
return False
return True
if __name__ == "__main__":
# Exit nonzero when any test fails, so callers (e.g. CI) can detect it.
sys.exit(0 if run_python_tests() else 1)
| avg_line_length: 21.2 | max_line_length: 68 | alphanum_fraction: 0.712264 |
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 790d4bbada6c11a5360442f4fb288451faab61d1 | size: 3901 | ext: py | lang: Python | repo_path: Python Projects/A Number Guesser(2 modes).py | repo_name: Kaique-Apolinario/Python-projects | repo_head_hexsha: 88ddbe2cb41720c1f26006b2053bf7a1a88d78db | licenses: ["MIT"] | stars: null | issues: null | forks: null
import random
from time import sleep
def Guess():
global attempts
# If the user enters anything but a number between 0 and 10, they stay in this loop.
while True:
try:
attempts += 1 # This will count every attempt made by the user
user_number = int(input().replace(' ', ''))
except:
print("You should put a number between 0 and 10 <3")
else:
if user_number > 10 or user_number < 0:
print("I told you a number between 0 and 10 <3")
else:
break
return user_number
def NextGame():
# If the user enters anything but [S] or [N], they stay in this loop.
while True:
choice = input(
"Do you want to play again? [S]/[N] ").upper().replace(' ', '')
if (choice in "[S]" or choice in "[N]") and choice not in "[]":
break
else:
print("I didn't understand your choice.", end=' ')
return choice
# Introduction
print("\033[1;36m=-"*20, "\033[m")
print(f'\033[1;36m {"Lets play Number Guesser!":^40}\033[m')
print("\033[1;36m=-"*20, "\033[m")
sleep(2)
# The user will choose a mode or will get stuck in a loop until they do so.
while True:
mode = input(
"\nFirst of all, choose a mode: \n[1] Normal mode \n[2] Hide the thimble\n").replace(' ', '')
while True:
if not mode.isnumeric() or int(mode) != 1 and int(mode) != 2:
mode = input("I told you to choose 1 or 2.\n")
else:
break
# If the user chooses the normal mode
if int(mode) == 1:
while True:
# Reset the attempt count each time the player starts a round.
attempts = 0
# The computer will choose a random number
print("I chose a number between 0 and 10, try to guess it! ")
while True:
pc_number = random.randint(0, 10)
# The user will type a number between 0 and 10 or will get stuck in a loop until they do so.
user_number = Guess()
if user_number != pc_number:
print(
"Oops! You are wrong, let me choose another number... Guess it!")
# When the user win
else:
break
print(f"Yes! You are right! You made it with {attempts} attempts!")
# The user chooses whether to play again.
choice = NextGame()
break
if choice not in "[S]":
break
elif int(mode) == 2:  # If the user chooses the hide-the-thimble mode
# Reset the attempt count each time the player starts a round.
attempts = 0
# The computer will choose a random number
pc_number = random.randint(0, 10)
print("I chose a number between 0 and 10, try to guess it!")
# The user will choose a number between 0 and 10, otherwise they will get stuck in a loop.
while True:
user_number = Guess()
if pc_number == user_number: # If the user number is the same as the computer one, the user wins!
break
# If the user's choice is 2 numbers or less apart from the computer one, the user will know they are getting close.
elif pc_number > user_number >= pc_number-2 or pc_number < user_number <= pc_number+2:
print("Hot.")
# Else, they know they aren't close to the computer's number.
else:
print("Cold.")
# When the user win
print(f"Yes! You are right! You made it with {attempts} attempts!")
choice = NextGame()
if choice not in "[S]":
break
# Goodbye
print(f"\nBye, bye! I'll miss you <3")
print("\033[1;34;107mBy: Kaique Apolinário\033[m")
| avg_line_length: 36.457944 | max_line_length: 127 | alphanum_fraction: 0.5596 |
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 790d4da49d1de4ac4b1333435c4bfff56262dac9 | size: 45664 | ext: py | lang: Python | repo_path: FontTools/fontTools/ttLib/tables/_c_m_a_p.py | repo_name: johanoren/IncrementalNumbers_Fusion360 | repo_head_hexsha: dd2655ff44d80853b24dabde2f3b523ef470673d | licenses: ["MIT"] | stars: null | issues: 1 (2019-09-10T11:50:51.000Z to 2019-09-10T11:50:51.000Z) | forks: null
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import operator
class table__c_m_a_p(DefaultTable.DefaultTable):
def getcmap(self, platformID, platEncID):
for subtable in self.tables:
if (subtable.platformID == platformID and
subtable.platEncID == platEncID):
return subtable
return None # not found
def decompile(self, data, ttFont):
tableVersion, numSubTables = struct.unpack(">HH", data[:4])
self.tableVersion = int(tableVersion)
self.tables = tables = []
seenOffsets = {}
for i in range(numSubTables):
platformID, platEncID, offset = struct.unpack(
">HHl", data[4+i*8:4+(i+1)*8])
platformID, platEncID = int(platformID), int(platEncID)
format, length = struct.unpack(">HH", data[offset:offset+4])
if format in [8,10,12,13]:
format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
elif format in [14]:
format, length = struct.unpack(">HL", data[offset:offset+6])
if not length:
print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
continue
table = CmapSubtable.newSubtable(format)
table.platformID = platformID
table.platEncID = platEncID
# Note that by default we decompile only the subtable header info;
# any other data gets decompiled only when an attribute of the
# subtable is referenced.
table.decompileHeader(data[offset:offset+int(length)], ttFont)
if offset in seenOffsets:
table.cmap = tables[seenOffsets[offset]].cmap
else:
seenOffsets[offset] = i
tables.append(table)
def compile(self, ttFont):
self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
numSubTables = len(self.tables)
totalOffset = 4 + 8 * numSubTables
data = struct.pack(">HH", self.tableVersion, numSubTables)
tableData = b""
seen = {} # Some tables are the same object reference. Don't compile them twice.
done = {} # Some tables are different objects, but compile to the same data chunk
for table in self.tables:
try:
offset = seen[id(table.cmap)]
except KeyError:
chunk = table.compile(ttFont)
if chunk in done:
offset = done[chunk]
else:
offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
tableData = tableData + chunk
data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
return data + tableData
def toXML(self, writer, ttFont):
writer.simpletag("tableVersion", version=self.tableVersion)
writer.newline()
for table in self.tables:
table.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == "tableVersion":
self.tableVersion = safeEval(attrs["version"])
return
if name[:12] != "cmap_format_":
return
if not hasattr(self, "tables"):
self.tables = []
format = safeEval(name[12:])
table = CmapSubtable.newSubtable(format)
table.platformID = safeEval(attrs["platformID"])
table.platEncID = safeEval(attrs["platEncID"])
table.fromXML(name, attrs, content, ttFont)
self.tables.append(table)
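# Typical lookup through this table (illustrative sketch, not from the
# original file; "MyFont.ttf" is a hypothetical path):
#   from fontTools.ttLib import TTFont
#   font = TTFont("MyFont.ttf")
#   sub = font["cmap"].getcmap(3, 1)   # Windows platform, Unicode BMP
#   if sub is not None:
#       glyph_name = sub.cmap.get(ord("A"))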
class CmapSubtable(object):
@staticmethod
def getSubtableClass(format):
"""Return the subtable class for a format."""
return cmap_classes.get(format, cmap_format_unknown)
@staticmethod
def newSubtable(format):
"""Return a new instance of a subtable for format."""
subtableClass = CmapSubtable.getSubtableClass(format)
return subtableClass(format)
def __init__(self, format):
self.format = format
self.data = None
self.ttFont = None
def __getattr__(self, attr):
# allow lazy decompilation of subtables.
if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
raise AttributeError(attr)
if self.data is None:
raise AttributeError(attr)
self.decompile(None, None) # use saved data.
self.data = None # Once this table has been decompiled, make sure we don't
# just return the original data. Also avoids recursion when
# called with an attribute that the cmap subtable doesn't have.
return getattr(self, attr)
def decompileHeader(self, data, ttFont):
format, length, language = struct.unpack(">HHH", data[:6])
assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
self.format = int(format)
self.length = int(length)
self.language = int(language)
self.data = data[6:]
self.ttFont = ttFont
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("language", self.language),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def getEncoding(self, default=None):
"""Returns the Python encoding name for this cmap subtable based on its platformID,
platEncID, and language. If encoding for these values is not known, by default
None is returned. That can be overridden by passing a value to the default
argument.
Note that if you want to choose a "preferred" cmap subtable, most of the time
self.isUnicode() is what you want as that one only returns true for the modern,
commonly used, Unicode-compatible triplets, not the legacy ones.
"""
return getEncoding(self.platformID, self.platEncID, self.language, default)
def isUnicode(self):
return (self.platformID == 0 or
(self.platformID == 3 and self.platEncID in [0, 1, 10]))
def isSymbol(self):
return self.platformID == 3 and self.platEncID == 0
def _writeCodes(self, codes, writer):
isUnicode = self.isUnicode()
for code, name in codes:
writer.simpletag("map", code=hex(code), name=name)
if isUnicode:
writer.comment(Unicode[code])
writer.newline()
def __lt__(self, other):
if not isinstance(other, CmapSubtable):
return NotImplemented
# implemented so that list.sort() sorts according to the spec.
selfTuple = (
getattr(self, "platformID", None),
getattr(self, "platEncID", None),
getattr(self, "language", None),
self.__dict__)
otherTuple = (
getattr(other, "platformID", None),
getattr(other, "platEncID", None),
getattr(other, "language", None),
other.__dict__)
return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
glyphIdArray = array.array("B")
glyphIdArray.fromstring(self.data)
self.cmap = cmap = {}
lenArray = len(glyphIdArray)
charCodes = list(range(lenArray))
names = map(self.ttFont.getGlyphName, glyphIdArray)
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", 0, 262, self.language) + self.data
charCodeList = sorted(self.cmap.items())
charCodes = [entry[0] for entry in charCodeList]
valueList = [entry[1] for entry in charCodeList]
assert charCodes == list(range(256))
valueList = map(ttFont.getGlyphID, valueList)
glyphIdArray = array.array("B", valueList)
data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
assert len(data) == 262
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
class SubHeader(object):
def __init__(self):
self.firstCode = None
self.entryCount = None
self.idDelta = None
self.idRangeOffset = None
self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
def setIDDelta(self, subHeader):
subHeader.idDelta = 0
# find the minGI which is not zero.
minGI = subHeader.glyphIndexArray[0]
for gid in subHeader.glyphIndexArray:
if (gid != 0) and (gid < minGI):
minGI = gid
# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
# idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
# We would like to pick an idDelta such that the first glyphArray GID is 1,
# so that we are more likely to be able to combine glypharray GID subranges.
# This means that we have a problem when minGI is > 32K
# Since the final gi is reconstructed from the glyphArray GID by:
# (short)finalGID = (gid + idDelta) % 0x10000),
# we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
# negative number to an unsigned short.
if (minGI > 1):
if minGI > 0x7FFF:
subHeader.idDelta = -(0x10000 - minGI) -1
else:
subHeader.idDelta = minGI -1
idDelta = subHeader.idDelta
for i in range(subHeader.entryCount):
gid = subHeader.glyphIndexArray[i]
if gid > 0:
subHeader.glyphIndexArray[i] = gid - idDelta
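# Worked example for the common case above (illustrative, not from the
# original file): glyphIndexArray == [5, 9, 7, 0] has a minimum nonzero GID
# of 5 <= 0x7FFF, so idDelta == 4 and the stored array becomes [1, 5, 3, 0]
# (zeros stay zero). A decoder recovers each glyph ID as
# (stored + idDelta) % 0x10000, e.g. (1 + 4) % 0x10000 == 5.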
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
subHeaderKeys = []
maxSubHeaderindex = 0
# get the key array, and determine the number of subHeaders.
allKeys = array.array("H")
allKeys.fromstring(data[:512])
data = data[512:]
if sys.byteorder != "big":
allKeys.byteswap()
subHeaderKeys = [ key//8 for key in allKeys]
maxSubHeaderindex = max(subHeaderKeys)
#Load subHeaders
subHeaderList = []
pos = 0
for i in range(maxSubHeaderindex + 1):
subHeader = SubHeader()
(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
pos += 8
giDataPos = pos + subHeader.idRangeOffset-2
giList = array.array("H")
giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
if sys.byteorder != "big":
giList.byteswap()
subHeader.glyphIndexArray = giList
subHeaderList.append(subHeader)
# How this gets processed.
# Charcodes may be one or two bytes.
# The first byte of a charcode is mapped through the subHeaderKeys, to select
# a subHeader. For any subheader but 0, the next byte is then mapped through the
# selected subheader. If subheader Index 0 is selected, then the byte itself is
# mapped through the subheader, and there is no second byte.
# Then assume that the subsequent byte is the first byte of the next charcode, and repeat.
#
# Each subheader references a range in the glyphIndexArray whose length is entryCount.
# The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
# referenced by another subheader.
# The only subheader that will be referenced by more than one first-byte value is the subheader
# that maps the entire range of glyphID values to glyphIndex 0, e.g. notdef:
# {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
# A byte being mapped through a subheader is treated as an index into a mapping of array index to font glyphIndex.
# A subheader specifies a subrange within (0...256) by the
# firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
# (e.g. glyph not in font).
# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
# The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
# counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
# glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
# Example for Logocut-Medium
# first byte of charcode = 129; selects subheader 1.
# subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
# second byte of charCode = 66
# the index offset = 66-64 = 2.
# The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
# [glyphIndexArray index], [subrange array index] = glyphIndex
# [256], [0]=1 from charcode [129, 64]
# [257], [1]=2 from charcode [129, 65]
# [258], [2]=3 from charcode [129, 66]
# [259], [3]=4 from charcode [129, 67]
# So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
# add it to the glyphID to get the final glyphIndex
# value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
self.data = b""
self.cmap = cmap = {}
notdefGI = 0
for firstByte in range(256):
subHeadindex = subHeaderKeys[firstByte]
subHeader = subHeaderList[subHeadindex]
if subHeadindex == 0:
if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
continue # gi is notdef.
else:
charCode = firstByte
offsetIndex = firstByte - subHeader.firstCode
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue # gi is notdef.
cmap[charCode] = gi
else:
if subHeader.entryCount:
charCodeOffset = firstByte * 256 + subHeader.firstCode
for offsetIndex in range(subHeader.entryCount):
charCode = charCodeOffset + offsetIndex
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue
cmap[charCode] = gi
# If not subHeader.entryCount, then all char codes with this first byte are
# mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
# same as mapping it to .notdef.
# cmap values are GID's.
glyphOrder = self.ttFont.getGlyphOrder()
gids = list(cmap.values())
charCodes = list(cmap.keys())
lenCmap = len(gids)
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
kEmptyTwoCharCodeRange = -1
notdefGI = 0
items = sorted(self.cmap.items())
charCodes = [item[0] for item in items]
names = [item[1] for item in items]
nameMap = ttFont.getReverseGlyphMap()
lenCharCodes = len(charCodes)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 2 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
# Process the (char code to gid) item list in char code order.
# By definition, all one byte char codes map to subheader 0.
# For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
# which defines all char codes in its range to map to notdef) unless proven otherwise.
# Note that since the char code items are processed in char code order, all the char codes with the
# same first byte are in sequential order.
subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
subHeaderList = []
# We force this subheader entry 0 to exist in the subHeaderList in the case where someone comes up
# with a cmap where all the one byte char codes map to notdef,
# with the result that the subhead 0 would not get created just by processing the item list.
charCode = charCodes[0]
if charCode > 255:
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 0
subHeaderList.append(subHeader)
lastFirstByte = -1
items = zip(charCodes, gids)
for charCode, gid in items:
if gid == 0:
continue
firstbyte = charCode >> 8
secondByte = charCode & 0x00FF
if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
if lastFirstByte > -1:
# fix GI's and iDelta of current subheader.
self.setIDDelta(subHeader)
# If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
# for the indices matching the char codes.
if lastFirstByte == 0:
for index in range(subHeader.entryCount):
charCode = subHeader.firstCode + index
subHeaderKeys[charCode] = 0
assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
# init new subheader
subHeader = SubHeader()
subHeader.firstCode = secondByte
subHeader.entryCount = 1
subHeader.glyphIndexArray.append(gid)
subHeaderList.append(subHeader)
subHeaderKeys[firstbyte] = len(subHeaderList) -1
lastFirstByte = firstbyte
else:
# need to fill in with notdefs all the code points between the last charCode and the current charCode.
codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
for i in range(codeDiff):
subHeader.glyphIndexArray.append(notdefGI)
subHeader.glyphIndexArray.append(gid)
subHeader.entryCount = subHeader.entryCount + codeDiff + 1
# fix GI's and iDelta of the last subheader that we added to the subheader array.
self.setIDDelta(subHeader)
# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 2
subHeaderList.append(subHeader)
emptySubheadIndex = len(subHeaderList) - 1
for index in range(256):
if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
subHeaderKeys[index] = emptySubheadIndex
# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
# charcode 0 and GID 0.
idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
for index in range(subheadRangeLen):
subHeader = subHeaderList[index]
subHeader.idRangeOffset = 0
for j in range(index):
prevSubhead = subHeaderList[j]
if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
subHeader.glyphIndexArray = []
break
if subHeader.idRangeOffset == 0: # didn't find one.
subHeader.idRangeOffset = idRangeOffset
idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
else:
idRangeOffset = idRangeOffset - 8 # one less subheader
# Now we can write out the data!
length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
for subhead in subHeaderList[:-1]:
length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subheads may share subarrays.
dataList = [struct.pack(">HHH", 2, length, self.language)]
for index in subHeaderKeys:
dataList.append(struct.pack(">H", index*8))
for subhead in subHeaderList:
dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
for subhead in subHeaderList[:-1]:
for gi in subhead.glyphIndexArray:
dataList.append(struct.pack(">H", gi))
data = bytesjoin(dataList)
assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
#uint16 reservedPad # This value should be zero
#uint16 startCode[segCount] # Starting character code for each segment
#uint16 idDelta[segCount] # Delta for all character codes in segment
#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
#uint16 glyphIndexArray[variable] # Glyph index array
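# Note on how format 4 resolves a char code (a sketch of the spec's rule, which
# cmap_format_4.decompile() below folds into its "partial" variable):
#     if idRangeOffset[i] == 0:
#         glyphIndex = (charCode + idDelta[i]) & 0xFFFF
#     else:
#         gi = glyphIndexArray[idRangeOffset[i] // 2 + (charCode - startCode[i]) - (segCount - i)]
#         glyphIndex = (gi + idDelta[i]) & 0xFFFF if gi != 0 else 0
# i.e. a non-zero idRangeOffset is counted in bytes from the idRangeOffset[i] word itself.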
def splitRange(startCode, endCode, cmap):
# Try to split a range of character codes into subranges with consecutive
# glyph IDs in such a way that the cmap4 subtable can be stored "most"
# efficiently. I can't prove I've got the optimal solution, but it seems
# to do well with the fonts I tested: none became bigger, many became smaller.
if startCode == endCode:
return [], [endCode]
lastID = cmap[startCode]
lastCode = startCode
inOrder = None
orderedBegin = None
subRanges = []
# Gather subranges in which the glyph IDs are consecutive.
for code in range(startCode + 1, endCode + 1):
glyphID = cmap[code]
if glyphID - 1 == lastID:
if inOrder is None or not inOrder:
inOrder = 1
orderedBegin = lastCode
else:
if inOrder:
inOrder = 0
subRanges.append((orderedBegin, lastCode))
orderedBegin = None
lastID = glyphID
lastCode = code
if inOrder:
subRanges.append((orderedBegin, lastCode))
assert lastCode == endCode
# Now filter out those new subranges that would only make the data bigger.
# A new segment costs 8 bytes, not using a new segment costs 2 bytes per
# character.
newRanges = []
for b, e in subRanges:
if b == startCode and e == endCode:
break # the whole range, we're fine
if b == startCode or e == endCode:
threshold = 4 # split costs one more segment
else:
threshold = 8 # split costs two more segments
if (e - b + 1) > threshold:
newRanges.append((b, e))
subRanges = newRanges
if not subRanges:
return [], [endCode]
if subRanges[0][0] != startCode:
subRanges.insert(0, (startCode, subRanges[0][0] - 1))
if subRanges[-1][1] != endCode:
subRanges.append((subRanges[-1][1] + 1, endCode))
# Fill the "holes" in the segments list -- those are the segments in which
# the glyph IDs are _not_ consecutive.
i = 1
while i < len(subRanges):
if subRanges[i-1][1] + 1 != subRanges[i][0]:
subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
i = i + 1
i = i + 1
# Transform the ranges into startCode/endCode lists.
start = []
end = []
for b, e in subRanges:
start.append(b)
end.append(e)
start.pop(0)
assert len(start) + 1 == len(end)
return start, end
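# Hypothetical illustration of splitRange() (made-up values, not from a real font):
# if codes 100-119 map to the consecutive glyph IDs 5-24 while codes 120-130 map to
# scattered glyph IDs with no consecutive runs, then
#     splitRange(100, 130, cmap)  ->  ([120], [119, 130])
# The ordered run 100-119 becomes its own segment (encodable with idDelta alone) and
# 120-130 remains a segment that needs a glyphIndexArray subrange; cmap_format_4.compile()
# extends its running startCode/endCode lists with the returned values.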
class cmap_format_4(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
(segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(">4H", data[:8])
data = data[8:]
segCount = segCountX2 // 2
allCodes = array.array("H")
allCodes.fromstring(data)
self.data = data = None
if sys.byteorder != "big":
allCodes.byteswap()
# divide the data
endCode = allCodes[:segCount]
allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
startCode = allCodes[:segCount]
allCodes = allCodes[segCount:]
idDelta = allCodes[:segCount]
allCodes = allCodes[segCount:]
idRangeOffset = allCodes[:segCount]
glyphIndexArray = allCodes[segCount:]
lenGIArray = len(glyphIndexArray)
# build 2-byte character mapping
charCodes = []
gids = []
for i in range(len(startCode) - 1): # don't do 0xffff!
start = startCode[i]
delta = idDelta[i]
rangeOffset = idRangeOffset[i]
# *someone* needs to get killed.
partial = rangeOffset // 2 - start + i - len(idRangeOffset)
rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
charCodes.extend(rangeCharCodes)
if rangeOffset == 0:
gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
else:
for charCode in rangeCharCodes:
index = charCode + partial
assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
if glyphIndexArray[index] != 0: # if not missing glyph
glyphID = glyphIndexArray[index] + delta
else:
glyphID = 0 # missing glyph
gids.append(glyphID & 0xFFFF)
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
if lenCharCodes == 0:
startCode = [0xffff]
endCode = [0xffff]
else:
charCodes.sort()
names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 4 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
# Build startCode and endCode lists.
# Split the char codes in ranges of consecutive char codes, then split
# each range in more ranges of consecutive/not consecutive glyph IDs.
# See splitRange().
lastCode = charCodes[0]
endCode = []
startCode = [lastCode]
for charCode in charCodes[1:]: # skip the first code, it's the first start code
if charCode == lastCode + 1:
lastCode = charCode
continue
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(charCode)
lastCode = charCode
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(0xffff)
endCode.append(0xffff)
# build up rest of cruft
idDelta = []
idRangeOffset = []
glyphIndexArray = []
for i in range(len(endCode)-1): # skip the closing codes (0xffff)
indices = []
for charCode in range(startCode[i], endCode[i] + 1):
indices.append(cmap[charCode])
if (indices == list(range(indices[0], indices[0] + len(indices)))):
idDelta.append((indices[0] - startCode[i]) % 0x10000)
idRangeOffset.append(0)
else:
# someone *definitely* needs to get killed.
idDelta.append(0)
idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
glyphIndexArray.extend(indices)
idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
idRangeOffset.append(0)
# Insane.
segCount = len(endCode)
segCountX2 = segCount * 2
searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
charCodeArray = array.array("H", endCode + [0] + startCode)
idDeltaArray = array.array("H", idDelta)
restArray = array.array("H", idRangeOffset + glyphIndexArray)
if sys.byteorder != "big":
charCodeArray.byteswap()
idDeltaArray.byteswap()
restArray.byteswap()
data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring()
length = struct.calcsize(cmap_format_4_format) + len(data)
header = struct.pack(cmap_format_4_format, self.format, length, self.language,
segCountX2, searchRange, entrySelector, rangeShift)
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
nameMap, attrsMap, dummyContent = element
if nameMap != "map":
assert 0, "Unrecognized keyword in cmap subtable"
cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
firstCode, entryCount = struct.unpack(">HH", data[:4])
firstCode = int(firstCode)
data = data[4:]
#assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
glyphIndexArray = array.array("H")
glyphIndexArray.fromstring(data[:2 * int(entryCount)])
if sys.byteorder != "big":
glyphIndexArray.byteswap()
self.data = data = None
self.cmap = cmap = {}
lenArray = len(glyphIndexArray)
charCodes = list(range(firstCode, firstCode + lenArray))
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, glyphIndexArray ))
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = sorted(cmap.keys())
if codes: # yes, there are empty cmap tables.
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [cmap.get(code, ".notdef") for code in codes]
valueList = map(ttFont.getGlyphID, valueList)
glyphIndexArray = array.array("H", valueList)
if sys.byteorder != "big":
glyphIndexArray.byteswap()
data = glyphIndexArray.tostring()
else:
data = b""
firstCode = 0
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
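# Sketch of the dense-array layout compile() writes (hypothetical glyph names): for
# cmap = {0x30: "zero", 0x32: "two"} the codes are padded to the full range 0x30-0x32,
# so glyphIndexArray gets three entries and the gap at 0x31 is filled with .notdef's GID.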
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
def __init__(self, format):
self.format = format
self.reserved = 0
self.data = None
self.ttFont = None
def decompileHeader(self, data, ttFont):
format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
self.format = format
self.reserved = reserved
self.length = length
self.language = language
self.nGroups = nGroups
self.data = data[16:]
self.ttFont = ttFont
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
charCodes = []
gids = []
pos = 0
for i in range(self.nGroups):
startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
pos += 12
lenGroup = 1 + endCharCode - startCharCode
charCodes.extend(list(range(startCharCode, endCharCode +1)))
gids.extend(self._computeGIDs(glyphID, lenGroup))
self.data = data = None
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
names = list(self.cmap.values())
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 12 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
charCodes.sort()
index = 0
startCharCode = charCodes[0]
startGlyphID = cmap[startCharCode]
lastGlyphID = startGlyphID - self._format_step
lastCharCode = startCharCode - 1
nGroups = 0
dataList = []
maxIndex = len(charCodes)
for index in range(maxIndex):
charCode = charCodes[index]
glyphID = cmap[charCode]
if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
startCharCode = charCode
startGlyphID = glyphID
nGroups = nGroups + 1
lastGlyphID = glyphID
lastCharCode = charCode
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
nGroups = nGroups + 1
data = bytesjoin(dataList)
lengthSubtable = len(data) +16
assert len(data) == (nGroups*12) == (lengthSubtable-16)
return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("reserved", self.reserved),
("length", self.length),
("language", self.language),
("nGroups", self.nGroups),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.reserved = safeEval(attrs["reserved"])
self.length = safeEval(attrs["length"])
self.language = safeEval(attrs["language"])
self.nGroups = safeEval(attrs["nGroups"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
_format_step = 1
def __init__(self, format=12):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
_format_step = 0
def __init__(self, format=13):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return [startingGlyph] * numberOfGlyphs
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
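# Sketch of how the two formats interpret the same group (startCharCode, endCharCode, glyphID),
# e.g. (0x4E00, 0x4E02, 17):
#     format 12: 0x4E00->17, 0x4E01->18, 0x4E02->19   (consecutive glyph IDs)
#     format 13: 0x4E00->17, 0x4E01->17, 0x4E02->17   (one glyph ID for the whole range)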
def cvtToUVS(threeByteString):
data = b"\0" + threeByteString
val, = struct.unpack(">L", data)
return val
def cvtFromUVS(val):
assert 0 <= val < 0x1000000
fourByteString = struct.pack(">L", val)
return fourByteString[1:]
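# Sketch of the uint24 conversions above, using variation selector U+FE00:
#     cvtToUVS(b"\x00\xfe\x00")  ->  0xFE00
#     cvtFromUVS(0xFE00)         ->  b"\x00\xfe\x00"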
class cmap_format_14(CmapSubtable):
def decompileHeader(self, data, ttFont):
format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
self.data = data[10:]
self.length = length
self.numVarSelectorRecords = numVarSelectorRecords
self.ttFont = ttFont
self.language = 0xFF # has no language.
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
uvsDict = {}
recOffset = 0
for n in range(self.numVarSelectorRecords):
uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
recOffset += 11
varUVS = cvtToUVS(uvs)
if defOVSOffset:
startOffset = defOVSOffset - 10
numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
for r in range(numValues):
uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
startOffset += 4
firstBaseUV = cvtToUVS(uv)
cnt = addtlCnt+1
baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
glyphList = [None]*cnt
localUVList = zip(baseUVList, glyphList)
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = list(localUVList)
if nonDefUVSOffset:
startOffset = nonDefUVSOffset - 10
numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
localUVList = []
for r in range(numRecs):
uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
startOffset += 5
uv = cvtToUVS(uv)
glyphName = self.ttFont.getGlyphName(gid)
localUVList.append( [uv, glyphName] )
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = localUVList
self.uvsDict = uvsDict
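# Sketch of the uvsDict built above (hypothetical glyph name): keyed by variation selector,
# each entry pairs a base Unicode value with either None (meaning "use the default cmap
# glyph") or an explicit glyph name for a non-default variation sequence:
#     self.uvsDict = { 0xFE00: [[0x4E0E, None], [0x4E08, "uni4E08.vs1"]] }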
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("length", self.length),
("numVarSelectorRecords", self.numVarSelectorRecords),
])
writer.newline()
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
for uvs in uvsList:
uvList = uvsDict[uvs]
uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
for uv, gname in uvList:
if gname is None:
gname = "None"
# I use the arg rather than the keyword syntax in order to preserve the attribute order.
writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] )
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.length = safeEval(attrs["length"])
self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
if not hasattr(self, "cmap"):
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
if not hasattr(self, "uvsDict"):
self.uvsDict = {}
uvsDict = self.uvsDict
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
uvs = safeEval(attrs["uvs"])
uv = safeEval(attrs["uv"])
gname = attrs["name"]
if gname == "None":
gname = None
try:
uvsDict[uvs].append( [uv, gname])
except KeyError:
uvsDict[uvs] = [ [uv, gname] ]
def compile(self, ttFont):
if self.data:
return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
self.numVarSelectorRecords = len(uvsList)
offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
data = []
varSelectorRecords =[]
for uvs in uvsList:
entryList = uvsDict[uvs]
defList = [entry for entry in entryList if entry[1] is None]
if defList:
defList = [entry[0] for entry in defList]
defOVSOffset = offset
defList.sort()
lastUV = defList[0]
cnt = -1
defRecs = []
for defEntry in defList:
cnt +=1
if (lastUV+cnt) != defEntry:
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
lastUV = defEntry
defRecs.append(rec)
cnt = 0
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
defRecs.append(rec)
numDefRecs = len(defRecs)
data.append(struct.pack(">L", numDefRecs))
data.extend(defRecs)
offset += 4 + numDefRecs*4
else:
defOVSOffset = 0
ndefList = [entry for entry in entryList if entry[1] is not None]
if ndefList:
nonDefUVSOffset = offset
ndefList.sort()
numNonDefRecs = len(ndefList)
data.append(struct.pack(">L", numNonDefRecs))
offset += 4 + numNonDefRecs*5
for uv, gname in ndefList:
gid = ttFont.getGlyphID(gname)
ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
data.append(ndrec)
else:
nonDefUVSOffset = 0
vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
varSelectorRecords.append(vrec)
data = bytesjoin(varSelectorRecords) + bytesjoin(data)
self.length = 10 + len(data)
headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
self.data = headerdata + data
return self.data
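# Sketch of the TTX "map" elements that toXML()/fromXML() above round-trip
# (hypothetical glyph name; name="None" marks a default UVS entry):
#     <map uvs="0xfe00" uv="0x4e08" name="uni4E08.vs1"/>
#     <map uvs="0xfe00" uv="0x4e0e" name="None"/>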
class cmap_format_unknown(CmapSubtable):
def toXML(self, writer, ttFont):
cmapName = self.__class__.__name__[:12] + str(self.format)
writer.begintag(cmapName, [
("platformID", self.platformID),
("platEncID", self.platEncID),
])
writer.newline()
writer.dumphex(self.data)
writer.endtag(cmapName)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.data = readHex(content)
self.cmap = {}
def decompileHeader(self, data, ttFont):
self.language = 0 # dummy value
self.data = data
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
def compile(self, ttFont):
if self.data:
return self.data
else:
return None
cmap_classes = {
0: cmap_format_0,
2: cmap_format_2,
4: cmap_format_4,
6: cmap_format_6,
12: cmap_format_12,
13: cmap_format_13,
14: cmap_format_14,
}
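# Hypothetical usage sketch (assumes "font" is a TTFont loaded elsewhere and the glyph
# names exist in it): build a new format 4 subtable and attach it to the cmap table:
#     subtable = CmapSubtable.newSubtable(4)
#     subtable.platformID, subtable.platEncID, subtable.language = 3, 1, 0
#     subtable.cmap = {0x0041: "A", 0x0042: "B"}
#     font["cmap"].tables.append(subtable)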
| 35.289026
| 192
| 0.698888
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import operator
class table__c_m_a_p(DefaultTable.DefaultTable):
def getcmap(self, platformID, platEncID):
for subtable in self.tables:
if (subtable.platformID == platformID and
subtable.platEncID == platEncID):
return subtable
return None
def decompile(self, data, ttFont):
tableVersion, numSubTables = struct.unpack(">HH", data[:4])
self.tableVersion = int(tableVersion)
self.tables = tables = []
seenOffsets = {}
for i in range(numSubTables):
platformID, platEncID, offset = struct.unpack(
">HHl", data[4+i*8:4+(i+1)*8])
platformID, platEncID = int(platformID), int(platEncID)
format, length = struct.unpack(">HH", data[offset:offset+4])
if format in [8,10,12,13]:
format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
elif format in [14]:
format, length = struct.unpack(">HL", data[offset:offset+6])
if not length:
print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
continue
table = CmapSubtable.newSubtable(format)
table.platformID = platformID
table.platEncID = platEncID
table.decompileHeader(data[offset:offset+int(length)], ttFont)
if offset in seenOffsets:
table.cmap = tables[seenOffsets[offset]].cmap
else:
seenOffsets[offset] = i
tables.append(table)
def compile(self, ttFont):
self.tables.sort()
numSubTables = len(self.tables)
totalOffset = 4 + 8 * numSubTables
data = struct.pack(">HH", self.tableVersion, numSubTables)
tableData = b""
seen = {}
done = {} # Some tables are different objects, but compile to the same data chunk
for table in self.tables:
try:
offset = seen[id(table.cmap)]
except KeyError:
chunk = table.compile(ttFont)
if chunk in done:
offset = done[chunk]
else:
offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
tableData = tableData + chunk
data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
return data + tableData
def toXML(self, writer, ttFont):
writer.simpletag("tableVersion", version=self.tableVersion)
writer.newline()
for table in self.tables:
table.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == "tableVersion":
self.tableVersion = safeEval(attrs["version"])
return
if name[:12] != "cmap_format_":
return
if not hasattr(self, "tables"):
self.tables = []
format = safeEval(name[12:])
table = CmapSubtable.newSubtable(format)
table.platformID = safeEval(attrs["platformID"])
table.platEncID = safeEval(attrs["platEncID"])
table.fromXML(name, attrs, content, ttFont)
self.tables.append(table)
class CmapSubtable(object):
@staticmethod
def getSubtableClass(format):
return cmap_classes.get(format, cmap_format_unknown)
@staticmethod
def newSubtable(format):
subtableClass = CmapSubtable.getSubtableClass(format)
return subtableClass(format)
def __init__(self, format):
self.format = format
self.data = None
self.ttFont = None
def __getattr__(self, attr):
# allow lazy decompilation of subtables.
if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
raise AttributeError(attr)
if self.data is None:
raise AttributeError(attr)
self.decompile(None, None)
self.data = None
# just return the original data. Also avoids recursion when
# called with an attribute that the cmap subtable doesn't have.
return getattr(self, attr)
def decompileHeader(self, data, ttFont):
format, length, language = struct.unpack(">HHH", data[:6])
assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
self.format = int(format)
self.length = int(length)
self.language = int(language)
self.data = data[6:]
self.ttFont = ttFont
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("language", self.language),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def getEncoding(self, default=None):
return getEncoding(self.platformID, self.platEncID, self.language, default)
def isUnicode(self):
return (self.platformID == 0 or
(self.platformID == 3 and self.platEncID in [0, 1, 10]))
def isSymbol(self):
return self.platformID == 3 and self.platEncID == 0
def _writeCodes(self, codes, writer):
isUnicode = self.isUnicode()
for code, name in codes:
writer.simpletag("map", code=hex(code), name=name)
if isUnicode:
writer.comment(Unicode[code])
writer.newline()
def __lt__(self, other):
if not isinstance(other, CmapSubtable):
return NotImplemented
selfTuple = (
getattr(self, "platformID", None),
getattr(self, "platEncID", None),
getattr(self, "language", None),
self.__dict__)
otherTuple = (
getattr(other, "platformID", None),
getattr(other, "platEncID", None),
getattr(other, "language", None),
other.__dict__)
return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
glyphIdArray = array.array("B")
glyphIdArray.fromstring(self.data)
self.cmap = cmap = {}
lenArray = len(glyphIdArray)
charCodes = list(range(lenArray))
names = map(self.ttFont.getGlyphName, glyphIdArray)
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", 0, 262, self.language) + self.data
charCodeList = sorted(self.cmap.items())
charCodes = [entry[0] for entry in charCodeList]
valueList = [entry[1] for entry in charCodeList]
assert charCodes == list(range(256))
valueList = map(ttFont.getGlyphID, valueList)
glyphIdArray = array.array("B", valueList)
data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
assert len(data) == 262
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
class SubHeader(object):
def __init__(self):
self.firstCode = None
self.entryCount = None
self.idDelta = None
self.idRangeOffset = None
self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
def setIDDelta(self, subHeader):
subHeader.idDelta = 0
minGI = subHeader.glyphIndexArray[0]
for gid in subHeader.glyphIndexArray:
if (gid != 0) and (gid < minGI):
minGI = gid
if (minGI > 1):
if minGI > 0x7FFF:
subHeader.idDelta = -(0x10000 - minGI) -1
else:
subHeader.idDelta = minGI -1
idDelta = subHeader.idDelta
for i in range(subHeader.entryCount):
gid = subHeader.glyphIndexArray[i]
if gid > 0:
subHeader.glyphIndexArray[i] = gid - idDelta
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
subHeaderKeys = []
maxSubHeaderindex = 0
allKeys = array.array("H")
allKeys.fromstring(data[:512])
data = data[512:]
if sys.byteorder != "big":
allKeys.byteswap()
subHeaderKeys = [ key//8 for key in allKeys]
maxSubHeaderindex = max(subHeaderKeys)
subHeaderList = []
pos = 0
for i in range(maxSubHeaderindex + 1):
subHeader = SubHeader()
(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
pos += 8
giDataPos = pos + subHeader.idRangeOffset-2
giList = array.array("H")
giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
if sys.byteorder != "big":
giList.byteswap()
subHeader.glyphIndexArray = giList
subHeaderList.append(subHeader)
self.data = b""
self.cmap = cmap = {}
notdefGI = 0
for firstByte in range(256):
subHeadindex = subHeaderKeys[firstByte]
subHeader = subHeaderList[subHeadindex]
if subHeadindex == 0:
if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
continue
else:
charCode = firstByte
offsetIndex = firstByte - subHeader.firstCode
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue
cmap[charCode] = gi
else:
if subHeader.entryCount:
charCodeOffset = firstByte * 256 + subHeader.firstCode
for offsetIndex in range(subHeader.entryCount):
charCode = charCodeOffset + offsetIndex
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue
cmap[charCode] = gi
glyphOrder = self.ttFont.getGlyphOrder()
gids = list(cmap.values())
charCodes = list(cmap.keys())
lenCmap = len(gids)
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
kEmptyTwoCharCodeRange = -1
notdefGI = 0
items = sorted(self.cmap.items())
charCodes = [item[0] for item in items]
names = [item[1] for item in items]
nameMap = ttFont.getReverseGlyphMap()
lenCharCodes = len(charCodes)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 2 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
# Process the (char code to gid) item list in char code order.
# By definition, all one byte char codes map to subheader 0.
# For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
# which defines all char codes in its range to map to notdef) unless proven otherwise.
# Note that since the char code items are processed in char code order, all the char codes with the
# same first byte are in sequential order.
subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
subHeaderList = []
# We force this subheader entry 0 to exist in the subHeaderList in the case where someone comes up
# with a cmap where all the one byte char codes map to notdef,
# with the result that the subhead 0 would not get created just by processing the item list.
charCode = charCodes[0]
if charCode > 255:
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 0
subHeaderList.append(subHeader)
lastFirstByte = -1
items = zip(charCodes, gids)
for charCode, gid in items:
if gid == 0:
continue
firstbyte = charCode >> 8
secondByte = charCode & 0x00FF
if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
if lastFirstByte > -1:
# fix GI's and iDelta of current subheader.
self.setIDDelta(subHeader)
if lastFirstByte == 0:
for index in range(subHeader.entryCount):
charCode = subHeader.firstCode + index
subHeaderKeys[charCode] = 0
assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
subHeader = SubHeader()
subHeader.firstCode = secondByte
subHeader.entryCount = 1
subHeader.glyphIndexArray.append(gid)
subHeaderList.append(subHeader)
subHeaderKeys[firstbyte] = len(subHeaderList) -1
lastFirstByte = firstbyte
else:
codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
for i in range(codeDiff):
subHeader.glyphIndexArray.append(notdefGI)
subHeader.glyphIndexArray.append(gid)
subHeader.entryCount = subHeader.entryCount + codeDiff + 1
self.setIDDelta(subHeader)
# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 2
subHeaderList.append(subHeader)
emptySubheadIndex = len(subHeaderList) - 1
for index in range(256):
if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
subHeaderKeys[index] = emptySubheadIndex
# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
# charcode 0 and GID 0.
idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
for index in range(subheadRangeLen):
subHeader = subHeaderList[index]
subHeader.idRangeOffset = 0
for j in range(index):
prevSubhead = subHeaderList[j]
if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray:
subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
subHeader.glyphIndexArray = []
break
if subHeader.idRangeOffset == 0:
subHeader.idRangeOffset = idRangeOffset
idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
else:
idRangeOffset = idRangeOffset - 8 # one less subheader
# Now we can write out the data!
length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
for subhead in subHeaderList[:-1]:
length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subheads may share subarrays.
dataList = [struct.pack(">HHH", 2, length, self.language)]
for index in subHeaderKeys:
dataList.append(struct.pack(">H", index*8))
for subhead in subHeaderList:
dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
for subhead in subHeaderList[:-1]:
for gi in subhead.glyphIndexArray:
dataList.append(struct.pack(">H", gi))
data = bytesjoin(dataList)
assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
def splitRange(startCode, endCode, cmap):
if startCode == endCode:
return [], [endCode]
lastID = cmap[startCode]
lastCode = startCode
inOrder = None
orderedBegin = None
subRanges = []
for code in range(startCode + 1, endCode + 1):
glyphID = cmap[code]
if glyphID - 1 == lastID:
if inOrder is None or not inOrder:
inOrder = 1
orderedBegin = lastCode
else:
if inOrder:
inOrder = 0
subRanges.append((orderedBegin, lastCode))
orderedBegin = None
lastID = glyphID
lastCode = code
if inOrder:
subRanges.append((orderedBegin, lastCode))
assert lastCode == endCode
newRanges = []
for b, e in subRanges:
if b == startCode and e == endCode:
break
if b == startCode or e == endCode:
threshold = 4 # split costs one more segment
else:
threshold = 8 # split costs two more segments
if (e - b + 1) > threshold:
newRanges.append((b, e))
subRanges = newRanges
if not subRanges:
return [], [endCode]
if subRanges[0][0] != startCode:
subRanges.insert(0, (startCode, subRanges[0][0] - 1))
if subRanges[-1][1] != endCode:
subRanges.append((subRanges[-1][1] + 1, endCode))
# Fill the "holes" in the segments list -- those are the segments in which
# the glyph IDs are _not_ consecutive.
i = 1
while i < len(subRanges):
if subRanges[i-1][1] + 1 != subRanges[i][0]:
subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
i = i + 1
i = i + 1
# Transform the ranges into startCode/endCode lists.
start = []
end = []
for b, e in subRanges:
start.append(b)
end.append(e)
start.pop(0)
assert len(start) + 1 == len(end)
return start, end
class cmap_format_4(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
(segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(">4H", data[:8])
data = data[8:]
segCount = segCountX2 // 2
allCodes = array.array("H")
allCodes.fromstring(data)
self.data = data = None
if sys.byteorder != "big":
allCodes.byteswap()
# divide the data
endCode = allCodes[:segCount]
allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
startCode = allCodes[:segCount]
allCodes = allCodes[segCount:]
idDelta = allCodes[:segCount]
allCodes = allCodes[segCount:]
idRangeOffset = allCodes[:segCount]
glyphIndexArray = allCodes[segCount:]
lenGIArray = len(glyphIndexArray)
# build 2-byte character mapping
charCodes = []
gids = []
for i in range(len(startCode) - 1): # don't do 0xffff!
start = startCode[i]
delta = idDelta[i]
rangeOffset = idRangeOffset[i]
partial = rangeOffset // 2 - start + i - len(idRangeOffset)
rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
charCodes.extend(rangeCharCodes)
if rangeOffset == 0:
gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
else:
for charCode in rangeCharCodes:
index = charCode + partial
assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
if glyphIndexArray[index] != 0:
glyphID = glyphIndexArray[index] + delta
else:
glyphID = 0
gids.append(glyphID & 0xFFFF)
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
if lenCharCodes == 0:
startCode = [0xffff]
endCode = [0xffff]
else:
charCodes.sort()
names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {}
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
lastCode = charCodes[0]
endCode = []
startCode = [lastCode]
for charCode in charCodes[1:]:
if charCode == lastCode + 1:
lastCode = charCode
continue
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(charCode)
lastCode = charCode
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(0xffff)
endCode.append(0xffff)
# build up rest of cruft
idDelta = []
idRangeOffset = []
glyphIndexArray = []
for i in range(len(endCode)-1): # skip the closing codes (0xffff)
indices = []
for charCode in range(startCode[i], endCode[i] + 1):
indices.append(cmap[charCode])
if (indices == list(range(indices[0], indices[0] + len(indices)))):
idDelta.append((indices[0] - startCode[i]) % 0x10000)
idRangeOffset.append(0)
else:
# someone *definitely* needs to get killed.
idDelta.append(0)
idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
glyphIndexArray.extend(indices)
idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
idRangeOffset.append(0)
# Insane.
segCount = len(endCode)
segCountX2 = segCount * 2
searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
charCodeArray = array.array("H", endCode + [0] + startCode)
idDeltaArray = array.array("H", idDelta)
restArray = array.array("H", idRangeOffset + glyphIndexArray)
if sys.byteorder != "big":
charCodeArray.byteswap()
idDeltaArray.byteswap()
restArray.byteswap()
data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring()
length = struct.calcsize(cmap_format_4_format) + len(data)
header = struct.pack(cmap_format_4_format, self.format, length, self.language,
segCountX2, searchRange, entrySelector, rangeShift)
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
nameMap, attrsMap, dummyContent = element
if nameMap != "map":
assert 0, "Unrecognized keyword in cmap subtable"
cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
firstCode, entryCount = struct.unpack(">HH", data[:4])
firstCode = int(firstCode)
data = data[4:]
#assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
glyphIndexArray = array.array("H")
glyphIndexArray.fromstring(data[:2 * int(entryCount)])
if sys.byteorder != "big":
glyphIndexArray.byteswap()
self.data = data = None
self.cmap = cmap = {}
lenArray = len(glyphIndexArray)
charCodes = list(range(firstCode, firstCode + lenArray))
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, glyphIndexArray ))
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = sorted(cmap.keys())
if codes:
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [cmap.get(code, ".notdef") for code in codes]
valueList = map(ttFont.getGlyphID, valueList)
glyphIndexArray = array.array("H", valueList)
if sys.byteorder != "big":
glyphIndexArray.byteswap()
data = glyphIndexArray.tostring()
else:
data = b""
firstCode = 0
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
def __init__(self, format):
self.format = format
self.reserved = 0
self.data = None
self.ttFont = None
def decompileHeader(self, data, ttFont):
format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
self.format = format
self.reserved = reserved
self.length = length
self.language = language
self.nGroups = nGroups
self.data = data[16:]
self.ttFont = ttFont
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
charCodes = []
gids = []
pos = 0
for i in range(self.nGroups):
startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
pos += 12
lenGroup = 1 + endCharCode - startCharCode
charCodes.extend(list(range(startCharCode, endCharCode +1)))
gids.extend(self._computeGIDs(glyphID, lenGroup))
self.data = data = None
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
names = list(self.cmap.values())
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {}
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
charCodes.sort()
index = 0
startCharCode = charCodes[0]
startGlyphID = cmap[startCharCode]
lastGlyphID = startGlyphID - self._format_step
lastCharCode = startCharCode - 1
nGroups = 0
dataList = []
maxIndex = len(charCodes)
for index in range(maxIndex):
charCode = charCodes[index]
glyphID = cmap[charCode]
if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
startCharCode = charCode
startGlyphID = glyphID
nGroups = nGroups + 1
lastGlyphID = glyphID
lastCharCode = charCode
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
nGroups = nGroups + 1
data = bytesjoin(dataList)
lengthSubtable = len(data) +16
assert len(data) == (nGroups*12) == (lengthSubtable-16)
return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("reserved", self.reserved),
("length", self.length),
("language", self.language),
("nGroups", self.nGroups),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.reserved = safeEval(attrs["reserved"])
self.length = safeEval(attrs["length"])
self.language = safeEval(attrs["language"])
self.nGroups = safeEval(attrs["nGroups"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
_format_step = 1
def __init__(self, format=12):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
_format_step = 0
def __init__(self, format=13):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return [startingGlyph] * numberOfGlyphs
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
def cvtToUVS(threeByteString):
data = b"\0" + threeByteString
val, = struct.unpack(">L", data)
return val
def cvtFromUVS(val):
assert 0 <= val < 0x1000000
fourByteString = struct.pack(">L", val)
return fourByteString[1:]
class cmap_format_14(CmapSubtable):
def decompileHeader(self, data, ttFont):
format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
self.data = data[10:]
self.length = length
self.numVarSelectorRecords = numVarSelectorRecords
self.ttFont = ttFont
self.language = 0xFF
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
self.cmap = {}
uvsDict = {}
recOffset = 0
for n in range(self.numVarSelectorRecords):
uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
recOffset += 11
varUVS = cvtToUVS(uvs)
if defOVSOffset:
startOffset = defOVSOffset - 10
numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
for r in range(numValues):
uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
startOffset += 4
firstBaseUV = cvtToUVS(uv)
cnt = addtlCnt+1
baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
glyphList = [None]*cnt
localUVList = zip(baseUVList, glyphList)
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = list(localUVList)
if nonDefUVSOffset:
startOffset = nonDefUVSOffset - 10
numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
localUVList = []
for r in range(numRecs):
uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
startOffset += 5
uv = cvtToUVS(uv)
glyphName = self.ttFont.getGlyphName(gid)
localUVList.append( [uv, glyphName] )
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = localUVList
self.uvsDict = uvsDict
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("length", self.length),
("numVarSelectorRecords", self.numVarSelectorRecords),
])
writer.newline()
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
for uvs in uvsList:
uvList = uvsDict[uvs]
uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
for uv, gname in uvList:
if gname is None:
gname = "None"
# I use the arg rather than the keyword syntax in order to preserve the attribute order.
writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] )
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.length = safeEval(attrs["length"])
self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
if not hasattr(self, "cmap"):
self.cmap = {}
if not hasattr(self, "uvsDict"):
self.uvsDict = {}
uvsDict = self.uvsDict
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
uvs = safeEval(attrs["uvs"])
uv = safeEval(attrs["uv"])
gname = attrs["name"]
if gname == "None":
gname = None
try:
uvsDict[uvs].append( [uv, gname])
except KeyError:
uvsDict[uvs] = [ [uv, gname] ]
def compile(self, ttFont):
if self.data:
return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
self.numVarSelectorRecords = len(uvsList)
offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
data = []
varSelectorRecords =[]
for uvs in uvsList:
entryList = uvsDict[uvs]
defList = [entry for entry in entryList if entry[1] is None]
if defList:
defList = [entry[0] for entry in defList]
defOVSOffset = offset
defList.sort()
lastUV = defList[0]
cnt = -1
defRecs = []
for defEntry in defList:
cnt +=1
if (lastUV+cnt) != defEntry:
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
lastUV = defEntry
defRecs.append(rec)
cnt = 0
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
defRecs.append(rec)
numDefRecs = len(defRecs)
data.append(struct.pack(">L", numDefRecs))
data.extend(defRecs)
offset += 4 + numDefRecs*4
else:
defOVSOffset = 0
ndefList = [entry for entry in entryList if entry[1] is not None]
if ndefList:
nonDefUVSOffset = offset
ndefList.sort()
numNonDefRecs = len(ndefList)
data.append(struct.pack(">L", numNonDefRecs))
offset += 4 + numNonDefRecs*5
for uv, gname in ndefList:
gid = ttFont.getGlyphID(gname)
ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
data.append(ndrec)
else:
nonDefUVSOffset = 0
vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
varSelectorRecords.append(vrec)
data = bytesjoin(varSelectorRecords) + bytesjoin(data)
self.length = 10 + len(data)
headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
self.data = headerdata + data
return self.data
class cmap_format_unknown(CmapSubtable):
def toXML(self, writer, ttFont):
cmapName = self.__class__.__name__[:12] + str(self.format)
writer.begintag(cmapName, [
("platformID", self.platformID),
("platEncID", self.platEncID),
])
writer.newline()
writer.dumphex(self.data)
writer.endtag(cmapName)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.data = readHex(content)
self.cmap = {}
def decompileHeader(self, data, ttFont):
self.language = 0 # dummy value
self.data = data
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
def compile(self, ttFont):
if self.data:
return self.data
else:
return None
cmap_classes = {
0: cmap_format_0,
2: cmap_format_2,
4: cmap_format_4,
6: cmap_format_6,
12: cmap_format_12,
13: cmap_format_13,
14: cmap_format_14,
}
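# Illustrative lookup sketch (hypothetical 'fmt' variable): a subtable implementation is
# typically chosen from the table above by format number, falling back to
# cmap_format_unknown for anything unlisted, e.g.
#   subtableClass = cmap_classes.get(fmt, cmap_format_unknown)
#   subtable = subtableClass(fmt)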
| true
| true
|
790d4dc332f8c6d87a412245bf6606b6e879871c
| 1,282
|
py
|
Python
|
tethys_datasets/migrations/0003_spatialdatasetservice.py
|
CI-WATER/django-tethys_datasets
|
504963a720693931a1fa1a899d5492548672216f
|
[
"BSD-2-Clause"
] | null | null | null |
tethys_datasets/migrations/0003_spatialdatasetservice.py
|
CI-WATER/django-tethys_datasets
|
504963a720693931a1fa1a899d5492548672216f
|
[
"BSD-2-Clause"
] | null | null | null |
tethys_datasets/migrations/0003_spatialdatasetservice.py
|
CI-WATER/django-tethys_datasets
|
504963a720693931a1fa1a899d5492548672216f
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tethys_datasets', '0002_auto_20150119_1756'),
]
operations = [
migrations.CreateModel(
name='SpatialDatasetService',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=30)),
('engine', models.CharField(default=b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', max_length=200, choices=[(b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', b'GeoServer')])),
('endpoint', models.CharField(max_length=1024)),
('apikey', models.CharField(max_length=100, blank=True)),
('username', models.CharField(max_length=100, blank=True)),
('password', models.CharField(max_length=100, blank=True)),
],
options={
'verbose_name': 'Spatial Dataset Service',
'verbose_name_plural': 'Spatial Dataset Services',
},
bases=(models.Model,),
),
]
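# Illustrative note (assumes the standard Django workflow, not part of this migration file):
# the migration above would typically be applied with
#   python manage.py migrate tethys_datasets 0003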
| 40.0625
| 227
| 0.613885
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tethys_datasets', '0002_auto_20150119_1756'),
]
operations = [
migrations.CreateModel(
name='SpatialDatasetService',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=30)),
('engine', models.CharField(default=b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', max_length=200, choices=[(b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', b'GeoServer')])),
('endpoint', models.CharField(max_length=1024)),
('apikey', models.CharField(max_length=100, blank=True)),
('username', models.CharField(max_length=100, blank=True)),
('password', models.CharField(max_length=100, blank=True)),
],
options={
'verbose_name': 'Spatial Dataset Service',
'verbose_name_plural': 'Spatial Dataset Services',
},
bases=(models.Model,),
),
]
| true
| true
|
790d4dda347c97ee25f917fd744920903b8f9127
| 17,962
|
py
|
Python
|
src/webparse.py
|
neilchristanto/ValDashboard
|
d62d04020081c114c67d80e52726ad827a180ba0
|
[
"MIT"
] | null | null | null |
src/webparse.py
|
neilchristanto/ValDashboard
|
d62d04020081c114c67d80e52726ad827a180ba0
|
[
"MIT"
] | null | null | null |
src/webparse.py
|
neilchristanto/ValDashboard
|
d62d04020081c114c67d80e52726ad827a180ba0
|
[
"MIT"
] | null | null | null |
import re
import pandas as pd
import requests
from lxml import html as lhtml
from fake_useragent import UserAgent
import logging
WS_TO_STR = 0
WS_SRC = 1
WS_PATH = 2
WS_CACHE = 3
class WebParse:
websource = {
# Readable Source unique path caching
"mkt_cap" : ['Mkt Cap' , "ycharts" , "market_cap", 0],
"inc_qtr" : ['Inc Qtr' , "ycharts" , "net_income", 1],
"inc_ttm" : ['Inc TTM' , "ycharts" , "net_income_ttm", 1],
"rev_qtr" : ['Rev Qtr' , "ycharts" , "revenues", 1],
"rev_ttm" : ['Rev TTM' , "ycharts" , "revenues_ttm", 1],
"p_rev_ttm" : ['Prv Rev TTM', "ycharts" , "revenues_ttm", 1],
"rev_fy" : ['Rev FY' , "cml" , "analysts", 1],
"ref_1fy" : ['Rev 1FY' , "cml" , "analysts", 1],
"ref_2fy" : ['Rev 2FY' , "cml" , "analysts", 1],
# All PS depends on MktCap and Rev
"ps_fy" : ['PS FY' , "NA"],
"ps_1fy" : ['PS 1FY' , "NA"],
"ps_2fy" : ['PS 2FY' , "NA"],
"ps_ttm" : ['PS TTM' , "NA"],
"ps_nxt" : ['PS Nxt' , "NA"],
# upside and growth are just ratios between 2 numbers at different points in time
"upside" : ['Upside' , "NA"],
"rev_grow" : ['Rev Grow' , "NA"],
"inc_grow" : ['Inc Grow' , "NA"],
'revgw_fy' : ['RevGw FY' , 'NA'],
'revgw_1fy' : ['RevGw 1FY' , 'NA'],
'revgw_2fy' : ['RevGw_2FY' , 'NA'],
}
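# Illustrative indexing sketch using the WS_* constants defined above (values shown are
# the ones configured for "rev_ttm"):
#   websource["rev_ttm"][WS_TO_STR] -> 'Rev TTM'
#   websource["rev_ttm"][WS_SRC]    -> 'ycharts'
#   websource["rev_ttm"][WS_PATH]   -> 'revenues_ttm'
#   websource["rev_ttm"][WS_CACHE]  -> 1 (http response cached per (stock, path) key)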
# cache the entire http response
cached_web = {}
# handle to portfolio extracted data
pdata = {}
# state to specify whether the latest date is the same
# if so, skip the parses
skip_metric_parse = 0
# fy_idx is for indexing the fiscal year calculation for revenue
fy_idx = 0
# logger
def __init__(self):
self.logger = logging.getLogger('root.' + __name__)
def clear_webcache(self):
self.cached_web = {}
def val_toB(self, istr):
# return value in billion
if istr == 'NA':
val = -1
elif istr[-1] == 'B':
val = float(istr[0:-1].replace(',', ''))
elif istr[-1] == 'T':
val = float(istr[0:-1].replace(',', ''))*1000.0
else: # otherwise the observed value is in millions
val = float(istr[0:-1].replace(',', ''))/1000.0
return val
def val_toM(self, istr):
if istr == 'NA':
val = -1
elif istr[-1] == 'B':
val = float(istr[0:-1].replace(',', ''))*1000.0
else:
val = float(istr[0:-1].replace(',', ''))
return val
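# Illustrative conversions with the suffix-aware helpers above (hypothetical inputs):
#   val_toB('1.5T')   -> 1500.0   # trillions to billions
#   val_toB('250.3M') -> 0.2503   # millions to billions
#   val_toM('2.1B')   -> 2100.0   # billions to millions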
# Return the full xml, considering caching enabled or not
# if caching is enabled and is present, no need to query the website again
def get_xml(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
u = kwargs['url']
key = (s,self.websource[m][WS_PATH])
# check for caching enable
if self.websource[m][WS_CACHE]:
if key in self.cached_web.keys():
self.logger.debug('get cached url = %s' % u)
return self.cached_web[key]
# here, either caching is not enabled, or cache entry is not present
self.logger.debug('get url = %s' % u)
ua = UserAgent()
hdr = {"User-Agent": ua.random}
req = requests.get(u, headers=hdr)
root = lhtml.fromstring(req.content)
# cache if enabled
if self.websource[m][WS_CACHE]:
self.cached_web[key] = root
return root
def check_skip_metric(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
if self.skip_metric_parse:
self.logger.debug('{0} - {1} - skipped'.format(s, m))
return 1, self.pdata[s][self.websource[m][WS_TO_STR]]
else:
return 0, 0
def check_gph_skip_metric(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
if self.skip_metric_parse:
self.logger.debug('{0} - {1} - skipped'.format(s, m))
return 1, self.pdata[s][self.websource[m][WS_TO_STR] + ' date'], \
self.pdata[s][self.websource[m][WS_TO_STR]]
else:
return 0, 0, 0
def parse_ycharts_pgNameVal(self, **kwargs):
root = self.get_xml(**kwargs)
res = root.xpath("//span[@class='page-name-date']")
stk = kwargs['stock']
metric = kwargs['metric']
if len(res) != 1:
self.logger.error("ERROR: stock %s, %s list not unique, or not available" %
(kwargs['stock'], kwargs['metric']))
return -1
res = res[0].text
[val, date] = res.split(" for ")
val = self.val_toB(val)
try:
if date == self.pdata[stk]['latest']:
self.skip_metric_parse = 1
self.logger.debug('%s latest data matches (%s).. skipping ycharts metric parse' % (stk, date))
# if date is not the same and this is not market cap, that means this is new data..
# empty out the stocks data
elif metric != 'mkt_cap':
self.pdata[stk] = {'Mkt Cap' : self.pdata[stk]['Mkt Cap'], 'latest' : ''}
except KeyError:
pass
return val
def parse_mkt_cap(self, **kwargs):
self.skip_metric_parse = 0
self.fy_idx = 0
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
def parse_rev_ttm(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
'''
def parse_inc_qtr(self, **kwargs):
if self.skip_metric_parse:
return self.pdata[kwargs['stock']][kwargs['metric']]
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
def parse_inc_ttm(self, **kwargs):
if self.skip_metric_parse:
return self.pdata[kwargs['stock']][kwargs['metric']]
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
'''
def parse_p_rev_ttm(self, **kwargs):
root = self.get_xml(**kwargs)
td = root.xpath("//td")
# prev ttm is located at TD[8] and TD[9]
# [0][1] is for current quarter
# [2][3] is for prev quarter
# [8][9] is for a year ago
try:
retval = td[9].text.strip()
# return value in billion
retval = self.val_toB(retval)
except IndexError:
retval = -1
return float("{0:.4f}".format(retval))
def parse_rev_nxt_zacks(self, root):
tb = root.xpath("//section[@id='detailed_earnings_estimates']")[0]
hdr = [th.text_content().split('(')[0].strip() for th in tb.xpath('.//th')]
row = [[td.text_content() for td in tr.xpath('.//td')] for tr in tb.xpath('.//tbody/tr')]
# create indexes and proper row
hdr = hdr[1:]
idx = [r[0] for r in row]
row = [r[1:] for r in row]
df = pd.DataFrame(data = row, columns = hdr, index = idx)
val = df['Next Year']['Zacks Consensus Estimate']
retval = self.val_toB(val)
return float("{0:.3f}".format(retval))
def parse_rev_nxt(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
root = self.get_xml(**kwargs)
if self.websource[kwargs['metric']][WS_SRC] == 'yahoo':
retval = self.parse_rev_nxt_yahoo(root)
elif self.websource[kwargs['metric']][WS_SRC] == 'zacks':
retval = self.parse_rev_nxt_zacks(root)
return float("{0:.3f}".format(retval))
'''
parsing from CML
'''
def parse_rev_fy(self, **kwargs):
root = self.get_xml(**kwargs)
# current FY = 7, next = 8, onward
xpath = "//table[@class='responsive']/tbody/tr[{}]/td[@class='mean']".format(self.fy_idx + 7)
res = root.xpath(xpath)[0].text
# the scraped value is reported in millions; convert it to billions
return self.val_toB(res)
'''
# parsing that requires ratio
# ps = market_cap / rev_ttm
# ps_nxt = market_cap / rev_nxt
# rev_growth = rev_ttm / p_rev_ttm
# upside = rev_nxt / rev_ttm
'''
# helper function to get ratio
def get_two_metrics(self, stk, a, b):
if stk not in self.pdata.keys():
aval = self.parse(stk, a)
bval = self.parse(stk, b)
else:
try:
aval = self.pdata[stk][self.websource[a][WS_TO_STR]]
except KeyError:
aval = self.parse(stk, a)
try:
bval = self.pdata[stk][self.websource[b][WS_TO_STR]]
except KeyError:
bval = self.parse(stk, b)
return aval, bval
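# Illustrative sketch of how the derived ratios below use this helper ('XYZ' is a
# placeholder ticker): fetch (or re-use cached) base metrics, then combine them, e.g.
#   mkt_cap, rev_ttm = self.get_two_metrics('XYZ', 'mkt_cap', 'rev_ttm')
#   ps_ttm = mkt_cap / rev_ttm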
# PS TTM is basically mkt_cap/rev_ttm
# if the required data is not present, parse them first
def parse_ps_ttm(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
mkt_cap, rev_ttm = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_ttm')
retval = mkt_cap / rev_ttm
return float("{0:.3f}".format(retval))
# this is basically market_cap/rev_nxt
def parse_ps_nxt(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
mkt_cap, rev_nxt = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_nxt')
retval = mkt_cap / rev_nxt
return float("{0:.3f}".format(retval))
# rev growth needs the rev_ttm and the previous year's rev_ttm
def parse_rev_grow(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
crev_ttm, prev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_ttm', 'p_rev_ttm')
retval = crev_ttm * 100.0 / prev_ttm - 100
return "{0:.0f}%".format(retval)
# upside = rev_nxt / rev_ttm
def parse_upside(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
rev_nxt, rev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_nxt', 'rev_ttm')
retval = rev_nxt * 100.0 / rev_ttm - 100
return "{0:.0f}%".format(retval)
'''
Parse PS that depends on CML website
'''
# ps_fy = market_cap / rev_fy
# rev_fy is not part of the JSON valuation, so we'll always parse it again (from cached web)
def parse_ps_fy(self, **kwargs):
mkt_cap, rev_fy = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_fy')
retval = mkt_cap / rev_fy
return float("{0:.2f}".format(retval))
def parse_ps_1fy(self, **kwargs):
self.fy_idx = 1
return self.parse_ps_fy(**kwargs)
def parse_ps_2fy(self, **kwargs):
self.fy_idx = 2
return self.parse_ps_fy(**kwargs)
def parse_revgw_fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_ttm', 'ps_fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_revgw_1fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_fy', 'ps_1fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_revgw_2fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_1fy', 'ps_2fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_ycharts_td(self, **kwargs):
"""
Parse ycharts.com, indexing into the 'dataTableBox' id.
Each <tr> will have a pair of <td>: date and value.
Data from ycharts.com is most recent first, so each new entry is prepended to the list
to create chronological order.
list[0] = oldest data
list[-1] = newest data
:param kwargs: Passed on to get_xml (contains stock, metric, url)
:return: date: list of dates (string)
:return: val: list of values converted to million
"""
root = self.get_xml(**kwargs)
td = root.xpath("//table[@class='table']")[0].xpath('.//td')
tdlen = len(td)
date, val = [], []
for i in range(0, tdlen, 2):
# if content is empty, skip
if td[i].text_content() == '': continue
if td[i+1].text_content().strip() == '': continue
date = [td[i].text_content()] + date
val = [self.val_toM(td[i+1].text_content().strip())] + val
return date, val
def parse_gph_inc_qtr(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_inc_ttm(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_rev_qtr(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_rev_ttm(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_metric(self, stk, m):
"""
Parse graph metric
:param stk:
:param m:
:return:
"""
if stk not in self.pdata.keys():
date, val = self.parse(stk, m, fn_type="graph")
else:
try:
date = self.pdata[stk][self.websource[m][WS_TO_STR] + ' date']
val = self.pdata[stk][self.websource[m][WS_TO_STR]]
except KeyError:
date, val = self.parse(stk, m, fn_type='graph')
return date, val
def parse_gph_grow(self, **kwargs):
metric = re.sub("grow", "ttm", kwargs['metric']).lower()
date, val = self.parse_gph_metric(kwargs['stock'], metric)
# can't compute YoY growth with only 4 quarters of data or fewer
if len(val) <= 4:
return [], []
retval = [float("{0:.2f}".format(val[i] * 100.0 / val[i-4] - 100)) for i in range(4, len(val))]
retdate = date[4:]
return retdate, retval
def parse_gph_inc_grow(self, **kwargs):
return [], []
def parse_gph_rev_grow(self, **kwargs):
return self.parse_gph_grow(**kwargs)
'''
parser main entry point and helper functions
'''
# pre_parse takes in the metric and gives the correct URL to go to
# input : stock, metric
# output : stock, modified metric, proper URL
def pre_parse(self, stock, metric):
wp_metric = re.sub(" ", "_", metric).lower()
try:
mainurl = self.websource[wp_metric][WS_SRC]
if mainurl == 'ycharts':
url = "https://ycharts.com/companies/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == "yahoo":
url = "https://www.finance.yahoo.com/quote/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == "zacks":
url = "https://zacks.com/stock/quote/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == 'cml':
url = 'https://www.cmlviz.com/inc/{1}.php?ticker={0}'.format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == 'NA':
url = "NA"
else:
url = None
except KeyError:
url = None
return stock, wp_metric, url
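# Illustrative example ('XYZ' is a placeholder ticker): a display name such as 'Rev TTM'
# is normalised to 'rev_ttm' and resolved to its source URL, e.g.
#   pre_parse('XYZ', 'Rev TTM')
#   -> ('XYZ', 'rev_ttm', 'https://ycharts.com/companies/XYZ/revenues_ttm')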
def parse(self, stock, metric, **kwargs):
stock, metric, url = self.pre_parse(stock, metric)
if url is None:
msg = """
ERROR: url returned None from pre_parse
stock: %s; metric: %s
""" % (stock, metric)
print(msg)
return -1
try:
if kwargs['fn_type'] == 'graph':
fn_prefix = "parse_gph_"
else:
raise KeyError
except KeyError:
fn_prefix = "parse_"
try:
func = getattr(self, fn_prefix + metric)
except AttributeError:
print("ERROR: no function: %s" % (fn_prefix + metric))
return -1
return func(stock=stock, metric=metric, url=url)
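# Minimal usage sketch ('XYZ' is a placeholder ticker; calls hit the live source sites):
# the entry point dispatches to a parse_<metric> handler, or to parse_gph_<metric> when
# fn_type='graph' is passed, e.g.
#   wp = WebParse()
#   mkt_cap = wp.parse('XYZ', 'Mkt Cap')                       # scalar, in billions
#   dates, vals = wp.parse('XYZ', 'Rev TTM', fn_type='graph')  # time series, in millions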
| 34.945525
| 111
| 0.512137
|
import re
import pandas as pd
import requests
from lxml import html as lhtml
from fake_useragent import UserAgent
import logging
WS_TO_STR = 0
WS_SRC = 1
WS_PATH = 2
WS_CACHE = 3
class WebParse:
websource = {
"mkt_cap" : ['Mkt Cap' , "ycharts" , "market_cap", 0],
"inc_qtr" : ['Inc Qtr' , "ycharts" , "net_income", 1],
"inc_ttm" : ['Inc TTM' , "ycharts" , "net_income_ttm", 1],
"rev_qtr" : ['Rev Qtr' , "ycharts" , "revenues", 1],
"rev_ttm" : ['Rev TTM' , "ycharts" , "revenues_ttm", 1],
"p_rev_ttm" : ['Prv Rev TTM', "ycharts" , "revenues_ttm", 1],
"rev_fy" : ['Rev FY' , "cml" , "analysts", 1],
"ref_1fy" : ['Rev 1FY' , "cml" , "analysts", 1],
"ref_2fy" : ['Rev 2FY' , "cml" , "analysts", 1],
"ps_fy" : ['PS FY' , "NA"],
"ps_1fy" : ['PS 1FY' , "NA"],
"ps_2fy" : ['PS 2FY' , "NA"],
"ps_ttm" : ['PS TTM' , "NA"],
"ps_nxt" : ['PS Nxt' , "NA"],
"upside" : ['Upside' , "NA"],
"rev_grow" : ['Rev Grow' , "NA"],
"inc_grow" : ['Inc Grow' , "NA"],
'revgw_fy' : ['RevGw FY' , 'NA'],
'revgw_1fy' : ['RevGw 1FY' , 'NA'],
'revgw_2fy' : ['RevGw_2FY' , 'NA'],
}
cached_web = {}
pdata = {}
skip_metric_parse = 0
fy_idx = 0
def __init__(self):
self.logger = logging.getLogger('root.' + __name__)
def clear_webcache(self):
self.cached_web = {}
def val_toB(self, istr):
if istr == 'NA':
val = -1
elif istr[-1] == 'B':
val = float(istr[0:-1].replace(',', ''))
elif istr[-1] == 'T':
val = float(istr[0:-1].replace(',', ''))*1000.0
else:
val = float(istr[0:-1].replace(',', ''))/1000.0
return val
def val_toM(self, istr):
if istr == 'NA':
val = -1
elif istr[-1] == 'B':
val = float(istr[0:-1].replace(',', ''))*1000.0
else:
val = float(istr[0:-1].replace(',', ''))
return val
def get_xml(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
u = kwargs['url']
key = (s,self.websource[m][WS_PATH])
if self.websource[m][WS_CACHE]:
if key in self.cached_web.keys():
self.logger.debug('get cached url = %s' % u)
return self.cached_web[key]
self.logger.debug('get url = %s' % u)
ua = UserAgent()
hdr = {"User-Agent": ua.random}
req = requests.get(u, headers=hdr)
root = lhtml.fromstring(req.content)
if self.websource[m][WS_CACHE]:
self.cached_web[key] = root
return root
def check_skip_metric(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
if self.skip_metric_parse:
self.logger.debug('{0} - {1} - skipped'.format(s, m))
return 1, self.pdata[s][self.websource[m][WS_TO_STR]]
else:
return 0, 0
def check_gph_skip_metric(self, **kwargs):
s = kwargs['stock']
m = kwargs['metric']
if self.skip_metric_parse:
self.logger.debug('{0} - {1} - skipped'.format(s, m))
return 1, self.pdata[s][self.websource[m][WS_TO_STR] + ' date'], \
self.pdata[s][self.websource[m][WS_TO_STR]]
else:
return 0, 0, 0
def parse_ycharts_pgNameVal(self, **kwargs):
root = self.get_xml(**kwargs)
res = root.xpath("//span[@class='page-name-date']")
stk = kwargs['stock']
metric = kwargs['metric']
if len(res) != 1:
self.logger.error("ERROR: stock %s, %s list not unique, or not available" %
(kwargs['stock'], kwargs['metric']))
return -1
res = res[0].text
[val, date] = res.split(" for ")
val = self.val_toB(val)
try:
if date == self.pdata[stk]['latest']:
self.skip_metric_parse = 1
self.logger.debug('%s latest data matches (%s).. skipping ycharts metric parse' % (stk, date))
elif metric != 'mkt_cap':
self.pdata[stk] = {'Mkt Cap' : self.pdata[stk]['Mkt Cap'], 'latest' : ''}
except KeyError:
pass
return val
def parse_mkt_cap(self, **kwargs):
self.skip_metric_parse = 0
self.fy_idx = 0
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
def parse_rev_ttm(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
retval = self.parse_ycharts_pgNameVal(**kwargs)
return float("{0:.3f}".format(retval))
def parse_p_rev_ttm(self, **kwargs):
root = self.get_xml(**kwargs)
td = root.xpath("//td")
try:
retval = td[9].text.strip()
retval = self.val_toB(retval)
except IndexError:
retval = -1
return float("{0:.4f}".format(retval))
def parse_rev_nxt_zacks(self, root):
tb = root.xpath("//section[@id='detailed_earnings_estimates']")[0]
hdr = [th.text_content().split('(')[0].strip() for th in tb.xpath('.//th')]
row = [[td.text_content() for td in tr.xpath('.//td')] for tr in tb.xpath('.//tbody/tr')]
hdr = hdr[1:]
idx = [r[0] for r in row]
row = [r[1:] for r in row]
df = pd.DataFrame(data = row, columns = hdr, index = idx)
val = df['Next Year']['Zacks Consensus Estimate']
retval = self.val_toB(val)
return float("{0:.3f}".format(retval))
def parse_rev_nxt(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
root = self.get_xml(**kwargs)
if self.websource[kwargs['metric']][WS_SRC] == 'yahoo':
retval = self.parse_rev_nxt_yahoo(root)
elif self.websource[kwargs['metric']][WS_SRC] == 'zacks':
retval = self.parse_rev_nxt_zacks(root)
return float("{0:.3f}".format(retval))
def parse_rev_fy(self, **kwargs):
root = self.get_xml(**kwargs)
xpath = "//table[@class='responsive']/tbody/tr[{}]/td[@class='mean']".format(self.fy_idx + 7)
res = root.xpath(xpath)[0].text
return self.val_toB(res)
def get_two_metrics(self, stk, a, b):
if stk not in self.pdata.keys():
aval = self.parse(stk, a)
bval = self.parse(stk, b)
else:
try:
aval = self.pdata[stk][self.websource[a][WS_TO_STR]]
except KeyError:
aval = self.parse(stk, a)
try:
bval = self.pdata[stk][self.websource[b][WS_TO_STR]]
except KeyError:
bval = self.parse(stk, b)
return aval, bval
def parse_ps_ttm(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
mkt_cap, rev_ttm = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_ttm')
retval = mkt_cap / rev_ttm
return float("{0:.3f}".format(retval))
def parse_ps_nxt(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
mkt_cap, rev_nxt = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_nxt')
retval = mkt_cap / rev_nxt
return float("{0:.3f}".format(retval))
def parse_rev_grow(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
crev_ttm, prev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_ttm', 'p_rev_ttm')
retval = crev_ttm * 100.0 / prev_ttm - 100
return "{0:.0f}%".format(retval)
# upside = rev_nxt / rev_ttm
def parse_upside(self, **kwargs):
skip, retval = self.check_skip_metric(**kwargs)
if skip:
return retval
rev_nxt, rev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_nxt', 'rev_ttm')
retval = rev_nxt * 100.0 / rev_ttm - 100
return "{0:.0f}%".format(retval)
# ps_fy = market_cap / rev_fy
# rev_fy is not part of the JSON valuation, so we'll always parse it again (from cached web)
def parse_ps_fy(self, **kwargs):
mkt_cap, rev_fy = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_fy')
retval = mkt_cap / rev_fy
return float("{0:.2f}".format(retval))
def parse_ps_1fy(self, **kwargs):
self.fy_idx = 1
return self.parse_ps_fy(**kwargs)
def parse_ps_2fy(self, **kwargs):
self.fy_idx = 2
return self.parse_ps_fy(**kwargs)
def parse_revgw_fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_ttm', 'ps_fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_revgw_1fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_fy', 'ps_1fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_revgw_2fy(self, **kwargs):
curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_1fy', 'ps_2fy')
return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)
def parse_ycharts_td(self, **kwargs):
root = self.get_xml(**kwargs)
td = root.xpath("//table[@class='table']")[0].xpath('.//td')
tdlen = len(td)
date, val = [], []
for i in range(0, tdlen, 2):
if td[i].text_content() == '': continue
if td[i+1].text_content().strip() == '': continue
date = [td[i].text_content()] + date
val = [self.val_toM(td[i+1].text_content().strip())] + val
return date, val
def parse_gph_inc_qtr(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_inc_ttm(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_rev_qtr(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_rev_ttm(self, **kwargs):
skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)
if skip:
return date_ls, val_ls
date, val = self.parse_ycharts_td(**kwargs)
return date, val
def parse_gph_metric(self, stk, m):
if stk not in self.pdata.keys():
date, val = self.parse(stk, m, fn_type="graph")
else:
try:
date = self.pdata[stk][self.websource[m][WS_TO_STR] + ' date']
val = self.pdata[stk][self.websource[m][WS_TO_STR]]
except KeyError:
date, val = self.parse(stk, m, fn_type='graph')
return date, val
def parse_gph_grow(self, **kwargs):
metric = re.sub("grow", "ttm", kwargs['metric']).lower()
date, val = self.parse_gph_metric(kwargs['stock'], metric)
if len(val) <= 4:
return [], []
retval = [float("{0:.2f}".format(val[i] * 100.0 / val[i-4] - 100)) for i in range(4, len(val))]
retdate = date[4:]
return retdate, retval
def parse_gph_inc_grow(self, **kwargs):
return [], []
def parse_gph_rev_grow(self, **kwargs):
return self.parse_gph_grow(**kwargs)
# pre_parse takes in the metric and gives the correct URL to go to
# input : stock, metric
# output : stock, modified metric, proper URL
def pre_parse(self, stock, metric):
wp_metric = re.sub(" ", "_", metric).lower()
try:
mainurl = self.websource[wp_metric][WS_SRC]
if mainurl == 'ycharts':
url = "https://ycharts.com/companies/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == "yahoo":
url = "https://www.finance.yahoo.com/quote/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == "zacks":
url = "https://zacks.com/stock/quote/{}/{}".format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == 'cml':
url = 'https://www.cmlviz.com/inc/{1}.php?ticker={0}'.format(
stock, self.websource[wp_metric][WS_PATH])
elif mainurl == 'NA':
url = "NA"
else:
url = None
except KeyError:
url = None
return stock, wp_metric, url
def parse(self, stock, metric, **kwargs):
stock, metric, url = self.pre_parse(stock, metric)
if url is None:
msg = """
ERROR: url returned None from pre_parse
stock: %s; metric: %s
""" % (stock, metric)
print(msg)
return -1
try:
if kwargs['fn_type'] == 'graph':
fn_prefix = "parse_gph_"
else:
raise KeyError
except KeyError:
fn_prefix = "parse_"
try:
func = getattr(self, fn_prefix + metric)
except AttributeError:
print("ERROR: no function: %s" % (fn_prefix + metric))
return -1
return func(stock=stock, metric=metric, url=url)
| true
| true
|
790d4ede53c7228132c3f5ffea1e40feaf48a6ff
| 854
|
py
|
Python
|
setup.py
|
dilawar/tinypandas
|
439a1994b6167628ecbddb37369bffd20813c24c
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
dilawar/tinypandas
|
439a1994b6167628ecbddb37369bffd20813c24c
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
dilawar/tinypandas
|
439a1994b6167628ecbddb37369bffd20813c24c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("README.md") as f:
readme = f.read()
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
setup(
name = "tinypandas",
version = "0.0.1",
description = "A small pure python library with Pandas like API",
long_description = readme,
packages = ['tinypandas', 'tinypandas.tests'],
package_dir = { 'tinypandas' : 'src', 'tinypandas.tests' : 'tests' },
install_requires = [ ],
author = "@lexual, Dilawar Singh <dilawars@ncbs.res.in>",
maintainer = "Dilawar Singh",
maintainer_email = "dilawars@ncbs.res.in",
url = "http://github.com/dilawar/",
license='GPL?',
classifiers=classifiers,
)
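# Illustrative note (standard setuptools workflow, not specific to this file): the package
# would typically be installed for development with
#   pip install -e .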
| 26.6875
| 73
| 0.651054
|
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("README.md") as f:
readme = f.read()
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
setup(
name = "tinypandas",
version = "0.0.1",
description = "A small pure python library with Pandas like API",
long_description = readme,
packages = ['tinypandas', 'tinypandas.tests'],
package_dir = { 'tinypandas' : 'src', 'tinypandas.tests' : 'tests' },
install_requires = [ ],
author = "@lexual, Dilawar Singh <dilawars@ncbs.res.in>",
maintainer = "Dilawar Singh",
maintainer_email = "dilawars@ncbs.res.in",
url = "http://github.com/dilawar/",
license='GPL?',
classifiers=classifiers,
)
| true
| true
|
790d4f0d7fb7dc0745fb4aa116d107f1b4abf0fd
| 2,940
|
py
|
Python
|
tests/support/copyartifacts.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/support/copyartifacts.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/support/copyartifacts.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Script for copying back xml junit files from tests
"""
import argparse # pylint: disable=minimum-python-version
import os
import subprocess
import paramiko
import salt.utils.yaml
class DownloadArtifacts:
def __init__(self, instance, artifacts):
self.instance = instance
self.artifacts = artifacts
self.transport = self.setup_transport()
self.sftpclient = paramiko.SFTPClient.from_transport(self.transport)
def setup_transport(self):
# pylint: disable=minimum-python-version
config = salt.utils.yaml.safe_load(
subprocess.check_output(
["bundle", "exec", "kitchen", "diagnose", self.instance]
)
)
# pylint: enable=minimum-python-version
state = config["instances"][self.instance]["state_file"]
tport = config["instances"][self.instance]["transport"]
transport = paramiko.Transport(
(state["hostname"], state.get("port", tport.get("port", 22)))
)
pkey = paramiko.rsakey.RSAKey(
filename=state.get("ssh_key", tport.get("ssh_key", "~/.ssh/id_rsa"))
)
transport.connect(
username=state.get("username", tport.get("username", "root")), pkey=pkey
)
return transport
def _set_permissions(self):
"""
Make sure all xml files are readable by the world so that anyone can grab them
"""
for remote, _ in self.artifacts:
self.transport.open_session().exec_command(
"sudo chmod -R +r {}".format(remote)
)
def download(self):
self._set_permissions()
for remote, local in self.artifacts:
if remote.endswith("/"):
for fxml in self.sftpclient.listdir(remote):
self._do_download(
os.path.join(remote, fxml),
os.path.join(local, os.path.basename(fxml)),
)
else:
self._do_download(remote, os.path.join(local, os.path.basename(remote)))
def _do_download(self, remote, local):
print("Copying from {} to {}".format(remote, local))
try:
self.sftpclient.get(remote, local)
except OSError:
print("Failed to copy: {}".format(remote))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Jenkins Artifact Download Helper")
parser.add_argument(
"--instance",
required=True,
action="store",
help="Instance on Test Kitchen to pull from",
)
parser.add_argument(
"--download-artifacts",
dest="artifacts",
nargs=2,
action="append",
metavar=("REMOTE_PATH", "LOCAL_PATH"),
help="Download remote artifacts",
)
args = parser.parse_args()
downloader = DownloadArtifacts(args.instance, args.artifacts)
downloader.download()
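# Illustrative invocation sketch (instance name and paths are placeholders):
#   python copyartifacts.py --instance default-ubuntu-1804 \
#       --download-artifacts /tmp/xml-unittests-output/ artifacts/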
| 33.033708
| 88
| 0.593537
|
import argparse
import os
import subprocess
import paramiko
import salt.utils.yaml
class DownloadArtifacts:
def __init__(self, instance, artifacts):
self.instance = instance
self.artifacts = artifacts
self.transport = self.setup_transport()
self.sftpclient = paramiko.SFTPClient.from_transport(self.transport)
def setup_transport(self):
config = salt.utils.yaml.safe_load(
subprocess.check_output(
["bundle", "exec", "kitchen", "diagnose", self.instance]
)
)
state = config["instances"][self.instance]["state_file"]
tport = config["instances"][self.instance]["transport"]
transport = paramiko.Transport(
(state["hostname"], state.get("port", tport.get("port", 22)))
)
pkey = paramiko.rsakey.RSAKey(
filename=state.get("ssh_key", tport.get("ssh_key", "~/.ssh/id_rsa"))
)
transport.connect(
username=state.get("username", tport.get("username", "root")), pkey=pkey
)
return transport
def _set_permissions(self):
for remote, _ in self.artifacts:
self.transport.open_session().exec_command(
"sudo chmod -R +r {}".format(remote)
)
def download(self):
self._set_permissions()
for remote, local in self.artifacts:
if remote.endswith("/"):
for fxml in self.sftpclient.listdir(remote):
self._do_download(
os.path.join(remote, fxml),
os.path.join(local, os.path.basename(fxml)),
)
else:
self._do_download(remote, os.path.join(local, os.path.basename(remote)))
def _do_download(self, remote, local):
print("Copying from {} to {}".format(remote, local))
try:
self.sftpclient.get(remote, local)
except OSError:
print("Failed to copy: {}".format(remote))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Jenkins Artifact Download Helper")
parser.add_argument(
"--instance",
required=True,
action="store",
help="Instance on Test Kitchen to pull from",
)
parser.add_argument(
"--download-artifacts",
dest="artifacts",
nargs=2,
action="append",
metavar=("REMOTE_PATH", "LOCAL_PATH"),
help="Download remote artifacts",
)
args = parser.parse_args()
downloader = DownloadArtifacts(args.instance, args.artifacts)
downloader.download()
| true
| true
|
790d50f20b8da66c44adfcff9c0fea07ddb6a0ce
| 53,839
|
py
|
Python
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
CkiChen/pymindaffectBCI
|
0119145a8b280c776f4c4e6cd776fed0f0156404
|
[
"MIT"
] | 1
|
2021-04-25T02:07:13.000Z
|
2021-04-25T02:07:13.000Z
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
CkiChen/pymindaffectBCI
|
0119145a8b280c776f4c4e6cd776fed0f0156404
|
[
"MIT"
] | null | null | null |
mindaffectBCI/decoder/UtopiaDataInterface.py
|
CkiChen/pymindaffectBCI
|
0119145a8b280c776f4c4e6cd776fed0f0156404
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2019 MindAffect B.V.
# Author: Jason Farquhar <jason@mindaffect.nl>
# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.
#
# pymindaffectBCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymindaffectBCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>
from mindaffectBCI.utopiaclient import UtopiaClient, Subscribe, StimulusEvent, NewTarget, Selection, DataPacket, UtopiaMessage, SignalQuality
from collections import deque
from mindaffectBCI.decoder.utils import RingBuffer, extract_ringbuffer_segment
from mindaffectBCI.decoder.lower_bound_tracker import lower_bound_tracker
from mindaffectBCI.decoder.linear_trend_tracker import linear_trend_tracker
from time import sleep
import numpy as np
class UtopiaDataInterface:
"""Adaptor class for interfacing between the decoder logic and the data source
This class provides functionality to wrap a real time data and stimulus stream to make
it easier to implement standard machine learning pipelines. In particular it provides streamed
pre-processing for both EEG and stimulus streams, and ring-buffers for the same with time-stamp based indexing.
"""
# TODO [X] : infer valid data time-stamps
# TODO [X] : smooth and de-jitter the data time-stamps
# TODO [] : expose a (potentially blocking) message generator interface
# TODO [X] : ring-buffer for the stimulus-state also, so fast random access
# TODO [X] : rate limit waiting to reduce computational load
VERBOSITY = 1
def __init__(self, datawindow_ms=60000, msgwindow_ms=60000,
data_preprocessor=None, stimulus_preprocessor=None, send_signalquality=True,
timeout_ms=100, mintime_ms=50, fs=None, U=None, sample2timestamp='lower_bound_tracker',
clientid=None):
# rate control
self.timeout_ms = timeout_ms
self.mintime_ms = mintime_ms # minimum time to spend in update => max processing rate
# amount of data in the ring-buffer
self.datawindow_ms = datawindow_ms
self.msgwindow_ms = msgwindow_ms
# connect to the mindaffectDecoder
self.host = None
self.port = -1
self.U = UtopiaClient(clientid) if U is None else U
self.t0 = self.getTimeStamp()
# init the buffers
# Messages
self.msg_ringbuffer = deque()
self.msg_timestamp = None # ts of most recent processed message
# DataPackets
self.data_ringbuffer = None # init later...
self.data_timestamp = None # ts of last data packet seen
self.sample2timestamp = sample2timestamp # sample tracker to de-jitter time-stamp information
self.data_preprocessor = data_preprocessor # function to pre-process the incoming data
# StimulusEvents
self.stimulus_ringbuffer = None # init later...
self.stimulus_timestamp = None # ts of most recent processed data
self.stimulus_preprocessor = stimulus_preprocessor # function to pre-process the incoming data
# Info about the data sample rate -- estimated from packet rates..
self.raw_fs = fs
self.fs = None
self.newmsgs = [] # list of new unprocessed messages since the last update call
# BODGE: running statistics for sig2noise estimation
# TODO []: move into it's own Sig2Noise computation class
self.send_signalquality = send_signalquality
self.last_sigquality_ts = None
self.last_log_ts = None
self.send_sigquality_interval = 1000 # send signal qualities every 1000ms = 1Hz
# noise2sig estimate halflife_ms, running-offset, de-trended power
self.noise2sig_halflife_ms = (5000, 500) # 5s for offset, .5s for power
# TODO [x]: move into a exp-move-ave power est class
self.raw_power = None
self.preproc_power = None
def connect(self, host=None, port=-1, queryifhostnotfound=True):
"""[make a connection to the utopia host]
Args:
host ([type], optional): [description]. Defaults to None.
port (int, optional): [description]. Defaults to -1.
queryifhostnotfound (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
if host:
self.host = host
if port > 0:
self.port = port
self.U.autoconnect(self.host, self.port, timeout_ms=5000, queryifhostnotfound=queryifhostnotfound)
if self.U.isConnected:
# subscribe to messages: data, stim, mode, selection
self.U.sendMessage(Subscribe(None, "DEMSN"))
return self.U.isConnected
def isConnected(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.U.isConnected if self.U is not None else False
def getTimeStamp(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.U.getTimeStamp()
def sendMessage(self, msg: UtopiaMessage):
"""[send a UtopiaMessage to the utopia hub]
Args:
msg (UtopiaMessage): [description]
"""
self.U.sendMessage(msg)
def getNewMessages(self, timeout_ms=0):
"""[get new messages from the UtopiaHub]
Args:
timeout_ms (int, optional): [description]. Defaults to 0.
Returns:
[type]: [description]
"""
return self.U.getNewMessages(timeout_ms)
def initDataRingBuffer(self):
"""[initialize the data ring buffer, by getting some seed messages and datapackets to get the data sizes etc.]
Returns:
[type]: [description]
"""
print("geting some initial data to setup the ring buffer")
# get some initial data to get data shape and sample rate
databuf = []
nmsg = 0
iter = 0
data_start_ts = None
data_ts = 0
while data_start_ts is None or data_ts - data_start_ts < 3000:
msgs = self.getNewMessages(100)
for m in msgs:
m = self.preprocess_message(m)
if m.msgID == DataPacket.msgID: # data-packets are special
if len(m.samples) > 0:
databuf.append(m) # append raw data
if data_start_ts is None:
data_start_ts = m.timestamp
data_ts = m.timestamp
else:
print("Huh? got empty data packet: {}".format(m))
else:
self.msg_ringbuffer.append(m)
self.msg_timestamp = m.timestamp
nmsg = nmsg+1
nsamp = [len(m.samples) for m in databuf]
data_ts = [ m.timestamp for m in databuf]
if self.raw_fs is None:
self.raw_fs = np.median( np.array(nsamp[1:]) / np.diff(data_ts) * 1000.0)
print('Estimated sample rate {} samp in {} s ={}'.format(sum(nsamp),(data_ts[-1]-data_ts[0])/1000.0,self.raw_fs))
# init the pre-processor (if one)
if self.data_preprocessor:
self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1,:], fs=self.raw_fs) # tell it the sample rate
# apply the data packet pre-processing -- to get the info
# on the data state after pre-processing
tmpdatabuf = [self.processDataPacket(m) for m in databuf]
# strip empty packets
tmpdatabuf = [d for d in tmpdatabuf if d.shape[0]>0]
# estimate the sample rate of the pre-processed data
pp_nsamp = [m.shape[0] for m in tmpdatabuf]
pp_ts = [ m[-1,-1] for m in tmpdatabuf]
self.fs = np.median( np.array(pp_nsamp[1:]) / np.diff(pp_ts) * 1000.0)# fs = nSamp/time
print('Estimated pre-processed sample rate={}'.format(self.fs))
# create the ring buffer, big enough to store the pre-processed data
if self.data_ringbuffer:
print("Warning: re-init data ring buffer")
# TODO []: why does the datatype of the ring buffer matter so much? Is it because of uss?
# Answer[]: it's the time-stamps, float32 rounds time-stamps to 24bits
self.data_ringbuffer = RingBuffer(maxsize=self.fs*self.datawindow_ms/1000, shape=tmpdatabuf[0].shape[1:], dtype=np.float32)
# insert the warmup data into the ring buffer
self.data_timestamp=None # reset last seen data
nsamp=0
# re-init the preprocessor for consistency with off-line
if self.data_preprocessor:
self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1,:], fs=self.raw_fs)
# use linear trend tracker to de-jitter the sample timestamps
if self.sample2timestamp is None or isinstance(self.sample2timestamp,str):
self.sample2timestamp = timestamp_interpolation(fs=self.fs,
sample2timestamp=self.sample2timestamp)
for m in databuf:
# apply the pre-processing again (this time with fs estimated)
d = self.processDataPacket(m)
self.data_ringbuffer.extend(d)
nsamp = nsamp + d.shape[0]
return (nsamp, nmsg)
def initStimulusRingBuffer(self):
'''initialize the data ring buffer, by getting some seed messages and datapackets to get the data sizes etc.'''
# TODO []: more efficient memory use, with different dtype for 'real' data and the time-stamps?
self.stimulus_ringbuffer = RingBuffer(maxsize=self.fs*self.datawindow_ms/1000, shape=(257,), dtype=np.float32)
def preprocess_message(self, m:UtopiaMessage):
"""[apply pre-processing to topia message before any more work]
Args:
m (UtopiaMessage): [description]
Returns:
[type]: [description]
"""
# WARNING BODGE: fit time-stamp in 24bits for float32 ring buffer
# Note: this leads to wrap-around in (1<<24)/1000/3600 = 4.6 hours
# but that shouldn't matter.....
m.timestamp = m.timestamp % (1<<24)
return m
def processDataPacket(self, m: DataPacket):
"""[pre-process a datapacket message ready to be inserted into the ringbuffer]
Args:
m (DataPacket): [description]
Returns:
[type]: [description]
"""
#print("DP: {}".format(m))
# extract the raw data
d = np.array(m.samples, dtype=np.float32) # process as singles
# apply the pre-processor, if one was given
if self.data_preprocessor:
d_raw = d.copy()
# warning -- with aggressive downsampling this may not produce any data!
d = self.data_preprocessor.transform(d)
# BODGE: running estimate of the electrode-quality, ONLY after initialization!
if self.send_signalquality and self.data_ringbuffer is not None:
self.update_and_send_ElectrodeQualities(d_raw, d, m.timestamp)
#if self.VERBOSITY > 0 and self.data_ringbuffer is not None:
# self.plot_raw_preproc_data(d_raw,d,m.timestamp)
if d.size > 0 :
# If have data to add to the ring-buffer, guarding for time-stamp wrap-around
# TODO [ ]: de-jitter and better timestamp interpolation
# guard for wrap-around!
if self.data_timestamp is not None and m.timestamp < self.data_timestamp:
print("Warning: Time-stamp wrap-around detected!!")
d = self.add_sample_timestamps(d,m.timestamp,self.fs)
# update the last time-stamp tracking
self.data_timestamp= m.timestamp
return d
def add_sample_timestamps(self,d:np.ndarray,timestamp:float,fs:float):
"""add per-sample timestamp information to the data matrix
Args:
d (np.ndarray): (t,d) the data matrix to attach time stamps to
timestamp (float): the timestamp of the last sample of d
fs (float): the nominal sample rate of d
Returns:
np.ndarray: (t,d+1) data matrix with attached time-stamp channel
"""
if self.sample2timestamp is not None and not isinstance(self.sample2timestamp,str):
sample_ts = self.sample2timestamp.transform(timestamp, len(d))
else: # all the same ts
sample_ts = np.ones((len(d),),dtype=int)*timestamp
# combine data with timestamps, ensuring type is preserved
d = np.append(np.array(d), sample_ts[:, np.newaxis], -1).astype(d.dtype)
return d
def plot_raw_preproc_data(self, d_raw, d_preproc, ts):
"""[debugging function to check the diff between the raw and pre-processed data]
Args:
d_raw ([type]): [description]
d_preproc ([type]): [description]
ts ([type]): [description]
"""
if not hasattr(self,'rawringbuffer'):
self.preprocringbuffer=RingBuffer(maxsize=self.fs*3,shape=(d_preproc.shape[-1]+1,))
self.rawringbuffer=RingBuffer(maxsize=self.raw_fs*3,shape=(d_raw.shape[-1]+1,))
d_preproc = self.add_sample_timestamps(d_preproc,ts,self.fs)
self.preprocringbuffer.extend(d_preproc)
d_raw = self.add_sample_timestamps(d_raw,ts,self.raw_fs)
self.rawringbuffer.extend(d_raw)
if self.last_sigquality_ts is None or ts > self.last_sigquality_ts + self.send_sigquality_interval:
import matplotlib.pyplot as plt
plt.figure(10);plt.clf();
idx = np.flatnonzero(self.rawringbuffer[:,-1])[0]
plt.subplot(211); plt.cla(); plt.plot(self.rawringbuffer[idx:,-1],self.rawringbuffer[idx:,:-1])
idx = np.flatnonzero(self.preprocringbuffer[:,-1])[0]
plt.subplot(212); plt.cla(); plt.plot(self.preprocringbuffer[idx:,-1],self.preprocringbuffer[idx:,:-1])
plt.show(block=False)
def processStimulusEvent(self, m: StimulusEvent):
"""[pre-process a StimulusEvent message ready to be inserted into the stimulus ringbuffer]
Args:
m (StimulusEvent): [description]
Returns:
[type]: [description]
"""
# get the vector to hold the stimulus info
d = np.zeros((257,),dtype=np.float32)
if self.stimulus_ringbuffer is not None and self.stimulus_timestamp is not None:
# hold value of used objIDs from previous time stamp
d[:] = self.stimulus_ringbuffer[-1,:]
# insert the updated state
d[m.objIDs] = m.objState
d[-1] = m.timestamp
# apply the pre-processor, if one was given
if self.stimulus_preprocessor:
d = self.stimulus_preprocessor.transform(d)
# update the last time-stamp tracking
self.stimulus_timestamp= m.timestamp
return d
def update_and_send_ElectrodeQualities(self, d_raw: np.ndarray, d_preproc: np.ndarray, ts: int):
"""[compute running estimate of electrode qality and stream it]
Args:
d_raw (np.ndarray): [description]
d_preproc (np.ndarray): [description]
ts (int): [description]
"""
raw_power, preproc_power = self.update_electrode_powers(d_raw, d_preproc)
# convert to average amplitude
raw_amp = np.sqrt(raw_power)
preproc_amp = np.sqrt(preproc_power)
# noise-to-signal estimated as the removed amplitude (raw minus preprocessed, assumed noise) relative to the preprocessed amplitude (assumed signal)
noise2sig = np.maximum(float(1e-6), np.abs(raw_amp - preproc_amp)) / np.maximum(float(1e-8),preproc_amp)
# hack - detect disconnected channels
noise2sig[ raw_power < 1e-6 ] = 100
# hack - detect filter artifacts = preproc power is too big..
noise2sig[ preproc_amp > raw_amp*10 ] = 100
# hack - cap to 100
noise2sig = np.minimum(noise2sig,100)
# rate limit sending of signal-quality messages
if self.last_sigquality_ts is None or ts > self.last_sigquality_ts + self.send_sigquality_interval:
print("SigQ:\nraw_power=({}/{})\npp_power=({}/{})\nnoise2sig={}".format(
raw_amp,d_raw.shape[0],
preproc_amp,d_preproc.shape[0],
noise2sig))
print("Q",end='')
# N.B. use *our* time-stamp for outgoing messages!
self.sendMessage(SignalQuality(None, noise2sig))
self.last_sigquality_ts = ts
if self.VERBOSITY>2:
# plot the sample time-stamp jitter...
import matplotlib.pyplot as plt
plt.figure(10)
ts = self.data_ringbuffer[:,-1]
idx = np.flatnonzero(ts)
if len(idx)>0:
ts = ts[idx[0]:]
plt.subplot(211); plt.cla(); plt.plot(np.diff(ts)); plt.title('diff time-sample')
plt.subplot(212); plt.cla(); plt.plot((ts-ts[0])-np.arange(len(ts))*1000.0/self.fs); plt.title('regression against sample-number')
plt.show(block=False)
def update_electrode_powers(self, d_raw: np.ndarray, d_preproc:np.ndarray):
"""[track exp-weighted-moving average centered power for 2 input streams]
Args:
d_raw (np.ndarray): [description]
d_preproc (np.ndarray): [description]
Returns:
[type]: [description]
"""
if self.raw_power is None:
mu_hl, pow_hl = self.noise2sig_halflife_ms
self.raw_power = power_tracker(mu_hl, pow_hl, self.raw_fs)
self.preproc_power = power_tracker(mu_hl, pow_hl, self.fs)
self.raw_power.transform(d_raw)
self.preproc_power.transform(d_preproc)
return (self.raw_power.power(), self.preproc_power.power())
def update(self, timeout_ms=None, mintime_ms=None):
'''Update the tracking state w.r.t. the data source
By adding data to the data_ringbuffer, stimulus info to the stimulus_ringbuffer,
and other messages to the messages ring buffer.
Args
timeout_ms : int
max block waiting for messages before returning
mintime_ms : int
min time to accumulate messages before returning
Returns
newmsgs : [newMsgs :UtopiaMessage]
list of the *new* utopia messages from the server
nsamp: int
number of new data samples in this call
Note: use data_ringbuffer[-nsamp:,...] to get the new data
nstimulus : int
number of new stimulus events in this call
Note: use stimulus_ringbuffer[-nstimulus:,...] to get the new data
'''
if timeout_ms is None:
timeout_ms = self.timeout_ms
if mintime_ms is None:
mintime_ms = self.mintime_ms
if not self.isConnected():
self.connect()
if not self.isConnected():
return [],0,0
t0 = self.getTimeStamp()
nsamp = 0
nmsg = 0
nstimulus = 0
if self.data_ringbuffer is None: # do special init stuff if not done
nsamp, nmsg = self.initDataRingBuffer()
if self.stimulus_ringbuffer is None: # do special init stuff if not done
self.initStimulusRingBuffer()
if self.last_log_ts is None:
self.last_log_ts = self.getTimeStamp()
if t0 is None:
t0 = self.getTimeStamp()
# record the list of new messages from this call
newmsgs = self.newmsgs # start with any left-overs from old calls
self.newmsgs=[] # clear the left-over messages stack
ttg = timeout_ms - (self.getTimeStamp() - t0) # time-to-go in the update loop
while ttg > 0:
# rate limit
if ttg >= mintime_ms:
sleep(mintime_ms/1000.0)
ttg = timeout_ms - (self.getTimeStamp() - t0) # update time-to-go
# get the new messages
msgs = self.getNewMessages(ttg)
# process the messages - basically to split datapackets from the rest
print(".",end='')
#print("{} in {}".format(len(msgs),self.getTimeStamp()-t0),end='',flush=True)
for m in msgs:
m = self.preprocess_message(m)
print("{:c}".format(m.msgID), end='', flush=True)
if m.msgID == DataPacket.msgID: # data-packets are special
d = self.processDataPacket(m) # (samp x ...)
self.data_ringbuffer.extend(d)
nsamp = nsamp + d.shape[0]
elif m.msgID == StimulusEvent.msgID: # as are stimulus events
d = self.processStimulusEvent(m) # (nY x ...)
self.stimulus_ringbuffer.append(d)
nstimulus = nstimulus + 1
else:
# NewTarget/Selection are also special in that they clear stimulus state...
if m.msgID == NewTarget.msgID or m.msgID == Selection.msgID :
# Make a dummy stim-event to reset all objIDs to off
d = self.processStimulusEvent(StimulusEvent(m.timestamp,
np.arange(255,dtype=np.int32),
np.zeros(255,dtype=np.int8)))
self.stimulus_ringbuffer.append(d)
self.stimulus_timestamp= m.timestamp
if len(self.msg_ringbuffer)>0 and m.timestamp > self.msg_ringbuffer[0].timestamp + self.msgwindow_ms: # slide msg buffer
self.msg_ringbuffer.popleft()
self.msg_ringbuffer.append(m)
newmsgs.append(m)
nmsg = nmsg+1
self.msg_timestamp = m.timestamp
# update time-to-go
ttg = timeout_ms - (self.getTimeStamp() - t0)
# new line
if self.getTimeStamp() > self.last_log_ts + 2000:
print("",flush=True)
self.last_log_ts = self.getTimeStamp()
# return new messages, and count new samples/stimulus
return (newmsgs, nsamp, nstimulus)
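    # Hedged usage sketch (comments only): the typical polling pattern over update().
    # The decoding step is a hypothetical placeholder, not part of this class:
    #   ui = UtopiaDataInterface()
    #   ui.connect()
    #   while True:
    #       newmsgs, nsamp, nstim = ui.update(timeout_ms=100)
    #       if nsamp > 0:
    #           X = ui.data_ringbuffer[-nsamp:, ...]  # just the new samples
    #       # ...feed X and newmsgs to your decoder here...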
def push_back_newmsgs(self,oldmsgs):
"""[put unprocessed messages back onto the newmessages queue]
Args:
oldmsgs ([type]): [description]
"""
# TODO []: ensure this preserves message time-stamp order?
self.newmsgs.extend(oldmsgs)
def extract_data_segment(self, bgn_ts, end_ts=None):
"""extract a segment of data based on a start and end time-stamp
Args:
bgn_ts (float): segment start time-stamp
end_ts (float, optional): segment end time-stamp. Defaults to None.
Returns:
(np.ndarray): the data between these time-stamps, or None if timestamps invalid
"""
return extract_ringbuffer_segment(self.data_ringbuffer,bgn_ts,end_ts)
def extract_stimulus_segment(self, bgn_ts, end_ts=None):
"""extract a segment of the stimulus stream based on a start and end time-stamp
Args:
bgn_ts (float): segment start time-stamp
end_ts (float, optional): segment end time-stamp. Defaults to None.
Returns:
(np.ndarray): the stimulus events between these time-stamps, or None if timestamps invalid
"""
return extract_ringbuffer_segment(self.stimulus_ringbuffer,bgn_ts,end_ts)
def extract_msgs_segment(self, bgn_ts, end_ts=None):
"""[extract the messages between start/end time stamps]
Args:
bgn_ts ([type]): [description]
end_ts ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
msgs = [] # store the trial stimEvents
for m in reversed(self.msg_ringbuffer):
if m.timestamp <= bgn_ts:
# stop as soon as earlier than bgn_ts
break
if end_ts is None or m.timestamp < end_ts:
msgs.append(m)
# reverse back to input order
msgs.reverse()
return msgs
def run(self, timeout_ms=30000):
"""[test run the interface forever, just getting and storing data]
Args:
timeout_ms (int, optional): [description]. Defaults to 30000.
"""
t0 = self.getTimeStamp()
# test getting 5s data
tstart = self.data_timestamp
trlen_ms = 5000
while self.getTimeStamp() < t0+timeout_ms:
self.update()
# test getting a data segment
if tstart is None :
tstart = self.data_timestamp
if tstart and self.data_timestamp > tstart + trlen_ms:
X = self.extract_data_segment(tstart, tstart+trlen_ms)
print("Got data: {}->{}\n{}".format(tstart, tstart+trlen_ms, X[:, -1]))
Y = self.extract_stimulus_segment(tstart, tstart+trlen_ms)
print("Got stimulus: {}->{}\n{}".format(tstart, tstart+trlen_ms, Y[:, -1]))
tstart = self.data_timestamp + 5000
print('.', flush=True)
try:
from sklearn.base import TransformerMixin
except ImportError:
# fake the class if sklearn is not available, e.g. Android/iOS
class TransformerMixin:
        def __init__(self):
pass
def fit(self,X):
pass
def transform(self,X):
pass
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
from mindaffectBCI.decoder.utils import sosfilt, butter_sosfilt, sosfilt_zi_warmup
class butterfilt_and_downsample(TransformerMixin):
"""Incremental streaming transformer to provide filtering and downsampling data transformations
Args:
TransformerMixin ([type]): sklearn compatible transformer
"""
def __init__(self, stopband=((0,5),(5,-1)), order:int=6, fs:float =250, fs_out:float =60, ftype='butter'):
self.stopband = stopband
self.fs = fs
self.fs_out = fs_out if fs_out is not None and fs_out < fs else fs
self.order = order
self.axis = -2
if not self.axis == -2:
raise ValueError("axis != -2 is not yet supported!")
self.nsamp = 0
self.ftype = ftype
def fit(self, X, fs:float =None, zi=None):
"""[summary]
Args:
X ([type]): [description]
fs (float, optional): [description]. Defaults to None.
zi ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
if fs is not None: # parameter overrides stored fs
self.fs = fs
# preprocess -> spectral filter
if isinstance(self.stopband, str):
import pickle
import os
# load coefficients from file -- when scipy isn't available
if os.path.isfile(self.stopband):
fn = self.stopband
else: # try relative to our py file
fn = os.path.join(os.path.dirname(os.path.abspath(__file__)),self.stopband)
with open(fn,'rb') as f:
self.sos_ = pickle.load(f)
self.zi_ = pickle.load(f)
f.close()
# tweak the shape/scale of zi to the actual data shape
self.zi_ = sosfilt_zi_warmup(self.zi_, X, self.axis)
print("X={} zi={}".format(X.shape,self.zi_.shape))
else:
# estimate them from the given information
X, self.sos_, self.zi_ = butter_sosfilt(X, self.stopband, self.fs, order=self.order, axis=self.axis, zi=zi, ftype=self.ftype)
# preprocess -> downsample
self.nsamp = 0
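        # The resample rate below is rounded to the nearest half-integer; e.g. with
        # fs=250 and fs_out=60: round(250*2/60)/2 = 8/2 = 4.0, so the achieved
        # output rate out_fs_ is 250/4 = 62.5Hz rather than exactly 60Hz.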
self.resamprate_ = int(round(self.fs*2.0/self.fs_out))/2.0 if self.fs_out is not None else 1
self.out_fs_ = self.fs/self.resamprate_
print("resample: {}->{}hz rsrate={}".format(self.fs, self.out_fs_, self.resamprate_))
return self
def transform(self, X, Y=None):
"""[summary]
Args:
X ([type]): [description]
Y ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
        # propagate the filter coefficients between calls
if not hasattr(self,'sos_'):
self.fit(X[0:1,:])
if self.sos_ is not None:
X, self.zi_ = sosfilt(self.sos_, X, axis=self.axis, zi=self.zi_)
nsamp = self.nsamp
self.nsamp = self.nsamp + X.shape[self.axis] # track *raw* sample counter
# preprocess -> downsample @60hz
if self.resamprate_ > 1:
# number samples through this cycle due to remainder of last block
resamp_start = nsamp%self.resamprate_
# convert to number samples needed to complete this cycle
# this is then the sample to take for the next cycle
if resamp_start > 0:
resamp_start = self.resamprate_ - resamp_start
# allow non-integer resample rates
idx = np.arange(resamp_start,X.shape[self.axis],self.resamprate_)
if self.resamprate_%1 > 0 and idx.size>0 : # non-integer re-sample, interpolate
idx_l = np.floor(idx).astype(int) # sample above
idx_u = np.ceil(idx).astype(int) # sample below
# BODGE: guard for packet ending at sample boundary.
idx_u[-1] = idx_u[-1] if idx_u[-1]<X.shape[self.axis] else X.shape[self.axis]-1
w_u = idx - idx_l # linear weight of the upper sample
X = X[...,idx_u,:] * w_u[:,np.newaxis] + X[...,idx_l,:] * (1-w_u[:,np.newaxis]) # linear interpolation
if Y is not None:
Y = Y[...,idx_u,:] * w_u[:,np.newaxis] + Y[...,idx_l,:] * (1-w_u[:,np.newaxis])
else:
idx = idx.astype(int)
X = X[..., idx, :] # decimate X (trl, samp, d)
if Y is not None:
Y = Y[..., idx, :] # decimate Y (trl, samp, y)
return X if Y is None else (X, Y)
@staticmethod
def testcase():
''' test the filt+downsample transformation filter by incremental calling '''
#X=np.cumsum(np.random.randn(100,1),axis=0)
X=np.sin(np.arange(100)[:,np.newaxis]*2*np.pi/30)
xs = np.arange(X.shape[0])[:,np.newaxis]
# high-pass and decimate
        bands = (0, 20, 'bandpass') # note: a single band spec, not a tuple of bands
fs = 200
fs_out = 130
fds = butterfilt_and_downsample(stopband=bands,fs=fs,fs_out=fs_out)
print("single step")
fds.fit(X[0:1,:])
m0,xs0 = fds.transform(X,xs) # (samp,ny,ne)
print("M0 -> {}".format(m0[:20]))
step=6
print("Step size = {}".format(step))
fds.fit(X[0:1,:])
m1=np.zeros(m0.shape,m0.dtype)
xs1 = np.zeros(xs0.shape,xs0.dtype)
t=0
for i in range(0,len(X),step):
idx=np.arange(i,min(i+step,len(X)))
mm, idx1=fds.transform(X[idx,:],idx[:,np.newaxis])
m1[t:t+mm.shape[0],:]=mm
xs1[t:t+mm.shape[0]]=idx1
t = t +mm.shape[0]
print("M1 -> {}".format(m1[:20]))
print("diff: {}".format(np.max(np.abs(m0-m1))))
import matplotlib.pyplot as plt
plt.plot(xs,X,'*-',label='X')
plt.plot(xs0,m0,'*-',label='{} {}->{}Hz single'.format(bands,fs,fs_out))
plt.plot(xs1,m1,'*-',label='{} {}->{}Hz incremental'.format(bands,fs,fs_out))
plt.legend()
plt.show()
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
from mindaffectBCI.decoder.stim2event import stim2event
class stim2eventfilt(TransformerMixin):
''' Incremental streaming transformer to transform a sequence of stimulus states to a brain event sequence
For example by transforming a sequence of stimulus intensities, to rising and falling edge events.
'''
def __init__(self, evtlabs=None, histlen=20):
self.evtlabs = evtlabs
self.histlen = histlen
self.prevX = None
def fit(self, X):
"""[summary]
Args:
X ([type]): [description]
Returns:
[type]: [description]
"""
return self
def transform(self, X):
"""[transform Stimulus-encoded to brain-encoded]
Args:
X ([type]): [description]
Returns:
[type]: [description]
"""
if X is None:
return None
        # keep old filter state for the later transformation call
prevX = self.prevX
# grab the new filter state (if wanted)
if self.histlen>0:
#print('prevX={}'.format(prevX))
#print("X={}".format(X))
if X.shape[0] >= self.histlen or prevX is None:
self.prevX = X
else:
self.prevX = np.append(prevX, X, 0)
# only keep the last bit -- copy in case gets changed in-place
self.prevX = self.prevX[-self.histlen:,:].copy()
#print('new_prevX={}'.format(self.prevX))
# convert from stimulus coding to brain response coding, with old state
X = stim2event(X, self.evtlabs, axis=-2, oM=prevX)
return X
    @staticmethod
    def testcase():
''' test the stimulus transformation filter by incremental calling '''
M=np.array([0,0,0,1,0,0,1,1,0,1])[:,np.newaxis] # samp,nY
s2ef = stim2eventfilt(evtlabs=('re','fe'),histlen=3)
print("single step")
m0=s2ef.transform(M) # (samp,ny,ne)
print("{} -> {}".format(M,m0))
print("Step size = 1")
m1=np.zeros(m0.shape,m0.dtype)
for i in range(len(M)):
idx=slice(i,i+1)
mm=s2ef.transform(M[idx,:])
m1[idx,...]=mm
print("{} {} -> {}".format(i,M[idx,...],mm))
print("Step size=4")
m4=np.zeros(m0.shape,m0.dtype)
for i in range(0,len(M),4):
idx=slice(i,i+4)
mm=s2ef.transform(M[idx,:])
m4[idx,...]=mm
print("{} {} -> {}".format(i,M[idx,...],mm))
print("m0={}\nm1={}\n,m4={}\n".format(m0,m1,m4))
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
class power_tracker(TransformerMixin):
"""Incremental streaming transformer from raw n-channel data, to exponientially smoothed channel powers
Args:
TransformerMixin ([type]): sklearn compatiable transformer
"""
def __init__(self,halflife_mu_ms, halflife_power_ms, fs, car=True):
# convert to per-sample decay factor
self.alpha_mu = self.hl2alpha(fs * halflife_mu_ms / 1000.0 )
self.alpha_power= self.hl2alpha(fs * halflife_power_ms / 1000.0 )
self.car = car
self.sX_N = None
self.sX = None
self.sXX_N = None
self.sXX = None
def hl2alpha(self,hl):
"""[summary]
Args:
hl ([type]): [description]
Returns:
[type]: [description]
"""
return np.exp(np.log(.5)/hl)
def fit(self,X):
"""[summary]
Args:
X ([type]): [description]
Returns:
[type]: [description]
"""
self.sX_N = X.shape[0]
if self.car and X.shape[-1]>4:
X = X.copy() - np.mean(X,-1,keepdims=True)
self.sX = np.sum(X,axis=0)
self.sXX_N = X.shape[0]
self.sXX = np.sum((X-(self.sX/self.sX_N))**2,axis=0)
return self.power()
def transform(self, X: np.ndarray):
"""[compute the exponientially weighted centered power of X]
Args:
X (np.ndarray): [description]
Returns:
[type]: [description]
"""
if self.sX is None: # not fitted yet!
return self.fit(X)
if self.car and X.shape[-1]>4:
ch_power = self.power()
# identify the active channels, i.e. are attached and have some signal
act_ch = ch_power > np.max(ch_power)*1e-3
X = X.copy() - np.mean(X[...,act_ch], -1, keepdims=True)
# compute updated mean
alpha_mu = self.alpha_mu ** X.shape[0]
self.sX_N = self.sX_N*alpha_mu + X.shape[0]
self.sX = self.sX*alpha_mu + np.sum(X, axis=0)
# center and compute updated power
alpha_pow = self.alpha_power ** X.shape[0]
self.sXX_N = self.sXX_N*alpha_pow + X.shape[0]
self.sXX = self.sXX*alpha_pow + np.sum((X-(self.sX/self.sX_N))**2, axis=0)
return self.power()
def mean(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.sX / self.sX_N
def power(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.sXX / self.sXX_N
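    # Note: dividing the decayed sums sX/sXX by the decayed counts sX_N/sXX_N
    # debiases the exponentially weighted estimates during warm-up, in the same
    # spirit as the bias-correction step of Adam-style optimizers.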
def testcase(self):
"""[summary]
"""
import matplotlib.pyplot as plt
X = np.random.randn(10000,2)
#X = np.cumsum(X,axis=0)
pt = power_tracker(100,100,100)
print("All at once: power={}".format(pt.transform(X))) # all at once
pt = power_tracker(100,1000,1000)
print("alpha_mu={} alpha_pow={}".format(pt.alpha_mu,pt.alpha_power) )
step = 30
idxs = list(range(step,X.shape[0],step))
powers = np.zeros((len(idxs),X.shape[-1]))
mus = np.zeros((len(idxs),X.shape[-1]))
for i,j in enumerate(idxs):
powers[i,:] = np.sqrt(pt.transform(X[j-step:j,:]))
mus[i,:]=pt.mean()
for d in range(X.shape[-1]):
plt.subplot(X.shape[-1],1,d+1)
plt.plot(X[:,d])
plt.plot(idxs,mus[:,d])
plt.plot(idxs,powers[:,d])
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
class timestamp_interpolation(TransformerMixin):
"""Incremental streaming tranformer to transform from per-packet time-stamps to per-sample timestamps
with time-stamp smoothing, de-jittering, and dropped-sample detection.
"""
def __init__(self,fs=None,sample2timestamp=None, max_delta=200):
"""tranform from per-packet (i.e. multiple-samples) to per-sample timestamps
Args:
fs (float): default sample rate, used when no other timing info is available
sample2timestamp (transformer, optional): class to de-jitter timestamps based on sample-count. Defaults to None.
"""
self.fs=fs
a0 = 1000/self.fs if self.fs is not None else 1
# BODGE: special cases for particular mapping functions so can include the prior slope
if sample2timestamp=='lower_bound_tracker':
self.sample2timestamp = lower_bound_tracker(a0=a0)
elif sample2timestamp=='linear_trend_tracker':
self.sample2timestamp = linear_trend_tracker(a0=a0)
else:
self.sample2timestamp = sample2timestamp
self.max_delta = max_delta
def fit(self,ts,nsamp=1):
"""[summary]
Args:
ts ([type]): [description]
nsamp (int, optional): [description]. Defaults to 1.
"""
self.last_sample_timestamp_ = ts
self.n_ = 0
def transform(self,timestamp:float,nsamp:int=1):
"""add per-sample timestamp information to the data matrix
Args:
timestamp (float): the timestamp of the last sample of d
nsamp(int): number of samples to interpolate
Returns:
np.ndarray: (nsamp) the interpolated time-stamps
"""
if not hasattr(self,'last_sample_timestamp_'):
self.fit(timestamp,nsamp)
# update tracking number samples processed
self.n_ = self.n_ + nsamp
if self.last_sample_timestamp_ < timestamp or self.sample2timestamp is not None:
# update the tracker for the sample-number to sample timestamp mapping
if self.sample2timestamp is not None:
#print("n={} ts={}".format(n,timestamp))
newtimestamp = self.sample2timestamp.transform(self.n_, timestamp)
#print("ts={} newts={} diff={}".format(timestamp,newtimestamp,timestamp-newtimestamp))
                # use the corrected de-jittered time-stamp -- if it's not too different
if abs(timestamp-newtimestamp) < self.max_delta:
timestamp = int(newtimestamp)
# simple linear interpolation for the sample time-stamps
samples_ts = np.linspace(self.last_sample_timestamp_, timestamp, nsamp+1, endpoint=True, dtype=int)
samples_ts = samples_ts[1:]
else:
if self.fs :
# interpolate with the estimated sample rate
samples_ts = np.arange(-nsamp+1,1,dtype=int)*(1000/self.fs) + timestamp
else:
# give all same timestamp
samples_ts = np.ones(nsamp,dtype=int)*timestamp
# update the tracking info
self.last_sample_timestamp_ = timestamp
return samples_ts
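    # Hedged usage sketch (comments only): per-packet to per-sample time-stamps.
    #   tsfn = timestamp_interpolation(fs=100, sample2timestamp='lower_bound_tracker')
    #   sample_ts = tsfn.transform(packet_timestamp, nsamp=n_samples_in_packet)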
def testcase(self, npkt=1000, fs=100):
"""[summary]
Args:
npkt (int, optional): [description]. Defaults to 1000.
fs (int, optional): [description]. Defaults to 100.
"""
# generate random packet sizes
        nsamp = np.random.randint(0, 11, size=(npkt,)) # sizes in 0..10; random_integers is removed in modern numpy
# generate true sample timestamps
ts_true = np.arange(np.sum(nsamp))*1000/fs
# packet end indices
idx = np.cumsum(nsamp)-1
# packet end time-stamps
pkt_ts = ts_true[idx]
# add some time-stamp jitter, always positive..
pkt_ts = pkt_ts + np.random.uniform(0,.5*1000/fs,size=pkt_ts.shape)
        # apply the time-stamp interpolation
sts=[]
tsfn = timestamp_interpolation(fs=fs,sample2timestamp = 'lower_bound_tracker')
for i,(n,t) in enumerate(zip(nsamp,pkt_ts)):
samp_ts = tsfn.transform(t,n)
sts.extend(samp_ts)
# plot the result.
import matplotlib.pyplot as plt
plt.plot(ts_true - sts)
plt.show()
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
from mindaffectBCI.decoder.preprocess import temporally_decorrelate
class temporal_decorrelator(TransformerMixin):
"""Incremental streaming tranformer to decorrelate temporally channels in an input stream
"""
    def __init__(self, order=10, reg=1e-4, eta=1e-5, axis=-2):
        self.order=order
        self.reg=reg
        self.eta=eta
        self.axis=axis
def fit(self,X):
"""[summary]
Args:
X ([type]): [description]
"""
self.W_ = np.zeros((self.order,X.shape[-1]),dtype=X.dtype)
self.W_[-1,:]=1
_, self.W_ = self.transform(X[1:,:])
def transform(self,X):
"""add per-sample timestamp information to the data matrix
Args:
X (float): the data to decorrelate
nsamp(int): number of samples to interpolate
Returns:
np.ndarray: the decorrelated data
"""
if not hasattr(self,'W_'):
self.fit(X)
X, self.W_ = temporally_decorrelate(X, W=self.W_, reg=self.reg, eta=self.eta, axis=self.axis)
return X
def testcase(self, dur=3, fs=100, blksize=10):
"""[summary]
Args:
dur (int, optional): [description]. Defaults to 3.
fs (int, optional): [description]. Defaults to 100.
blksize (int, optional): [description]. Defaults to 10.
"""
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs=100
X = np.random.standard_normal((2,fs*dur,2)) # flat spectrum
#X = X + np.sin(np.arange(X.shape[-2])*2*np.pi/10)[:,np.newaxis]
X = X[:,:-1,:]+X[:,1:,:] # weak low-pass
#X = np.cumsum(X,-2) # 1/f spectrum
print("X={}".format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
        tdc = temporal_decorrelator()
        # flatten trials into the sample axis so the 2-d incremental transform applies
        X2 = X.reshape((-1, X.shape[-1]))
        wX = np.zeros(X2.shape, X2.dtype)
        for i in range(0, X2.shape[0], blksize):
            idx = slice(i, i + blksize)
            wX[idx, :] = tdc.transform(X2[idx, :])
        wX = wX.reshape(X.shape)
        # compare raw vs decorrelated spectra
plt.figure(2)
plot_grand_average_spectrum(wX,fs)
plt.suptitle('Decorrelated')
plt.show()
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
from mindaffectBCI.decoder.preprocess import standardize_channel_power
class channel_power_standardizer(TransformerMixin):
"""Incremental streaming tranformer to channel power normalization in an input stream
"""
def __init__(self, reg=1e-4, axis=-2):
self.reg=reg
self.axis=axis
def fit(self,X):
"""[summary]
Args:
X ([type]): [description]
"""
        self.sigma2_ = X[0,:]*X[0,:] # warmup with the 1st sample's power
self.transform(X[1:,:])
def transform(self,X):
"""add per-sample timestamp information to the data matrix
Args:
X (float): the data to decorrelate
Returns:
np.ndarray: the decorrelated data
"""
if not hasattr(self,'sigma2_'):
self.fit(X)
        X, self.sigma2_ = standardize_channel_power(X, sigma2=self.sigma2_, reg=self.reg, axis=self.axis) # track the updated running power
return X
def testcase(self, dur=3, fs=100, blksize=10):
"""[summary]
Args:
dur (int, optional): [description]. Defaults to 3.
fs (int, optional): [description]. Defaults to 100.
blksize (int, optional): [description]. Defaults to 10.
"""
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs=100
X = np.random.standard_normal((2,fs*dur,2)) # flat spectrum
#X = X + np.sin(np.arange(X.shape[-2])*2*np.pi/10)[:,np.newaxis]
X = X[:,:-1,:]+X[:,1:,:] # weak low-pass
#X = np.cumsum(X,-2) # 1/f spectrum
print("X={}".format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
        cps = channel_power_standardizer()
        # flatten trials into the sample axis so the 2-d incremental transform applies
        X2 = X.reshape((-1, X.shape[-1]))
        wX = np.zeros(X2.shape, X2.dtype)
        for i in range(0, X2.shape[0], blksize):
            idx = slice(i, i + blksize)
            wX[idx, :] = cps.transform(X2[idx, :])
        wX = wX.reshape(X.shape)
        # compare raw vs standardized spectra
plt.figure(2)
plot_grand_average_spectrum(wX,fs)
        plt.suptitle('Standardized')
plt.show()
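# Hedged sketch: chaining the streaming transformers above into a simple
# per-packet pipeline. iter_packets is a hypothetical iterable of (nsamp, d)
# np.ndarray blocks; everything else uses only classes defined in this module.
def _example_preproc_pipeline(iter_packets, fs=250, fs_out=100):
    ppfn = butterfilt_and_downsample(stopband=((0, 1), (25, -1)), order=4, fs=fs, fs_out=fs_out)
    cps = channel_power_standardizer()
    out = []
    for pkt in iter_packets:
        x = ppfn.transform(pkt)   # incrementally filter + downsample this packet
        if x.size > 0:            # aggressive downsampling may yield an empty block
            out.append(cps.transform(x))
    return np.concatenate(out, 0) if out else None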
def testRaw():
    """Test the data interface with raw (un-preprocessed) data and view it for 30s."""
    from sigViewer import sigViewer
    # test with raw
    ui = UtopiaDataInterface()
    ui.connect()
    sigViewer(ui,30000) # 30s sigviewer
def testPP():
"""[summary]
"""
from sigViewer import sigViewer
# test with a filter + downsampler
ppfn= butterfilt_and_downsample(order=4, stopband=((0,1),(25,-1)), fs_out=100)
#ppfn= butterfilt_and_downsample(order=4, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=80)
ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None)
ui.connect()
sigViewer(ui)
def testFileProxy(filename,fs_out=999):
"""[summary]
Args:
filename ([type]): [description]
fs_out (int, optional): [description]. Defaults to 999.
"""
from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
U = FileProxyHub(filename)
from sigViewer import sigViewer
# test with a filter + downsampler
#ppfn= butterfilt_and_downsample(order=4, stopband=((0,3),(25,-1)), fs_out=fs_out)
ppfn= butterfilt_and_downsample(order=4, stopband=(1,15,'bandpass'), fs_out=fs_out)
#ppfn = None
ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U)
ui.connect()
sigViewer(ui)
def testFileProxy2(filename):
"""[summary]
Args:
filename ([type]): [description]
"""
from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
U = FileProxyHub(filename)
fs = 200
fs_out = 200
# test with a filter + downsampler
ppfn= butterfilt_and_downsample(order=4, stopband=((45,65),(0,3),(25,-1)), fs=fs, fs_out=fs_out)
ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U, fs=fs)
ui.connect()
# run in bits..
data=[]
stim=[]
emptycount = 0
while True:
newmsg, nsamp, nstim = ui.update()
if len(newmsg) == 0 and nsamp == 0 and nstim == 0:
emptycount = emptycount + 1
if emptycount > 10:
break
else:
emptycount=0
if nsamp > 0:
data.append(ui.data_ringbuffer[-nsamp:,:].copy())
if nstim > 0:
stim.append(ui.stimulus_ringbuffer[-nstim:,:].copy())
# convert to single data block
data = np.vstack(data)
stim = np.vstack(stim)
# dump as pickle
import pickle
if ppfn is None:
pickle.dump(dict(data=data,stim=stim),open('raw_udi.pk','wb'))
else:
pickle.dump(dict(data=data,stim=stim),open('pp_udi.pk','wb'))
def testERP():
    """Test the data interface with an ERP (event-related potential) viewer."""
    from erpViewer import erpViewer
    ui = UtopiaDataInterface()
    ui.connect()
    erpViewer(ui, evtlabs=None)
def testElectrodeQualities(X,fs=200,pktsize=20):
"""[summary]
Args:
X ([type]): [description]
fs (int, optional): [description]. Defaults to 200.
pktsize (int, optional): [description]. Defaults to 20.
Returns:
[type]: [description]
"""
# recurse if more dims than we want...
if X.ndim>2:
sigq=[]
for i in range(X.shape[0]):
sigqi = testElectrodeQualities(X[i,...],fs,pktsize)
sigq.append(sigqi)
sigq=np.concatenate(sigq,0)
return sigq
ppfn= butterfilt_and_downsample(order=6, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=100)
ppfn.fit(X[:10,:],fs=200)
    # track the powers directly with power_tracker -- update_electrode_powers is an
    # instance method, so it cannot be called unbound on raw arrays as before;
    # the (5000, 500)ms half-lives mirror the class's noise2sig_halflife_ms default
    raw_pt = power_tracker(5000, 500, fs)
    preproc_pt = power_tracker(5000, 500, ppfn.out_fs_)
    noise2sig = np.zeros((int(X.shape[0]/pktsize),X.shape[-1]),dtype=np.float32)
    for pkti in range(noise2sig.shape[0]):
        t = pkti*pktsize
        Xi = X[t:t+pktsize,:]
        Xip = ppfn.transform(Xi)
        raw_power, preproc_power = raw_pt.transform(Xi), preproc_pt.transform(Xip)
        noise2sig[pkti,:] = np.maximum(float(1e-6), (raw_power - preproc_power)) / np.maximum(float(1e-8),preproc_power)
return noise2sig
if __name__ == "__main__":
#timestamp_interpolation().testcase()
#butterfilt_and_downsample.testcase()
#testRaw()
#testPP()
#testERP()
filename="~/Desktop/mark/mindaffectBCI_*.txt"
testFileProxy(filename)
#testFileProxy2(filename)
# "C:\\Users\\Developer\\Downloads\\mark\\mindaffectBCI_brainflow_200911_1229_90cal.txt")
#"..\..\Downloads\khash\mindaffectBCI_noisetag_bci_200907_1433.txt"
#!/usr/bin/env python
# Copyright (c) 2015 Freescale Semiconductor, Inc.
# Copyright 2016-2017 NXP
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import struct
from .codec import (MessageType, MessageInfo, Codec, CodecError)
class BasicCodec(Codec):
## Version of this codec.
BASIC_CODEC_VERSION = 1
def start_write_message(self, msgInfo):
header = (self.BASIC_CODEC_VERSION << 24) \
| ((msgInfo.service & 0xff) << 16) \
| ((msgInfo.request & 0xff) << 8) \
| (msgInfo.type.value & 0xff)
self.write_uint32(header)
self.write_uint32(msgInfo.sequence)
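    # Wire layout of the 32-bit header written above (before little-endian packing):
    #   [31:24] codec version | [23:16] service id | [15:8] request id | [7:0] message type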
def _write(self, fmt, value):
self._buffer += struct.pack(fmt, value)
self._cursor += struct.calcsize(fmt)
def write_bool(self, value):
self._write('<?', value)
def write_int8(self, value):
self._write('<b', value)
def write_int16(self, value):
self._write('<h', value)
def write_int32(self, value):
self._write('<i', value)
def write_int64(self, value):
self._write('<q', value)
def write_uint8(self, value):
self._write('<B', value)
def write_uint16(self, value):
self._write('<H', value)
def write_uint32(self, value):
self._write('<I', value)
def write_uint64(self, value):
self._write('<Q', value)
def write_float(self, value):
self._write('<f', value)
def write_double(self, value):
self._write('<d', value)
def write_string(self, value):
self.write_binary(value.encode())
def write_binary(self, value):
self.write_uint32(len(value))
self._buffer += value
def start_write_list(self, length):
self.write_uint32(length)
def start_write_union(self, discriminator):
self.write_uint32(discriminator)
def write_null_flag(self, flag):
self.write_uint8(1 if flag else 0)
    ##
    # @return MessageInfo with the message type, service, request, and sequence fields.
def start_read_message(self):
header = self.read_uint32()
sequence = self.read_uint32()
version = header >> 24
if version != self.BASIC_CODEC_VERSION:
raise CodecError("unsupported codec version %d" % version)
service = (header >> 16) & 0xff
request = (header >> 8) & 0xff
msgType = MessageType(header & 0xff)
return MessageInfo(type=msgType, service=service, request=request, sequence=sequence)
def _read(self, fmt):
result = struct.unpack_from(fmt, self._buffer, self._cursor)
self._cursor += struct.calcsize(fmt)
return result[0]
def read_bool(self):
return self._read('<?')
def read_int8(self):
return self._read('<b')
def read_int16(self):
return self._read('<h')
def read_int32(self):
return self._read('<i')
def read_int64(self):
return self._read('<q')
def read_uint8(self):
return self._read('<B')
def read_uint16(self):
return self._read('<H')
def read_uint32(self):
return self._read('<I')
def read_uint64(self):
return self._read('<Q')
def read_float(self):
return self._read('<f')
def read_double(self):
return self._read('<d')
def read_string(self):
return self.read_binary().decode()
def read_binary(self):
length = self.read_uint32()
data = self._buffer[self._cursor:self._cursor+length]
self._cursor += length
return data
##
# @return Int of list length.
def start_read_list(self):
return self.read_uint32()
##
# @return Int of union discriminator.
def start_read_union(self):
return self.read_int32()
def read_null_flag(self):
return self.read_uint8()
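
# Hedged example (editor's illustrative addition, not part of the original
# eRPC sources): the 32-bit message header packs version, service, request
# and message type into one little-endian word. This sketch reproduces the
# packing/unpacking arithmetic with plain struct so it can be checked in
# isolation; the field values below are made up.
def _example_header_roundtrip():
    version, service, request, msg_type = 1, 5, 2, 0
    header = (version << 24) | ((service & 0xff) << 16) | ((request & 0xff) << 8) | (msg_type & 0xff)
    packed = struct.pack('<I', header)        # same layout as BasicCodec.write_uint32
    unpacked, = struct.unpack('<I', packed)
    assert (unpacked >> 24) == version
    assert ((unpacked >> 16) & 0xff) == service
    assert ((unpacked >> 8) & 0xff) == request
    assert (unpacked & 0xff) == msg_type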
| 25.559211
| 93
| 0.60592
|
import struct
from .codec import (MessageType, MessageInfo, Codec, CodecError)
class BasicCodec(Codec):
    BASIC_CODEC_VERSION = 1
def start_write_message(self, msgInfo):
header = (self.BASIC_CODEC_VERSION << 24) \
| ((msgInfo.service & 0xff) << 16) \
| ((msgInfo.request & 0xff) << 8) \
| (msgInfo.type.value & 0xff)
self.write_uint32(header)
self.write_uint32(msgInfo.sequence)
def _write(self, fmt, value):
self._buffer += struct.pack(fmt, value)
self._cursor += struct.calcsize(fmt)
def write_bool(self, value):
self._write('<?', value)
def write_int8(self, value):
self._write('<b', value)
def write_int16(self, value):
self._write('<h', value)
def write_int32(self, value):
self._write('<i', value)
def write_int64(self, value):
self._write('<q', value)
def write_uint8(self, value):
self._write('<B', value)
def write_uint16(self, value):
self._write('<H', value)
def write_uint32(self, value):
self._write('<I', value)
def write_uint64(self, value):
self._write('<Q', value)
def write_float(self, value):
self._write('<f', value)
def write_double(self, value):
self._write('<d', value)
def write_string(self, value):
self.write_binary(value.encode())
def write_binary(self, value):
self.write_uint32(len(value))
self._buffer += value
def start_write_list(self, length):
self.write_uint32(length)
def start_write_union(self, discriminator):
self.write_uint32(discriminator)
def write_null_flag(self, flag):
self.write_uint8(1 if flag else 0)
def start_read_message(self):
header = self.read_uint32()
sequence = self.read_uint32()
version = header >> 24
if version != self.BASIC_CODEC_VERSION:
raise CodecError("unsupported codec version %d" % version)
service = (header >> 16) & 0xff
request = (header >> 8) & 0xff
msgType = MessageType(header & 0xff)
return MessageInfo(type=msgType, service=service, request=request, sequence=sequence)
def _read(self, fmt):
result = struct.unpack_from(fmt, self._buffer, self._cursor)
self._cursor += struct.calcsize(fmt)
return result[0]
def read_bool(self):
return self._read('<?')
def read_int8(self):
return self._read('<b')
def read_int16(self):
return self._read('<h')
def read_int32(self):
return self._read('<i')
def read_int64(self):
return self._read('<q')
def read_uint8(self):
return self._read('<B')
def read_uint16(self):
return self._read('<H')
def read_uint32(self):
return self._read('<I')
def read_uint64(self):
return self._read('<Q')
def read_float(self):
return self._read('<f')
def read_double(self):
return self._read('<d')
def read_string(self):
return self.read_binary().decode()
def read_binary(self):
length = self.read_uint32()
data = self._buffer[self._cursor:self._cursor+length]
self._cursor += length
return data
def start_read_list(self):
return self.read_uint32()
def start_read_union(self):
return self.read_int32()
def read_null_flag(self):
return self.read_uint8()
| true
| true
|
790d5615d3562c6caefacfcf222dab4687b95031
| 4,419
|
py
|
Python
|
venv/lib/python3.6/site-packages/requests_mock/request.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 1
|
2020-11-02T15:00:52.000Z
|
2020-11-02T15:00:52.000Z
|
venv/lib/python3.6/site-packages/requests_mock/request.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/requests_mock/request.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 1
|
2020-11-09T16:11:07.000Z
|
2020-11-09T16:11:07.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import requests
import six
from six.moves.urllib import parse as urlparse
class _RequestObjectProxy(object):
"""A wrapper around a requests.Request that gives some extra information.
    This will be important both for matching and so that when it's saved into
the request_history users will be able to access these properties.
"""
def __init__(self, request, **kwargs):
self._request = request
self._matcher = None
self._url_parts_ = None
self._qs = None
# All of these params should always exist but we use a default
# to make the test setup easier.
self._timeout = kwargs.pop('timeout', None)
self._allow_redirects = kwargs.pop('allow_redirects', None)
self._verify = kwargs.pop('verify', None)
self._stream = kwargs.pop('stream', None)
self._cert = kwargs.pop('cert', None)
self._proxies = copy.deepcopy(kwargs.pop('proxies', {}))
# FIXME(jamielennox): This is part of bug #1584008 and should default
# to True (or simply removed) in a major version bump.
self._case_sensitive = kwargs.pop('case_sensitive', False)
def __getattr__(self, name):
return getattr(self._request, name)
@property
def _url_parts(self):
if self._url_parts_ is None:
url = self._request.url
if not self._case_sensitive:
url = url.lower()
self._url_parts_ = urlparse.urlparse(url)
return self._url_parts_
@property
def scheme(self):
return self._url_parts.scheme
@property
def netloc(self):
return self._url_parts.netloc
@property
def hostname(self):
try:
return self.netloc.split(':')[0]
except IndexError:
return ''
@property
def port(self):
components = self.netloc.split(':')
try:
return int(components[1])
except (IndexError, ValueError):
pass
if self.scheme == 'https':
return 443
if self.scheme == 'http':
return 80
        # The default return shouldn't matter too much because if you want to
        # test this value you really should be setting it explicitly somewhere.
        # 0 at least is a boolean False and an int.
return 0
@property
def path(self):
return self._url_parts.path
@property
def query(self):
return self._url_parts.query
@property
def qs(self):
if self._qs is None:
self._qs = urlparse.parse_qs(self.query)
return self._qs
@property
def timeout(self):
return self._timeout
@property
def allow_redirects(self):
return self._allow_redirects
@property
def verify(self):
return self._verify
@property
def stream(self):
return self._stream
@property
def cert(self):
return self._cert
@property
def proxies(self):
return self._proxies
@classmethod
def _create(cls, *args, **kwargs):
return cls(requests.Request(*args, **kwargs).prepare())
@property
def text(self):
body = self.body
if isinstance(body, six.binary_type):
body = body.decode('utf-8')
return body
def json(self, **kwargs):
return json.loads(self.text, **kwargs)
@property
def matcher(self):
"""The matcher that this request was handled by.
The matcher object is handled by a weakref. It will return the matcher
object if it is still available - so if the mock is still in place. If
the matcher is not available it will return None.
"""
return self._matcher()
def __str__(self):
return "{0.method} {0.url}".format(self._request)
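
# Hedged example (editor's illustrative addition; the URL is made up): the
# proxy lazily parses the prepared URL, so scheme, port, path and the parsed
# query string are available as plain properties on recorded requests.
def _example_proxy_usage():
    req = _RequestObjectProxy._create('GET', 'http://example.com/search?q=phish')
    assert req.scheme == 'http'
    assert req.port == 80                      # falls back to the scheme default
    assert req.path == '/search'
    assert req.qs == {'q': ['phish']}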
| 27.110429
| 79
| 0.630686
|
import copy
import json
import requests
import six
from six.moves.urllib import parse as urlparse
class _RequestObjectProxy(object):
def __init__(self, request, **kwargs):
self._request = request
self._matcher = None
self._url_parts_ = None
self._qs = None
self._timeout = kwargs.pop('timeout', None)
self._allow_redirects = kwargs.pop('allow_redirects', None)
self._verify = kwargs.pop('verify', None)
self._stream = kwargs.pop('stream', None)
self._cert = kwargs.pop('cert', None)
self._proxies = copy.deepcopy(kwargs.pop('proxies', {}))
        self._case_sensitive = kwargs.pop('case_sensitive', False)
def __getattr__(self, name):
return getattr(self._request, name)
@property
def _url_parts(self):
if self._url_parts_ is None:
url = self._request.url
if not self._case_sensitive:
url = url.lower()
self._url_parts_ = urlparse.urlparse(url)
return self._url_parts_
@property
def scheme(self):
return self._url_parts.scheme
@property
def netloc(self):
return self._url_parts.netloc
@property
def hostname(self):
try:
return self.netloc.split(':')[0]
except IndexError:
return ''
@property
def port(self):
components = self.netloc.split(':')
try:
return int(components[1])
except (IndexError, ValueError):
pass
if self.scheme == 'https':
return 443
if self.scheme == 'http':
return 80
# wanting to test this value you really should be explicitly setting it
# somewhere. 0 at least is a boolean False and an int.
return 0
@property
def path(self):
return self._url_parts.path
@property
def query(self):
return self._url_parts.query
@property
def qs(self):
if self._qs is None:
self._qs = urlparse.parse_qs(self.query)
return self._qs
@property
def timeout(self):
return self._timeout
@property
def allow_redirects(self):
return self._allow_redirects
@property
def verify(self):
return self._verify
@property
def stream(self):
return self._stream
@property
def cert(self):
return self._cert
@property
def proxies(self):
return self._proxies
@classmethod
def _create(cls, *args, **kwargs):
return cls(requests.Request(*args, **kwargs).prepare())
@property
def text(self):
body = self.body
if isinstance(body, six.binary_type):
body = body.decode('utf-8')
return body
def json(self, **kwargs):
return json.loads(self.text, **kwargs)
@property
def matcher(self):
return self._matcher()
def __str__(self):
return "{0.method} {0.url}".format(self._request)
| true
| true
|
790d56a58b51f072fa37a6b7c101ec16af0ffa1c
| 4,987
|
py
|
Python
|
gen/interface.py
|
Rioghasarig/trlu
|
10aa768f6cf58be17d76923daecae2c70867f5e2
|
[
"X11"
] | 21
|
2015-03-14T03:19:00.000Z
|
2022-03-30T05:56:38.000Z
|
gen/interface.py
|
Rioghasarig/trlu
|
10aa768f6cf58be17d76923daecae2c70867f5e2
|
[
"X11"
] | 5
|
2015-01-03T13:02:30.000Z
|
2020-10-06T16:58:28.000Z
|
gen/interface.py
|
Rioghasarig/trlu
|
10aa768f6cf58be17d76923daecae2c70867f5e2
|
[
"X11"
] | 5
|
2015-06-05T08:25:37.000Z
|
2021-09-30T11:12:55.000Z
|
#!/usr/bin/env python3
import io
def parse_org_table(table_lines):
# remove separator row
table_lines.pop(1)
table_list = [[b.strip() for b in a[1:-2].split('|')] for a in table_lines]
# get column list
column_list = table_list.pop(0)
#print(column_names)
# organize table data
table_data = []
for param in table_list:
param_dict = {}
for column, value in zip(column_list,param):
param_dict[column] = value
table_data.append(param_dict)
#print(table_data)
return table_data
def read_org_file(file_name):
# read lines
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
# get function name
function_name = file_lines[0].strip()
#print(function_name)
# parse remaining lines as table
table_data = parse_org_table(file_lines[1:])
return function_name, table_data
def load_interface_files(file_name):
# read lines
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
interface_list = parse_org_table(file_lines)
interface_data = []
for interface_function in interface_list:
function_name, argument_data = read_org_file(interface_function['interface_file'])
d = {}
d['function_name'] = function_name
d['argument_data'] = argument_data
d['format'] = interface_function['format']
interface_data.append(d)
#print(interface_data)
return interface_data
def function_declaration(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write('void ')
f.write(prefix + function_dict['function_name'] + suffix)
f.write('(\n')
for arg in function_dict['argument_data']:
f.write(' {0}* {1},\n'.format(arg['c_type'],arg['var_name']))
func_dec = f.getvalue()[:-2] + ')'
#print(func_dec)
return func_dec
def function_call(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write(' ' + prefix + function_dict['function_name'] + suffix)
f.write('(')
for arg in function_dict['argument_data']:
f.write('{},'.format(arg['var_name']))
func_dec = f.getvalue()[:-1] + ')'
#print(func_dec)
return func_dec
def get_header(file_name):
interface_data = load_interface_files(file_name)
# start the header buffer
f = io.StringIO()
# start the header file
f.write('#ifndef CLUSOL_H_\n')
f.write('#define CLUSOL_H_\n')
f.write('\n')
# include directives
f.write('#include <stdint.h>')
f.write('\n\n')
# function declarations
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(';\n\n')
# end the headerfile
f.write('#endif // CLUSOL_H_\n')
# clean up and return
header_str = f.getvalue()
f.close();
return header_str
def get_source(file_name):
interface_data = load_interface_files(file_name)
# start the source buffer
f = io.StringIO()
# include directives
f.write('#include "clusol.h"\n')
f.write('\n')
# fortran function declarations
f.write('// declarations for fortran function calls\n')
for interface_func in interface_data:
if interface_func['format'] == 'f90':
f.write(function_declaration(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_declaration(interface_func,suffix='_'))
f.write(';\n\n')
# function calls in c
f.write('// c interface function definitions\n')
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(' {\n')
if interface_func['format'] == 'f90':
f.write(function_call(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_call(interface_func,suffix='_'))
f.write(';\n')
f.write('}\n\n')
# clean up and return
source_str = f.getvalue()
f.close();
return source_str
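
# Hedged example (editor's illustrative addition; the column names follow the
# 'var_name'/'c_type' convention used by function_declaration above): the
# parser expects org-mode table rows that start and end with '|', with the
# second row being the separator line.
def _example_parse_org_table():
    lines = [
        '| var_name | c_type  |\n',
        '|----------+---------|\n',
        '| m        | int64_t |\n',
    ]
    assert parse_org_table(lines) == [{'var_name': 'm', 'c_type': 'int64_t'}]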
# for testing
if __name__ == '__main__':
# parse arguments
import argparse
parser = argparse.ArgumentParser(
description='Generate C interface to LUSOL.')
parser.add_argument('-i','--input',
help='input file name',
required=True)
parser.add_argument('-o','--output',
help='output file name',
required=True)
parser.add_argument('-t','--type',
help='output file type',
required=True,
choices=['header','source'])
args = parser.parse_args()
# generate code
if args.type == 'header':
file_str = get_header(args.input)
elif args.type == 'source':
file_str = get_source(args.input)
else:
        raise Exception('unknown type')
# write code
f = open(args.output,'w')
f.write(file_str)
f.close()
| 32.594771
| 90
| 0.620413
|
import io
def parse_org_table(table_lines):
table_lines.pop(1)
table_list = [[b.strip() for b in a[1:-2].split('|')] for a in table_lines]
column_list = table_list.pop(0)
table_data = []
for param in table_list:
param_dict = {}
for column, value in zip(column_list,param):
param_dict[column] = value
table_data.append(param_dict)
return table_data
def read_org_file(file_name):
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
function_name = file_lines[0].strip()
table_data = parse_org_table(file_lines[1:])
return function_name, table_data
def load_interface_files(file_name):
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
interface_list = parse_org_table(file_lines)
interface_data = []
for interface_function in interface_list:
function_name, argument_data = read_org_file(interface_function['interface_file'])
d = {}
d['function_name'] = function_name
d['argument_data'] = argument_data
d['format'] = interface_function['format']
interface_data.append(d)
return interface_data
def function_declaration(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write('void ')
f.write(prefix + function_dict['function_name'] + suffix)
f.write('(\n')
for arg in function_dict['argument_data']:
f.write(' {0}* {1},\n'.format(arg['c_type'],arg['var_name']))
func_dec = f.getvalue()[:-2] + ')'
return func_dec
def function_call(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write(' ' + prefix + function_dict['function_name'] + suffix)
f.write('(')
for arg in function_dict['argument_data']:
f.write('{},'.format(arg['var_name']))
func_dec = f.getvalue()[:-1] + ')'
return func_dec
def get_header(file_name):
interface_data = load_interface_files(file_name)
f = io.StringIO()
f.write('#ifndef CLUSOL_H_\n')
f.write('#define CLUSOL_H_\n')
f.write('\n')
f.write('#include <stdint.h>')
f.write('\n\n')
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(';\n\n')
f.write('#endif // CLUSOL_H_\n')
header_str = f.getvalue()
f.close();
return header_str
def get_source(file_name):
interface_data = load_interface_files(file_name)
f = io.StringIO()
f.write('#include "clusol.h"\n')
f.write('\n')
f.write('// declarations for fortran function calls\n')
for interface_func in interface_data:
if interface_func['format'] == 'f90':
f.write(function_declaration(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_declaration(interface_func,suffix='_'))
f.write(';\n\n')
f.write('// c interface function definitions\n')
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(' {\n')
if interface_func['format'] == 'f90':
f.write(function_call(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_call(interface_func,suffix='_'))
f.write(';\n')
f.write('}\n\n')
source_str = f.getvalue()
f.close();
return source_str
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Generate C interface to LUSOL.')
parser.add_argument('-i','--input',
help='input file name',
required=True)
parser.add_argument('-o','--output',
help='output file name',
required=True)
parser.add_argument('-t','--type',
help='output file type',
required=True,
choices=['header','source'])
args = parser.parse_args()
if args.type == 'header':
file_str = get_header(args.input)
elif args.type == 'source':
file_str = get_source(args.input)
else:
raise Exception('uknown type')
f = open(args.output,'w')
f.write(file_str)
f.close()
| true
| true
|
790d56c7bb7566425f8df84756f87444a8e96569
| 8,105
|
py
|
Python
|
batracker/signal_detection/detection.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
batracker/signal_detection/detection.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
batracker/signal_detection/detection.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
'''
Deals with the actual detection of signals in multichannel audio files.
There are two problems that need to be solved when detecting a signal of interest.
#. within-channel signal detection
#. across-channel correspondence matching
Within-channel signal detection
-------------------------------
This task involves `locally` checking if there are any signals of interest in one channel at a time. The exact method used for
within-channel detection can be set by the user, though the simplest is of course a basic threshold-type detector. Whenever the
signal goes beyond a particular threshold, that region is considered to contain a signal.
Built-in detection routines
---------------------------
The detection module has a few simple detection routines. More advanced routines
are unlikely to form a core part of the package, and need to be written by the
user.
#. dBrms_detector : Calculates the moving dB rms profile of an audio clip. The
user needs to define the size of the moving window and the threshold in dB rms.
#. envelope_detector : Generates the Hilbert envelope of the audio clip. Regions above
the set threshold in dB peak amplitude are defined as detections. This method is faster
than the dBrms_detector.
'''
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize']=10000
import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wav
import scipy.ndimage as ndimage
import tqdm
from batracker.common_dsp.sigproc import *
def cross_channel_threshold_detector(multichannel, fs, **kwargs):
'''
Parameters
----------
multichannel : np.array
Msamples x Nchannels audio data
fs : float >0
detector_function : function, optional
The function used to detect the start and end of a signal.
Any custom detector function can be given, the compulsory inputs
are audio np.array, sample rate and the function should accept keyword
arguments (even if it doesn't use them.)
Defaults to dBrms_detector.
Returns
-------
all_detections : list
A list with sublists containing start-stop times of the detections
in each channel. Each sublist contains the detections in one channel.
Notes
-----
For further keyword arguments see the `threshold_detector` function
See Also
--------
dBrms_detector
'''
samples, channels = multichannel.shape
detector_function = kwargs.get('detector_function', dBrms_detector)
print(channels, samples)
all_detections = []
for each in tqdm.tqdm(range(channels)):
all_detections.append(detector_function(multichannel[:,each], fs, **kwargs))
return all_detections
def dBrms_detector(one_channel, fs, **kwargs):
'''
    Calculates the dB rms profile of the input audio and
    selects regions where the profile is above the threshold.
Parameters
----------
one_channel
fs
dbrms_threshold: float, optional
Defaults to -50 dB rms
dbrms_window: float, optional
The window which is used to calculate the dB rms profile
in seconds. Defaults to 0.001 seconds.
Returns
-------
detections : list with tuples
Each tuple corresponds to a candidate signal region
'''
if one_channel.ndim > 1:
raise IndexError(f'Input audio must be flattened, and have only 1 dimension. \
Current audio has {one_channel.ndim} dimensions')
dbrms_window = kwargs.get('dbrms_window',0.001) # seconds
dbrms_threshold = kwargs.get('dbrms_threshold', -50)
window_samples = int(fs*dbrms_window)
dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))
labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)
if num_regions==0:
print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]
return regions_above_timestamps
def envelope_detector(audio, fs, **kwargs):
'''
Generates the Hilbert envelope of the audio. Signals are detected
wherever the envelope goes beyond a user-defined threshold value.
    The two main options are to segment loud signals with reference to dB peak or
    with reference to dB above the floor level.
Parameters
----------
audio
fs
Keyword Arguments
-----------------
threshold_db_floor: float, optional
The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as
the floor level. If not specified, then threshold_dbpeak is used to segment signals.
threshold_dbpeak : float, optional
The value beyond which a signal is considered to start.
        Used only if threshold_db_floor is not given.
lowpass_durn: float, optional
The highest time-resolution of envelope fluctuation to keep.
This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope
signal.
Returns
-------
regions_above_timestamps
'''
envelope = np.abs(signal.hilbert(audio))
if not kwargs.get('lowpass_durn') is None:
lowpass_durn = kwargs['lowpass_durn'] # seconds
freq = 1.0/lowpass_durn
b,a = signal.butter(1, freq/(fs*0.5),'lowpass')
envelope = signal.filtfilt(b,a,envelope)
if not kwargs.get('threshold_db_floor', None) is None:
floor_level = np.percentile(20*np.log10(envelope),5)
threshold_db = floor_level + kwargs['threshold_db_floor']
else:
# get regions above the threshold
threshold_db = kwargs['threshold_dbpeak']
linear_threshold = 10**(threshold_db/20)
labelled, num_detections = ndimage.label(envelope>=linear_threshold)
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs ) for each in regions_above]
return regions_above_timestamps
def get_start_stop_times(findobjects_tuple, fs):
'''
'''
only_tuple = findobjects_tuple[0]
start, stop = only_tuple.start/fs, only_tuple.stop/fs
return start, stop
def moving_rms(X, **kwargs):
'''Calculates moving rms of a signal with given window size.
    Outputs an np.array with one value per sample of X. Samples closer than
    window_size to the end of X cannot be covered by a full window and are
    set to np.nan.
Parameters
----------
X : np.array
Signal of interest.
window_size : int, optional
Defaults to 125 samples.
Returns
-------
all_rms : np.array
Moving rms of the signal.
'''
window_size = kwargs.get('window_size', 125)
starts = np.arange(0, X.size)
stops = starts+window_size
valid = stops<X.size
valid_starts = np.int32(starts[valid])
valid_stops = np.int32(stops[valid])
all_rms = np.ones(X.size).reshape(-1,1)*999
for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
rms_value = rms(X[start:stop])
all_rms[i] = rms_value
    # samples too close to the end for a full window stay un-assigned; mark them as NaN
all_rms[all_rms==999] = np.nan
return all_rms
#
#if __name__ == '__main__':
# import scipy.signal as signal
# # trying out the hilbert envelope method:
# fs = 250000
# background = -60 # dB rms
# audio = np.random.normal(0, 10**(background/20), fs)
# duration = 0.005
# sound_start = 0.05
# t = np.linspace(0, duration, int(fs*duration))
# bat_call = signal.chirp(t,90000, 25000, t[-1])
# bat_call *= 0.5
# sound_stop = sound_start+duration
#
# start, end = np.int32(np.array([sound_start,
# sound_stop])*fs)
# audio[start:end] += bat_call
#
# envelope = np.abs(signal.hilbert(audio))
#
# dets = envelope_detector(audio, fs, threshold_dbpeak=-20)
# print(dets)
##
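
# Hedged example (editor's illustrative addition, mirroring the commented-out
# test above): a quiet noise floor with one loud chirp should produce a single
# detection from envelope_detector when thresholding on dB peak amplitude.
# All numbers below are made up for illustration.
def _example_envelope_detection():
    fs = 250000
    audio = np.random.normal(0, 10**(-60/20), fs)
    durn = 0.005
    t = np.linspace(0, durn, int(fs*durn))
    call = 0.5*signal.chirp(t, 90000, t[-1], 25000)
    start = int(0.05*fs)
    audio[start:start+call.size] += call
    return envelope_detector(audio, fs, threshold_dbpeak=-20)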
| 33.912134
| 128
| 0.672424
|
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize']=10000
import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wav
import scipy.ndimage as ndimage
import tqdm
from batracker.common_dsp.sigproc import *
def cross_channel_threshold_detector(multichannel, fs, **kwargs):
samples, channels = multichannel.shape
detector_function = kwargs.get('detector_function', dBrms_detector)
print(channels, samples)
all_detections = []
for each in tqdm.tqdm(range(channels)):
all_detections.append(detector_function(multichannel[:,each], fs, **kwargs))
return all_detections
def dBrms_detector(one_channel, fs, **kwargs):
if one_channel.ndim > 1:
raise IndexError(f'Input audio must be flattened, and have only 1 dimension. \
Current audio has {one_channel.ndim} dimensions')
dbrms_window = kwargs.get('dbrms_window',0.001)
dbrms_threshold = kwargs.get('dbrms_threshold', -50)
window_samples = int(fs*dbrms_window)
dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))
labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)
if num_regions==0:
print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]
return regions_above_timestamps
def envelope_detector(audio, fs, **kwargs):
envelope = np.abs(signal.hilbert(audio))
if not kwargs.get('lowpass_durn') is None:
lowpass_durn = kwargs['lowpass_durn']
freq = 1.0/lowpass_durn
b,a = signal.butter(1, freq/(fs*0.5),'lowpass')
envelope = signal.filtfilt(b,a,envelope)
if not kwargs.get('threshold_db_floor', None) is None:
floor_level = np.percentile(20*np.log10(envelope),5)
threshold_db = floor_level + kwargs['threshold_db_floor']
else:
threshold_db = kwargs['threshold_dbpeak']
linear_threshold = 10**(threshold_db/20)
labelled, num_detections = ndimage.label(envelope>=linear_threshold)
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs ) for each in regions_above]
return regions_above_timestamps
def get_start_stop_times(findobjects_tuple, fs):
only_tuple = findobjects_tuple[0]
start, stop = only_tuple.start/fs, only_tuple.stop/fs
return start, stop
def moving_rms(X, **kwargs):
window_size = kwargs.get('window_size', 125)
starts = np.arange(0, X.size)
stops = starts+window_size
valid = stops<X.size
valid_starts = np.int32(starts[valid])
valid_stops = np.int32(stops[valid])
all_rms = np.ones(X.size).reshape(-1,1)*999
for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
rms_value = rms(X[start:stop])
all_rms[i] = rms_value
all_rms[all_rms==999] = np.nan
return all_rms
| true
| true
|
790d573c720b423748239250a55bc07862a053db
| 3,014
|
py
|
Python
|
test/test_lstm2d_cell.py
|
FlorianPfisterer/2D-LSTM-Seq2Seq
|
1b07273fc73237259ae99eabfc509f54ad233ccf
|
[
"MIT"
] | 28
|
2019-04-11T19:03:27.000Z
|
2022-03-08T07:32:56.000Z
|
test/test_lstm2d_cell.py
|
FlorianPfisterer/2D-LSTM-Seq2Seq
|
1b07273fc73237259ae99eabfc509f54ad233ccf
|
[
"MIT"
] | 1
|
2018-12-18T17:23:29.000Z
|
2018-12-18T17:23:29.000Z
|
test/test_lstm2d_cell.py
|
FlorianPfisterer/2d-seq2seq
|
1b07273fc73237259ae99eabfc509f54ad233ccf
|
[
"MIT"
] | 6
|
2019-04-11T19:03:29.000Z
|
2021-11-23T13:31:34.000Z
|
from unittest import TestCase
import torch
from model.lstm2d_cell import LSTM2dCell
class LSTM2dCellTest(TestCase):
"""
Unit tests for the 2D-LSTM cell.
"""
embed_dim = 50
encoder_state_dim = 20
input_dim = 2 * encoder_state_dim + embed_dim
cell_state_dim = 25
batch_size = 42
def setUp(self):
torch.manual_seed(42)
self.x_j = torch.randn(self.batch_size, self.input_dim)
self.s_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)
self.s_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)
self.c_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)
self.c_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)
self.device = torch.device('cpu')
def test_dimensions(self):
"""
Tests if the input and output dimensions of the cell are as expected.
"""
cell = LSTM2dCell(self.input_dim, self.cell_state_dim, self.device)
c_ji, s_ji = cell.forward(x=self.x_j, s_prev_hor=self.s_prev_hor, s_prev_ver=self.s_prev_ver,
c_prev_hor=self.c_prev_hor, c_prev_ver=self.c_prev_ver)
c_shape = list(c_ji.shape)
s_shape = list(s_ji.shape)
self.assertEqual(c_shape, [self.batch_size, self.cell_state_dim], 'Next cell state has unexpected shape')
self.assertEqual(s_shape, [self.batch_size, self.cell_state_dim], 'Next hidden state has unexpected shape')
def test_same_over_batch(self):
"""
Tests if the outputs of the cell are the same over the batch if the same input is fed in multiple times.
"""
toy_input_dim = 4
toy_batch_size = 7
toy_state_dim = 3
# create toy values and repeat them over the batch
toy_x = torch.Tensor([1.5, 4.2, 3.1415, 2.71]).expand(toy_batch_size, toy_input_dim)
toy_s_prev_hor = torch.Tensor([-.4, 1.2, 42.195]).expand(toy_batch_size, toy_state_dim)
toy_s_prev_ver = torch.Tensor([2.3, 7.12, -3.14]).expand(toy_batch_size, toy_state_dim)
toy_c_prev_hor = torch.Tensor([-10.1, 4.5, -0.1]).expand(toy_batch_size, toy_state_dim)
toy_c_prev_ver = torch.Tensor([17, 1.001, -2.23]).expand(toy_batch_size, toy_state_dim)
cell = LSTM2dCell(toy_input_dim, toy_state_dim, self.device)
c, s = cell.forward(x=toy_x, s_prev_hor=toy_s_prev_hor, s_prev_ver=toy_s_prev_ver,
c_prev_hor=toy_c_prev_hor, c_prev_ver=toy_c_prev_ver)
# check if the cell and hidden state are the same across the whole batch
c_first = c[0, :]
repeated_c_first = c_first.expand(toy_batch_size, c_first.shape[-1])
self.assertTrue(repeated_c_first.allclose(c), 'Next cell state varies across same-input batch')
s_first = s[0, :]
repeated_s_first = s_first.expand(toy_batch_size, s_first.shape[-1])
self.assertTrue(repeated_s_first.allclose(s), 'Next hidden state varies across same-input batch')
| 42.450704
| 115
| 0.675514
|
from unittest import TestCase
import torch
from model.lstm2d_cell import LSTM2dCell
class LSTM2dCellTest(TestCase):
embed_dim = 50
encoder_state_dim = 20
input_dim = 2 * encoder_state_dim + embed_dim
cell_state_dim = 25
batch_size = 42
def setUp(self):
torch.manual_seed(42)
self.x_j = torch.randn(self.batch_size, self.input_dim)
self.s_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)
self.s_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)
self.c_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)
self.c_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)
self.device = torch.device('cpu')
def test_dimensions(self):
cell = LSTM2dCell(self.input_dim, self.cell_state_dim, self.device)
c_ji, s_ji = cell.forward(x=self.x_j, s_prev_hor=self.s_prev_hor, s_prev_ver=self.s_prev_ver,
c_prev_hor=self.c_prev_hor, c_prev_ver=self.c_prev_ver)
c_shape = list(c_ji.shape)
s_shape = list(s_ji.shape)
self.assertEqual(c_shape, [self.batch_size, self.cell_state_dim], 'Next cell state has unexpected shape')
self.assertEqual(s_shape, [self.batch_size, self.cell_state_dim], 'Next hidden state has unexpected shape')
def test_same_over_batch(self):
toy_input_dim = 4
toy_batch_size = 7
toy_state_dim = 3
toy_x = torch.Tensor([1.5, 4.2, 3.1415, 2.71]).expand(toy_batch_size, toy_input_dim)
toy_s_prev_hor = torch.Tensor([-.4, 1.2, 42.195]).expand(toy_batch_size, toy_state_dim)
toy_s_prev_ver = torch.Tensor([2.3, 7.12, -3.14]).expand(toy_batch_size, toy_state_dim)
toy_c_prev_hor = torch.Tensor([-10.1, 4.5, -0.1]).expand(toy_batch_size, toy_state_dim)
toy_c_prev_ver = torch.Tensor([17, 1.001, -2.23]).expand(toy_batch_size, toy_state_dim)
cell = LSTM2dCell(toy_input_dim, toy_state_dim, self.device)
c, s = cell.forward(x=toy_x, s_prev_hor=toy_s_prev_hor, s_prev_ver=toy_s_prev_ver,
c_prev_hor=toy_c_prev_hor, c_prev_ver=toy_c_prev_ver)
c_first = c[0, :]
repeated_c_first = c_first.expand(toy_batch_size, c_first.shape[-1])
self.assertTrue(repeated_c_first.allclose(c), 'Next cell state varies across same-input batch')
s_first = s[0, :]
repeated_s_first = s_first.expand(toy_batch_size, s_first.shape[-1])
self.assertTrue(repeated_s_first.allclose(s), 'Next hidden state varies across same-input batch')
| true
| true
|
790d57b676bc83661ce64849f5d68313fc98cf35
| 8,488
|
py
|
Python
|
bidir_dijkstra.py
|
colon3ltocard/pythonalgorithms
|
60e2a46d4e53430570142f79e9930b02c3f89ed0
|
[
"MIT"
] | null | null | null |
bidir_dijkstra.py
|
colon3ltocard/pythonalgorithms
|
60e2a46d4e53430570142f79e9930b02c3f89ed0
|
[
"MIT"
] | null | null | null |
bidir_dijkstra.py
|
colon3ltocard/pythonalgorithms
|
60e2a46d4e53430570142f79e9930b02c3f89ed0
|
[
"MIT"
] | null | null | null |
"""
Visualizing bidirectional Dijkstra
using matplotlib
"""
import sys
from dataclasses import dataclass
from heapq import heappush, heappop
from itertools import permutations
from collections import defaultdict
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.animation as animation
from dijkstra import (
Node,
generate_random_graph,
build_shortest_path,
dijkstra,
)
@dataclass
class Context:
distances: dict
previous: dict
node: None
visited_nodes: set
def dijkstra_iterator(nodes: list[Node], src_id: int, hf=lambda x: 0.0):
"""
    Internal loop of the Dijkstra algorithm,
    exposed as a step-by-step iterator.
    hf is an optional heuristic.
"""
visited_nodes = set()
h: list[tuple[float, Node]] = []
previous = dict()
distances = defaultdict(lambda: sys.maxsize)
distances[src_id] = hf(nodes[src_id])
ctx: Context = Context(
previous=previous,
distances=distances,
node=None,
visited_nodes=visited_nodes,
)
heappush(h, (0.0, nodes[src_id]))
while h:
_, node = heappop(h)
if node.id in visited_nodes:
continue
dist = distances[node.id]
for n, d in (
(nodes[k], v)
for k, v in node.neighbours.items()
if k not in visited_nodes
):
new_dist = dist + d
cost = new_dist + hf(n) - hf(node)
if cost <= distances[n.id]:
distances[n.id] = cost
previous[n.id] = node.id
heappush(h, (cost, n))
visited_nodes.add(node.id)
ctx.node = node
yield ctx
ctx.node = None
yield ctx
def dijkstra_forward(
nodes: list[Node], src_id: int, dst_id: int, hf=lambda x: 0.0
) -> list[int]:
"""
'classical' forward Dijkstra but based on our iterator.
"""
coro = dijkstra_iterator(nodes, src_id, hf=hf)
for ctx in coro:
if ctx.node is None:
return [], []
elif ctx.node.id == dst_id:
return ctx.distances[dst_id], list(
build_shortest_path(ctx.previous, dst_id, src_id)
)
def bidir_dijkstra(
nodes: list[Node],
src_id: int,
dst_id: int,
hff=lambda _: 0.0,
hfb=lambda _: 0.0,
consistent: bool = True,
) -> list[int]:
"""
    Bidirectional Dijkstra: we search from both start => end
and end => start using two iterators.
hff and hfb are optional heuristics
for respectively the forward and backward iterators
(for later bidir A*)
"""
forward = dijkstra_iterator(nodes, src_id, hf=hff)
backward = dijkstra_iterator(nodes, dst_id, hf=hfb)
shortest = sys.maxsize
forward_node = backward_node = None
f = []
b = []
for idx, (ctx_forward, ctx_backward) in enumerate(zip(forward, backward)):
if any(x.node is None for x in (ctx_forward, ctx_backward)):
# no path between the two nodes
return [], [], (f, b)
f.append(ctx_forward.node)
b.append(ctx_backward.node)
if forward_node and (
not consistent
or sum(
x.distances[x.node.id] - hf(x.node)
for x, hf in ((ctx_forward, hff), (ctx_backward, hfb))
)
>= shortest
):
forward_path = build_shortest_path(
ctx_forward.previous, forward_node.id, src_id
)
backward_path = build_shortest_path(
ctx_backward.previous, backward_node.id, dst_id
)[::-1]
path = forward_path + backward_path
return (
shortest,
path,
(f, b),
)
else:
for (ctx, hf), (ctx2, hf2) in permutations(
((ctx_forward, hff), (ctx_backward, hfb)), 2
):
for n, d in ctx.node.neighbours.items():
if n in ctx2.visited_nodes:
distance = (
ctx.distances[ctx.node.id]
+ ctx2.distances[n]
+ d
- hf(ctx.node)
- hf2(nodes[n])
)
if distance < shortest:
shortest = distance
forward_node = (
ctx.node if ctx is ctx_forward else nodes[n]
)
backward_node = (
ctx.node if ctx is ctx_backward else nodes[n]
)
print(
f'Iter_{idx}: contact between {forward_node}->{backward_node} with d={shortest}'
)
class Animator:
"""
Builds an animation from
a bidir shortest path finder.
"""
def __init__(self, nodes: list[Node], title='', draw_edges=True) -> None:
self.fig, self.ax = plt.subplots()
plt.title(title)
plt.tight_layout()
self.ax.set_aspect('equal')
self.i = True
if draw_edges:
edges = {
tuple(sorted((n.id, x))) for n in nodes for x in n.neighbours
}
for edge in edges:
from_node, to_node = [nodes[x] for x in edge]
x = [n.x for n in (from_node, to_node)]
y = [n.y for n in (from_node, to_node)]
plt.plot(x, y, color='gray', linewidth=0.5)
x, y = [n.x for n in nodes], [n.y for n in nodes]
self.ax.scatter = plt.scatter(
x,
y,
c=[0 for _ in range(len(x))],
s=[30] + [10] * (len(nodes) - 2) + [30],
vmin=0,
vmax=3,
cmap=matplotlib.colors.ListedColormap(
['grey', 'springgreen', 'red', 'white']
),
)
self._colors = self.ax.scatter.get_array()
for n in nodes:
if not n.neighbours:
self._colors[n.id] = 3
def update(self, nodes: tuple[Node, Node, list[Node]]):
"""
Updates the plot with a tuple of nodes (forward, backward, shortest_path)
"""
f, b, s = nodes
if not s:
self._colors[f.id] = 1
self._colors[b.id] = 2
self.ax.scatter.set_array(self._colors)
return (self.ax.scatter,)
else:
x = [n.x for n in s]
y = [n.y for n in s]
if self.i:
c = 'green'
else:
c = 'orange'
ap = self.ax.plot(x, y, color=c, linewidth=2)
self.i = not (self.i)
return ap
def make_animated_gif(
title: str,
g: list[Node],
dst_file: str,
fs: list[Node],
bs: list[Node],
shortest: list[Node],
draw_edges: bool = True,
writer: str = 'ffmpeg',
interval: int = 250,
blinking_ratio=0.5,
):
"""
    Makes an animated gif out of the two sequences of forward (fs) and backward (bs)
    nodes visited by the path-finding algorithm. The final shortest path is blinked.
"""
anim = Animator(g, title=title, draw_edges=draw_edges)
def node_gen():
for fn, bn in zip(fs, bs):
yield fn, bn, []
res = [g[i] for i in shortest]
for _ in range(int(len(fs) * blinking_ratio)):
yield _, _, res
ani = animation.FuncAnimation(
anim.fig,
anim.update,
node_gen(),
interval=interval,
blit=True,
repeat_delay=500,
save_count=len(fs) * 2,
)
ani.save(f'imgs/{dst_file}', writer=writer)
if __name__ == '__main__':
# sanity check on the iterator versus 'simple' implementation
g = generate_random_graph(100, connect_probability=0.1)
cost, sp = dijkstra_forward(g, 0, len(g) - 1)
cost2, sp2 = dijkstra(g, 0, len(g) - 1)
    # we also compare our bidir version against the other two ^^
cost3, sp3, (f, b) = bidir_dijkstra(g, 0, len(g) - 1)
# and against a backward run only
cost4, sp4 = dijkstra_forward(g, len(g) - 1, 0)
sp4 = sp4[::-1]
print(cost, cost2, cost3, cost4)
for p in (sp, sp2, sp4, sp3):
print(' -> '.join(str(p) for p in p))
assert sp == sp2 == sp3 == sp4
make_animated_gif(
f'Bidir Dijkstra n={len(f)}', g, 'bidir_100.gif', f, b, sp3
)
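    # Hedged addition (editor's illustration): the forward/backward expansion
    # lists returned by bidir_dijkstra show how far each frontier got before
    # the stopping condition was met.
    print(f'forward expanded {len(f)} nodes, backward expanded {len(b)} nodes')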
| 28.38796
| 112
| 0.521206
|
import sys
from dataclasses import dataclass
from heapq import heappush, heappop
from itertools import permutations
from collections import defaultdict
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.animation as animation
from dijkstra import (
Node,
generate_random_graph,
build_shortest_path,
dijkstra,
)
@dataclass
class Context:
distances: dict
previous: dict
node: None
visited_nodes: set
def dijkstra_iterator(nodes: list[Node], src_id: int, hf=lambda x: 0.0):
visited_nodes = set()
h: list[tuple[float, Node]] = []
previous = dict()
distances = defaultdict(lambda: sys.maxsize)
distances[src_id] = hf(nodes[src_id])
ctx: Context = Context(
previous=previous,
distances=distances,
node=None,
visited_nodes=visited_nodes,
)
heappush(h, (0.0, nodes[src_id]))
while h:
_, node = heappop(h)
if node.id in visited_nodes:
continue
dist = distances[node.id]
for n, d in (
(nodes[k], v)
for k, v in node.neighbours.items()
if k not in visited_nodes
):
new_dist = dist + d
cost = new_dist + hf(n) - hf(node)
if cost <= distances[n.id]:
distances[n.id] = cost
previous[n.id] = node.id
heappush(h, (cost, n))
visited_nodes.add(node.id)
ctx.node = node
yield ctx
ctx.node = None
yield ctx
def dijkstra_forward(
nodes: list[Node], src_id: int, dst_id: int, hf=lambda x: 0.0
) -> list[int]:
coro = dijkstra_iterator(nodes, src_id, hf=hf)
for ctx in coro:
if ctx.node is None:
return [], []
elif ctx.node.id == dst_id:
return ctx.distances[dst_id], list(
build_shortest_path(ctx.previous, dst_id, src_id)
)
def bidir_dijkstra(
nodes: list[Node],
src_id: int,
dst_id: int,
hff=lambda _: 0.0,
hfb=lambda _: 0.0,
consistent: bool = True,
) -> list[int]:
forward = dijkstra_iterator(nodes, src_id, hf=hff)
backward = dijkstra_iterator(nodes, dst_id, hf=hfb)
shortest = sys.maxsize
forward_node = backward_node = None
f = []
b = []
for idx, (ctx_forward, ctx_backward) in enumerate(zip(forward, backward)):
if any(x.node is None for x in (ctx_forward, ctx_backward)):
return [], [], (f, b)
f.append(ctx_forward.node)
b.append(ctx_backward.node)
if forward_node and (
not consistent
or sum(
x.distances[x.node.id] - hf(x.node)
for x, hf in ((ctx_forward, hff), (ctx_backward, hfb))
)
>= shortest
):
forward_path = build_shortest_path(
ctx_forward.previous, forward_node.id, src_id
)
backward_path = build_shortest_path(
ctx_backward.previous, backward_node.id, dst_id
)[::-1]
path = forward_path + backward_path
return (
shortest,
path,
(f, b),
)
else:
for (ctx, hf), (ctx2, hf2) in permutations(
((ctx_forward, hff), (ctx_backward, hfb)), 2
):
for n, d in ctx.node.neighbours.items():
if n in ctx2.visited_nodes:
distance = (
ctx.distances[ctx.node.id]
+ ctx2.distances[n]
+ d
- hf(ctx.node)
- hf2(nodes[n])
)
if distance < shortest:
shortest = distance
forward_node = (
ctx.node if ctx is ctx_forward else nodes[n]
)
backward_node = (
ctx.node if ctx is ctx_backward else nodes[n]
)
print(
f'Iter_{idx}: contact between {forward_node}->{backward_node} with d={shortest}'
)
class Animator:
def __init__(self, nodes: list[Node], title='', draw_edges=True) -> None:
self.fig, self.ax = plt.subplots()
plt.title(title)
plt.tight_layout()
self.ax.set_aspect('equal')
self.i = True
if draw_edges:
edges = {
tuple(sorted((n.id, x))) for n in nodes for x in n.neighbours
}
for edge in edges:
from_node, to_node = [nodes[x] for x in edge]
x = [n.x for n in (from_node, to_node)]
y = [n.y for n in (from_node, to_node)]
plt.plot(x, y, color='gray', linewidth=0.5)
x, y = [n.x for n in nodes], [n.y for n in nodes]
self.ax.scatter = plt.scatter(
x,
y,
c=[0 for _ in range(len(x))],
s=[30] + [10] * (len(nodes) - 2) + [30],
vmin=0,
vmax=3,
cmap=matplotlib.colors.ListedColormap(
['grey', 'springgreen', 'red', 'white']
),
)
self._colors = self.ax.scatter.get_array()
for n in nodes:
if not n.neighbours:
self._colors[n.id] = 3
def update(self, nodes: tuple[Node, Node, list[Node]]):
f, b, s = nodes
if not s:
self._colors[f.id] = 1
self._colors[b.id] = 2
self.ax.scatter.set_array(self._colors)
return (self.ax.scatter,)
else:
x = [n.x for n in s]
y = [n.y for n in s]
if self.i:
c = 'green'
else:
c = 'orange'
ap = self.ax.plot(x, y, color=c, linewidth=2)
self.i = not (self.i)
return ap
def make_animated_gif(
title: str,
g: list[Node],
dst_file: str,
fs: list[Node],
bs: list[Node],
shortest: list[Node],
draw_edges: bool = True,
writer: str = 'ffmpeg',
interval: int = 250,
blinking_ratio=0.5,
):
anim = Animator(g, title=title, draw_edges=draw_edges)
def node_gen():
for fn, bn in zip(fs, bs):
yield fn, bn, []
res = [g[i] for i in shortest]
for _ in range(int(len(fs) * blinking_ratio)):
yield _, _, res
ani = animation.FuncAnimation(
anim.fig,
anim.update,
node_gen(),
interval=interval,
blit=True,
repeat_delay=500,
save_count=len(fs) * 2,
)
ani.save(f'imgs/{dst_file}', writer=writer)
if __name__ == '__main__':
g = generate_random_graph(100, connect_probability=0.1)
cost, sp = dijkstra_forward(g, 0, len(g) - 1)
cost2, sp2 = dijkstra(g, 0, len(g) - 1)
cost3, sp3, (f, b) = bidir_dijkstra(g, 0, len(g) - 1)
cost4, sp4 = dijkstra_forward(g, len(g) - 1, 0)
sp4 = sp4[::-1]
print(cost, cost2, cost3, cost4)
for p in (sp, sp2, sp4, sp3):
print(' -> '.join(str(p) for p in p))
assert sp == sp2 == sp3 == sp4
make_animated_gif(
f'Bidir Dijkstra n={len(f)}', g, 'bidir_100.gif', f, b, sp3
)
| true
| true
|
790d581890de64f1e5155e44980aa6fdd0e58cb9
| 7,023
|
py
|
Python
|
djadmin2/core.py
|
PowerOlive/django-admin2
|
5fa267064358f9017c60a366c316c7f527d45fb2
|
[
"BSD-3-Clause"
] | null | null | null |
djadmin2/core.py
|
PowerOlive/django-admin2
|
5fa267064358f9017c60a366c316c7f527d45fb2
|
[
"BSD-3-Clause"
] | null | null | null |
djadmin2/core.py
|
PowerOlive/django-admin2
|
5fa267064358f9017c60a366c316c7f527d45fb2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WARNING: This file is about to undergo major refactoring by @pydanny per
Issue #99.
"""
from importlib import import_module
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from . import apiviews
from . import types
from . import utils
from . import views
class Admin2(object):
"""
The base Admin2 object.
It keeps a registry of all registered Models and collects the urls of their
related ModelAdmin2 instances.
It also provides an index view that serves as an entry point to the
admin site.
"""
index_view = views.IndexView
login_view = views.LoginView
app_index_view = views.AppIndexView
api_index_view = apiviews.IndexAPIView
def __init__(self, name='admin2'):
self.registry = {}
self.apps = {}
self.app_verbose_names = {}
self.name = name
def register(self, model, model_admin=None, **kwargs):
"""
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
"""
if model in self.registry:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % model)
if not model_admin:
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
# Add the model to the apps registry
app_label = utils.model_options(model).app_label
if app_label in self.apps.keys():
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
def deregister(self, model):
"""
        Deregisters the given model and removes it from self.apps as well.
If the model is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(
'%s was never registered in django-admin2' % model)
# Remove the model from the apps registry
# Get the app label
app_label = utils.model_options(model).app_label
        # Delete the model from its app registry
del self.apps[app_label][model]
        # if no more models are left in an app's registry,
        # delete the app from self.apps.
        if not self.apps[app_label]:
            del self.apps[app_label]
def register_app_verbose_name(self, app_label, app_verbose_name):
"""
Registers the given app label with the given app verbose name.
If a app_label is already registered, this will raise
ImproperlyConfigured.
"""
if app_label in self.app_verbose_names:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % app_label)
self.app_verbose_names[app_label] = app_verbose_name
def deregister_app_verbose_name(self, app_label):
"""
Deregisters the given app label. Remove the app label from the
self.app_verbose_names as well.
If the app label is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(
'%s app label was never registered in django-admin2' % app_label)
def autodiscover(self):
"""
Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
trying to import them.
"""
        for app_name in settings.INSTALLED_APPS:
try:
import_module("%s.admin2" % app_name)
except ImportError as e:
if str(e).startswith("No module named") and 'admin2' in str(e):
continue
raise e
def get_admin_by_name(self, name):
"""
Returns the admin instance that was registered with the passed in
name.
"""
for object_admin in self.registry.values():
if object_admin.name == name:
return object_admin
raise ValueError(
u'No object admin found with name {}'.format(repr(name)))
def get_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
'login_view': self.login_view,
}
def get_app_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_api_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_urls(self):
urlpatterns = [
url(regex=r'^$',
view=self.index_view.as_view(**self.get_index_kwargs()),
name='dashboard'
),
url(regex=r'^auth/user/(?P<pk>\d+)/update/password/$',
view=views.PasswordChangeView.as_view(),
name='password_change'
),
url(regex='^password_change_done/$',
view=views.PasswordChangeDoneView.as_view(),
name='password_change_done'
),
url(regex='^logout/$',
view=views.LogoutView.as_view(),
name='logout'
),
url(regex=r'^(?P<app_label>\w+)/$',
view=self.app_index_view.as_view(
**self.get_app_index_kwargs()),
name='app_index'
),
url(regex=r'^api/v0/$',
view=self.api_index_view.as_view(
**self.get_api_index_kwargs()),
name='api_index'
),
]
for model, model_admin in self.registry.items():
model_options = utils.model_options(model)
urlpatterns += [
url('^{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.urls),
url('^api/v0/{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.api_urls),
]
return urlpatterns
@property
def urls(self):
# We set the application and instance namespace here
return self.get_urls(), self.name, self.name
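
# Hedged usage sketch (editor's illustrative addition; 'Post' and the blog app
# are hypothetical and a configured Django project is assumed, so this is shown
# as a comment rather than executable code):
#
#   from django.conf.urls import include, url
#   from blog.models import Post
#   from djadmin2.core import Admin2
#
#   admin2_site = Admin2()
#   admin2_site.register(Post)
#   urlpatterns = [url(r'^admin2/', include(admin2_site.urls))]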
| 33.927536
| 81
| 0.581803
|
from importlib import import_module
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from . import apiviews
from . import types
from . import utils
from . import views
class Admin2(object):
index_view = views.IndexView
login_view = views.LoginView
app_index_view = views.AppIndexView
api_index_view = apiviews.IndexAPIView
def __init__(self, name='admin2'):
self.registry = {}
self.apps = {}
self.app_verbose_names = {}
self.name = name
def register(self, model, model_admin=None, **kwargs):
if model in self.registry:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % model)
if not model_admin:
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
app_label = utils.model_options(model).app_label
if app_label in self.apps.keys():
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
def deregister(self, model):
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(
'%s was never registered in django-admin2' % model)
app_label = utils.model_options(model).app_label
del self.apps[app_label][model]
# if no more models in an app's registry
        if not self.apps[app_label]:
del self.apps[app_label]
def register_app_verbose_name(self, app_label, app_verbose_name):
if app_label in self.app_verbose_names:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % app_label)
self.app_verbose_names[app_label] = app_verbose_name
def deregister_app_verbose_name(self, app_label):
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(
'%s app label was never registered in django-admin2' % app_label)
def autodiscover(self):
        for app_name in settings.INSTALLED_APPS:
try:
import_module("%s.admin2" % app_name)
except ImportError as e:
if str(e).startswith("No module named") and 'admin2' in str(e):
continue
raise e
def get_admin_by_name(self, name):
for object_admin in self.registry.values():
if object_admin.name == name:
return object_admin
raise ValueError(
u'No object admin found with name {}'.format(repr(name)))
def get_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
'login_view': self.login_view,
}
def get_app_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_api_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_urls(self):
urlpatterns = [
url(regex=r'^$',
view=self.index_view.as_view(**self.get_index_kwargs()),
name='dashboard'
),
url(regex=r'^auth/user/(?P<pk>\d+)/update/password/$',
view=views.PasswordChangeView.as_view(),
name='password_change'
),
url(regex='^password_change_done/$',
view=views.PasswordChangeDoneView.as_view(),
name='password_change_done'
),
url(regex='^logout/$',
view=views.LogoutView.as_view(),
name='logout'
),
url(regex=r'^(?P<app_label>\w+)/$',
view=self.app_index_view.as_view(
**self.get_app_index_kwargs()),
name='app_index'
),
url(regex=r'^api/v0/$',
view=self.api_index_view.as_view(
**self.get_api_index_kwargs()),
name='api_index'
),
]
for model, model_admin in self.registry.items():
model_options = utils.model_options(model)
urlpatterns += [
url('^{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.urls),
url('^api/v0/{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.api_urls),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.name, self.name
| true
| true
|
790d588fc2f69b62d0c9d565aec9181eeb2a9265
| 41,090
|
py
|
Python
|
auto/auto_visualizer/auto_visualizer.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | 4
|
2022-03-13T19:33:43.000Z
|
2022-03-15T22:20:36.000Z
|
auto/auto_visualizer/auto_visualizer.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | null | null | null |
auto/auto_visualizer/auto_visualizer.py
|
lu-w/criticality-recognition
|
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
|
[
"MIT"
] | null | null | null |
# Visualizer is for debugging purposes only
import logging
import math
import random
import threading
import http.server
import socketserver
import os
import re
from shapely import wkt
import matplotlib.pyplot as plt
import mpld3
import screeninfo
import tempfile
import webbrowser
import owlready2
from shapely import geometry
import numpy as np
from tqdm import tqdm
import time as pytime
import auto.auto
from criticality_recognition import phenomena_extraction
# TODO
# - visualize scenario level CPs
# - show "has distance to" in the table for each individual (as a ternary relation) instead of omitting it
####################
# Config constants #
####################
# Classes to not show in visualization
_NO_PRINTING_CLASSES = {"physics.Has_Distance_To", "perception.Is_Full_Occlusion", "perception.Is_Occlusion"}
# Data/object properties to hide from the individual tables shown when hovering
_NO_PRINTING_PROPERTIES = {"perceptional_property", "traffic_related_concept_property",
"descriptive_traffic_entity_property", "traffic_entity_property", "activity_property",
"physical_property", "traffic_modeling_property", "traffic_entity_property",
"automotive_urban_traffic_property", "L1_property", "L2_property", "L3_property",
"L4_property", "L5_property", "L6_property", "traffic_model_element_property",
"criticality_phenomenon_as_object_property", "has_positional_relation",
"has_spatial_relation", "has_dynamical_relation", "SF_spatial_relation",
"performance_spatial_relation", "EH_spatial_relation", "RCC8_spatial_relation", "rcc8dc",
"ehDisjoint"}
# If one hides long property lists, this is the number after which the list is cut off
_MAX_PROPS_DISPLAY = 4
_AVOID_LABEL_COLLISIONS = False
# Logging
logger = logging.getLogger(__name__)
# Helper function for sorting CPs & individuals
def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(str(s))]
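# Illustrative example (added; not in the original file): numeric fragments compare as integers,
# so sorted(["CP 10", "CP 2"], key=natural_sort_key) yields ["CP 2", "CP 10"] rather than lexicographic order.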
#######
# CSS #
#######
# Scene CSS (added is iframes to scenario HTML)
scene_css = """
<style>
svg * {
font-size: 4pt;
}
table {
border: solid 1px #DDEEEE;
border-collapse: collapse;
border-spacing: 0;
font: normal 8px, sans-serif;
}
thead th {
background-color: #DDEFEF;
border: solid 1px #DDEEEE;
color: #336B6B;
padding: 3px;
text-align: left;
text-shadow: 1px 1px 1px #fff;
font-size: 10pt;
}
tbody td {
background-color: #FFFFFF;
border: solid 1px #DDEEEE;
color: #333;
padding: 3px;
text-shadow: 1px 1px 1px #fff;
font-size: 8pt;
}
.cp-tooltip {}
</style>
"""
# Scenario CSS (main CSS)
scenario_css = """
<style>
.slider {
-webkit-appearance: none; /* Override default CSS styles */
appearance: none;
width: 100%; /* Full-width */
height: 25px; /* Specified height */
background: #d3d3d3; /* Grey background */
outline: none; /* Remove outline */
opacity: 0.7; /* Set transparency (for mouse-over effects on hover) */
-webkit-transition: .2s; /* 0.2 seconds transition on hover */
transition: opacity .2s;
}
.slider:hover {
opacity: 1; /* Fully shown on mouse-over */
}
.slider::-webkit-slider-thumb {
-webkit-appearance: none; /* Override default look */
appearance: none;
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
.slider::-moz-range-thumb {
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
</style>"""
def visualize_scenario(scenario, cps=None):
"""
Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).
:param scenario: Either a list of worlds, each world representing a single scene or a single world representing a
whole scenario
:param cps: An optional list of criticality phenomena to visualize as well.
:return: The path to the directory in which to find the created HTML visualization.
"""
pl_html = []
scenario_inst = None
if cps is None:
cps = []
# Fetch scene list
if type(scenario) == list:
scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)
[0] for scene_world in scenario]
elif type(scenario) == owlready2.namespace.World or type(scenario) == owlready2.World:
tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario)
scenario_inst = scenario.search(type=tm.Scenario)[0]
scenes = list(filter(lambda x: tm.Scene in x.is_a, scenario_inst.has_traffic_model))
else:
raise ValueError
scenes = sorted(scenes, key=lambda x: x.inTimePosition[0].numericPosition[0])
# Assemble scenario title
title = "Scenario"
if scenario_inst and hasattr(scenario_inst, "identifier") and len(scenario_inst.identifier) > 0:
title += " " + str(scenario_inst.identifier[0])
scenario_info = "(" + str(len(scenes)) + " Scenes)"
# Main HTML code for index.html
html_body = """<!DOCTYPE html>
<html>
<head>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<meta charset="utf-8">""" + scenario_css + """
<title>""" + title + """</title>
</head>
<body>
<div class=\"d-flex flex-row justify-content-center\"><div class=\"mt-3 py-1 px-6 alert alert-info\" style=\"display: inline-block\" role=\"alert\"><center><h5>""" + title + """ """ + scenario_info + """</h5></center></div></div>
<div class="slidecontainer m-2">
<input type="range" min="1" max=\"""" + str(len(scenes)) + """\" value="1" class="slider" id="myRange">
</div>
<script>
var slider = document.getElementById("myRange");
var last_set = 1
var show_all_cps = true
slider.oninput = function() {
var output = document.getElementById("plt" + this.value);
var last_output = document.getElementById("plt" + last_set);
last_output.style.display = 'none';
output.style.display = 'block';
last_set = this.value
}
function toggle_cps_all_iframes() {
show_all_cps = !show_all_cps
$(".cp-all-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".cp-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".scene-plot").each(function(i) {
this.contentWindow.toggle_cps(show_all_cps)
})
}
function toggle_cp_class(ele, cp_cls_id) {
// 0. disable automatically checked checkbox (will be added again at step 3)
ele.checked = !ele.checked
// 1. find active scene plot
active_scene = $(".scene-plot-container").filter(function(i) {
return this.style.display !== "none"
})[0]
// 2. get CP pred. str for given cp_cls_id
cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]
// 3. Toggle all buttons for this CP pred
$("label > span:contains(" + cp_pred + ")").each(function(i) {
this.parentElement.classList.toggle("active")
this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked
})
// 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index
$(".scene-plot").each(function(k) {
cp_cls_id_scene = -1
for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {
if (cp_pred === this.contentWindow.cp_predicates[i]) {
cp_cls_id_scene = i
}
}
if (cp_cls_id_scene >= 0) {
this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)
}
})
}
</script>
"""
pl_html.append(html_body)
iframes = []
def get_color(p):
# Fetches a different color each time, but ensures that it has a readable contrast.
_LUMA_LIMIT = 170
color = 0
luma = _LUMA_LIMIT
while luma >= _LUMA_LIMIT:
color = random.randrange(0, 0xFFFFFF, 0xF)
luma = 0.2126 * ((color >> 16) & 0xff) + 0.7152 * ((color >> 8) & 0xff) + 0.0722 * ((color >> 0) & 0xff)
return "#" + "%06x" % color
# Create HTML for each scene
for i, scene in enumerate(scenes):
logger.info("Plotting scene " + str(i + 1) + " / " + str(len(scenes)))
scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)]
cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects]))))
cp_color = 0
no_geo_entities = []
width = 24.5
height = 10
try:
primary_screens = list(filter(lambda x: x.is_primary, screeninfo.get_monitors()))
if len(primary_screens) > 0:
width = (primary_screens[0].width_mm / 25.4) * 0.73
height = (primary_screens[0].height_mm / 25.4) * 0.73
except screeninfo.common.ScreenInfoError:
logger.info("No screens found, using default plot size of " + str(width) + " in x " + str(height) + " in")
fig = plt.figure(figsize=(width, height))
plt.axis("equal")
entity_labels = []
entity_relations = []
relations_per_cp_class = dict()
cps_relations = []
cps_for_tooltips = []
centroids_x = []
centroids_y = []
plotted_labels = []
entity_points = dict()
traffic_entities = tqdm(scene.has_traffic_entity)
for entity in traffic_entities:
traffic_entities.set_description(str(entity))
if len(entity.hasGeometry) > 0:
for geo in entity.hasGeometry:
shape = wkt.loads(geo.asWKT[0])
entity_cp_relations = []
points = None
if hasattr(shape, "exterior"):
points = shape.exterior.xy
try:
# .coords raises NotImplementedError for polygons; their exterior was already handled above.
points = shape.coords.xy
except NotImplementedError:
pass
if points:
if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y))\
.any():
x = shape.centroid.x + 0.0
y = shape.centroid.y + 0.8
plt.plot((shape.centroid.x, x), (shape.centroid.y, y), "k-")
else:
x = shape.centroid.x
y = shape.centroid.y
entity_points[entity] = (x, y)
centroids_x.append(x)
centroids_y.append(y)
plt.plot(*points, alpha=.6)
if auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in \
entity.INDIRECT_is_a:
plt.fill(*points, alpha=.3)
if entity.has_yaw is not None:
x_dir = (0.9 * math.cos(math.radians(entity.has_yaw)))
y_dir = (0.9 * math.sin(math.radians(entity.has_yaw)))
plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape="full",
length_includes_head=True, color="gray", alpha=0.6, head_width=1)
entity_labels.append(_describe_entity(entity))
# Plot CPs
entity_scene_cps = list(filter(lambda scp: entity in scp.subjects, scene_cps))
if len(entity_scene_cps) > 0:
plt.plot(x, y, "o", color="r", mec="k", markersize=3, alpha=1)
ent_color = "red"
else:
ent_color = "black"
if entity.identifier and len(entity.identifier) > 0 and not entity.is_persistent and not \
(isinstance(entity.identifier[0], str) and entity.identifier[0].startswith("repr")):
plt.annotate(entity.identifier[0], (x+0.2, y+0.2), color=ent_color)
already_drawn_cps = []
# init dict
for cp in entity_scene_cps:
if cp.predicate not in relations_per_cp_class.keys():
relations_per_cp_class[cp.predicate] = []
for cp in entity_scene_cps:
if cp not in already_drawn_cps:
same_line_cps = [x for x in entity_scene_cps if
[y for z in x.objects.values() for y in z] ==
[y for z in cp.objects.values() for y in z]]
labels = [(x.predicate.split("(")[0],
(x.predicate.split("(")[1].replace(")", ""), str(x)))
for x in same_line_cps]
already_drawn_cps += same_line_cps
subj_x = x
subj_y = y
for objs in cp.objects.values():
for obj in objs:
if len(obj.hasGeometry) > 0:
if obj in entity_points.keys():
obj_x = entity_points[obj][0]
obj_y = entity_points[obj][1]
else:
geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0])
obj_x = geom_o.centroid.x
obj_y = geom_o.centroid.y
m = (obj_y - subj_y) / (obj_x - subj_x)
b = subj_y - m * subj_x
head_width = 0.2
head_length = 1.5 * head_width
arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y),
color=cp_colors[cp_color], shape="full",
length_includes_head=True, head_width=head_width,
head_length=head_length)
if len(labels[0]) > 1:
label_row = " ".join([label[0] for label in labels])
else:
label_row = labels[0]
x_offset = (len(label_row) * 0.055) / 2 - 0.055
if subj_x > obj_x:
label_x = obj_x + abs(subj_x - obj_x) / 2 - x_offset
else:
label_x = obj_x - abs(subj_x - obj_x) / 2 - x_offset
a = math.degrees(math.atan(m))
for l_i, label in enumerate(labels):
label_string = label[0].replace("CP_", "")
label_len = (len(label_string) * 0.09 + 0.1)
label_x_offset = abs(math.cos(math.atan(m)) * label_len)
while True:
# Finds a free space to plot label
label_y = m * label_x + b + 0.05
label_x_1 = label_x - label_x_offset / 2 + 0.05
label_y_1 = m * label_x_1 + b
label_x_2 = label_x + label_x_offset / 2 + 0.05
label_y_2 = m * label_x_2 + b
label_line1 = geometry.LineString([(label_x_1, label_y_1),
(label_x_2, label_y_2)])
new_bb = label_line1.buffer(0.1, cap_style=2)
new_bb_rect = list(zip(*new_bb.exterior.xy))[:-1]
if not _AVOID_LABEL_COLLISIONS or not \
_has_collision_with_bbs(plotted_labels, new_bb_rect):
break
label_x += label_x_offset / 10
annot = plt.annotate(label_string,
(label_x, label_y), color=cp_colors[cp_color],
rotation=a, fontsize=2, rotation_mode="anchor")
entity_cp_relations.append(annot)
cps_relations.append(annot)
relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow]
cps_for_tooltips.append(same_line_cps[l_i])
plotted_labels.append(new_bb_rect)
label_x += label_x_offset
subj_x = obj_x
subj_y = obj_y
entity_cp_relations += [arrow]
cp_color = (cp_color + 1) % len(cp_colors)
entity_relations.append(entity_cp_relations)
elif len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0:
no_geo_entities.append(_describe_entity(entity))
logger.info("Done with layout, creating MPLD3 plot, JS plugins, and HTML string")
pl2 = plt.plot(centroids_x, centroids_y, "o", color="b", mec="k", markersize=2, mew=1, alpha=.4)
tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations,
targets_per_cp=relations_per_cp_class)
fig.tight_layout()
mpld3.plugins.connect(fig, tooltip_individuals)
for h, cp_text in enumerate(cps_relations):
tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h])
mpld3.plugins.connect(fig, tooltip_cp)
html = "\n\t\t<div class=\"container-fluid scene-plot-container\" id=\"plt" + str(i + 1) + "\" style =\""
if i != 0:
html += "display: none;"
html += "\">"
html += """
<div class="row">
<div class="col-md-1">
"""
cp_count_total = len([x for x in cps if (isinstance(x.traffic_model, list) and scene in x.traffic_model) or
x.traffic_model == scenario_inst])
html += """<div class="">
<label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">
<input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>
<span>Show all criticality phenomena (%s)</span>
</label>""" % ("100%", str(i), str(cp_count_total))
for l, pred in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)):
cp_count = len([x for x in cps if x.predicate == pred and ((isinstance(x.traffic_model, list) and
scene in x.traffic_model) or x.traffic_model == scenario_inst)])
html += """
<br />
<label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">
<input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>
<span>%s (%s)</span>
</label>""" % ("100%", str(i), str(l), str(l), pred, str(cp_count))
html += """
</div>
</div>
<div class="col-md-11">
"""
html += "<div class=\"embed-responsive embed-responsive-16by9\">\n"
html += "\t\t\t\t\t\t<iframe class=\"scene-plot\" src=\"scene" + str(i + 1) + ".html\" class=\"embed-responsive-item\" style=\"width: 100%; height: " + str(height*1.27) + "in\" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n"
iframe_html = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
</head>
<body>"""
iframe_html += scene_css
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<div class="btn-group btn-group-toggle" data-bs-toggle="buttons">
<label class="btn btn-secondary active">
<input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals
</label>
<label class="btn btn-secondary active">
<input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip
</label>
</div>
</div>
<script>
var show_tooltips = true
var show_long_ind = true
cps = []
cp_targets = []
cp_targets_per_class = []
function toggle_tooltips(ele) {
ele.parentElement.classList.toggle("active")
show_tooltips = !show_tooltips
}
function toggle_all_ind_relations(ele) {
ele.parentElement.classList.toggle("active")
show_long_ind = !show_long_ind
}
function toggle_cp_targets(targets, state) {
for (let j = 0; j < targets.length; j++) {
var x = mpld3.get_element(targets[j])
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++) {
for (var l = 0; l < tog._groups[k].length; l++){
if (state) {
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
function toggle_cps(state) {
for (let i = 0; i < cp_targets.length; i++) {
toggle_cp_targets(cp_targets[i], state)
}
}
function toggle_cp_class(cp_class, state) {
targets = cp_targets_per_class[cp_class]
toggle_cp_targets(targets, state)
}
</script>
<div class="card m-2">
<div class="card-title d-flex flex-row justify-content-center m-1">
<h5>"""
if len(scene.inTimePosition) > 0 and len(scene.inTimePosition[0].numericPosition) > 0:
time = "%.2f s" % scene.inTimePosition[0].numericPosition[0]
if scenario_inst and len(scenario_inst.hasEnd) > 0 and len(scenario_inst.hasEnd[0].inTimePosition) > 0 and \
len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0:
time += " / %.2f s" % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]
else:
time += " / " + str(len(scenes))
else:
time = str(i) + " / " + str(len(scenes))
iframe_html += "Scene " + time + "<br />"
iframe_html += """
</h5>
</div>
<div class="card-body m-0 p-0 d-flex justify-content-center">
"""
scene_html = mpld3.fig_to_html(fig)
iframe_html += ''.join("\t\t"+line+"\n" for line in scene_html.splitlines())
iframe_html += """
</div>
</div>"""
if len(no_geo_entities) > 0:
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">
Show scene individuals with no geometric representation (%s)
</a>
</div>
<div class="container-fluid collapse" id="noGeoCollapse">
<div class="card card-body m-2">""" % str(len(no_geo_entities))
iframe_html += "".join(no_geo_entities)
iframe_html += """
</div>
</div>"""
iframe_html += "\t</body>\n</html>"
iframes.append(iframe_html)
html += "\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>"
pl_html.append(html)
# Assemble main HTML
pl_html.append("\n\t</body>\n</html>")
# Write main HTML to index.html
tmp_dir = tempfile.mkdtemp()
index_path = tmp_dir + "/index.html"
with open(index_path, "w") as file:
for html in pl_html:
file.write(html)
# Write each scene HTML to a single file
for i, iframe in enumerate(iframes):
frame_path = tmp_dir + "/scene" + str(i + 1) + ".html"
with open(frame_path, "w") as file:
for html in iframe:
file.write(html)
# Starts webserver
os.chdir(tmp_dir)
threading.Thread(target=socketserver.TCPServer(("", 8000),
http.server.SimpleHTTPRequestHandler).serve_forever).start()
logger.info("Visualization is available at: http://localhost:8000")
webbrowser.open("http://localhost:8000")
return tmp_dir
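# --- Illustrative usage sketch (added; not part of the original module) ---
# `scenario_world` stands for an owlready2 World already populated with scenario individuals,
# and `phenomena` for previously extracted criticality phenomena; both names are assumptions.
#
# out_dir = visualize_scenario(scenario_world, cps=phenomena)
# # -> writes index.html plus one scene<i>.html per scene into a temp directory, serves it
# # at http://localhost:8000, and returns the directory path.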
def _describe_entity(entity):
"""
Describes the given traffic entity as an HTML list.
:param entity: An object of an owlready2 class.
:return: The HTML-representation of entity.
"""
cls = phenomena_extraction.get_most_specific_classes([entity])
label = "<table class=\"m-2\"><thead><tr><th>Individual</th><th>" + str(entity)
label += " (" + ", ".join(cls[0][1]) + ")</th></tr></thead><tbody><tr><td>is_a</td><td>"
label += ", ".join([str(x) for x in entity.is_a])
label += "</td></tr>"
for prop in entity.get_properties():
if str(prop.python_name) not in _NO_PRINTING_PROPERTIES:
label += "<tr>"
label += "<td>"
label += str(prop.python_name)
label += "</td>"
label += "<td>"
label += ", ".join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]])
if len(prop[entity]) > _MAX_PROPS_DISPLAY:
label += "<text class=\"extended_ind_props\">"
label += ", ".join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + "</text>"
label += "<text class=\"extended_ind_props_dots\" style=\"display: none;\">...</text>"
label += "</td>"
label += "</tr>"
label += "</tbody></table>"
return label
def _describe_cp(cp):
label = "<table class=\"m-2\"><thead><tr><th>Criticality Phenomenon</th><th>" + \
str(cp.predicate).split("(")[1].replace(")", "")
label += "</th></tr></thead><tbody><tr><td>Start time</td><td>"
time = cp.at_time()
if isinstance(time, tuple):
label += str(time[0])
else:
label += str(time)
label += "</td></tr><tr><td>End time</td><td>"
if isinstance(time, tuple):
label += str(time[1])
else:
label += str(time)
label += "</td></tr><tr><td>Subject(s)</td><td>"
if len(cp.subjects) > 0:
subj_and_classes = phenomena_extraction.get_most_specific_classes(cp.subjects)
label += "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes])
label += "</td></tr><tr><td>Predicate</td><td>"
label += str(cp.predicate)
label += "</td></tr><tr><td>Object(s)</td><td>"
if len(cp.objects) > 0:
for obj_predicate in cp.objects.keys():
obj_and_classes = phenomena_extraction.get_most_specific_classes(cp.objects[obj_predicate])
label += obj_predicate + ":<br/>" + "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in
obj_and_classes])
if len(cp.objects.keys()) > 1:
label += "<br/>"
label += "</td></tr>"
label += "</tbody></table>"
return label
#################
# MPLD3 Plugins #
#################
class ToolTipAndClickInfo(mpld3.plugins.PointHTMLTooltip):
# Handles:
# 1. the criticality phenomena toggling when clicking on CP subjects (red circles)
# 2. the mouse-overs when hovering over subjects
# 3. the Ctrl+Click new window action when clicking on subjects
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
targets_per_cp:null,
cps:null,
hoffset:0,
voffset:10,
targets:null};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id)
var labels = this.props.labels
cps = obj.elements()
cp_targets = this.props.targets
cp_targets_per_class = this.props.targets_per_cp
cp_predicates = this.props.cps
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
function show_cp(d, i) {
if (!window.event.ctrlKey) {
for (let j = 0; j < cp_targets[i].length; j++) {
var x = mpld3.get_element(cp_targets[i][j]);
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++){
for (var l = 0; l < tog._groups[k].length; l++){
if (tog._groups[k][l].style.display === "none"){
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
}
obj.elements()
.on("mouseover", function(d, i) {
if (show_tooltips) {
tooltip.html(labels[i]).style("visibility", "visible");
var long_descrs = document.getElementsByClassName("extended_ind_props")
var dots_descrs = document.getElementsByClassName("extended_ind_props_dots")
for (let i = 0; i < long_descrs.length; i++) {
if(!show_long_ind) {
long_descrs[i].style.display = "none";
} else {
long_descrs[i].style.display = "inline";
}
}
for (let i = 0; i < dots_descrs.length; i++) {
if(!show_long_ind) {
dots_descrs[i].style.display = "inline";
} else {
dots_descrs[i].style.display = "none";
}
}
}
})
.on("mousemove", function(d, i) {
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mousedown.callout", show_cp)
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");
})
.on("click", function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip.html(labels[i])._groups[0][0].innerHTML
);
}
});
};
"""
def __init__(self, points, labels=None, targets=None, targets_per_cp=None, hoffset=0, voffset=10, css=None):
targets_ = []
for x in targets or []:
x_ = []
for y in x:
x_.append(mpld3.utils.get_id(y))
targets_.append(x_)
self.targets_per_cp = []
self.cps = []
if targets_per_cp:
self.cps = sorted(targets_per_cp.keys(), key=natural_sort_key)
for cp in self.cps:
x_ = []
for y in targets_per_cp[cp]:
x_.append(mpld3.utils.get_id(y))
self.targets_per_cp.append(x_)
super().__init__(points, labels, targets_, hoffset, voffset, css)
self.dict_["targets_per_cp"] = self.targets_per_cp
self.dict_["cps"] = self.cps
class CPTooltip(mpld3.plugins.PluginBase):
# Handles the Ctrl+Click action on criticality phenomena ID (opens a new tab).
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("cpstooltip", CPTooltip);
CPTooltip.prototype = Object.create(mpld3.Plugin.prototype);
CPTooltip.prototype.constructor = CPTooltip;
CPTooltip.prototype.requiredProps = ["id", "tooltip_html"];
function CPTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
CPTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var tooltip_html = this.props.tooltip_html;
var tooltip = d3.select("body").append("div")
.attr("class", "cp-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.obj._groups[0][0].onmouseover = function(d, i) {
tooltip.html(tooltip_html).style("visibility", "visible");
};
obj.obj._groups[0][0].onmousemove = function(d, i) {
tooltip
.style("top", d.clientY + 10 + "px")
.style("left", d.clientX + 0 + "px");
}.bind(this);
obj.obj._groups[0][0].onclick = function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip_html
);
}
};
obj.obj._groups[0][0].onmouseout = function(d, i) {
tooltip.style("visibility", "hidden");
};
}
"""
def __init__(self, text, cp):
tooltip_html = _describe_cp(cp)
self.dict_ = {"type": "cpstooltip",
"id": mpld3.utils.get_id(text),
"tooltip_html": tooltip_html}
def _has_collision_with_bbs(existing_bbs, new_bb):
"""
Checks if the new rectangle (new_bb) collides with some existing rectangles.
"""
a_left = min([x[0] for x in new_bb])
a_right = max([x[0] for x in new_bb])
a_bottom = min([x[1] for x in new_bb])
a_top = max([x[1] for x in new_bb])
for bb in existing_bbs:
b_left = min([x[0] for x in bb])
b_right = max([x[0] for x in bb])
b_bottom = min([x[1] for x in bb])
b_top = max([x[1] for x in bb])
if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
return True
return False
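# Illustrative check (added; not in the original file): a unit square collides with a square
# shifted by (0.5, 0.5), but not with one shifted by (2, 2).
# _has_collision_with_bbs([[(0, 0), (1, 0), (1, 1), (0, 1)]],
# [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]) # -> True
# _has_collision_with_bbs([[(0, 0), (1, 0), (1, 1), (0, 1)]],
# [(2, 2), (3, 2), (3, 3), (2, 3)]) # -> False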
| 49.091995
| 298
| 0.497785
|
import logging
import math
import random
import threading
import http.server
import socketserver
import os
import re
from shapely import wkt
import matplotlib.pyplot as plt
import mpld3
import screeninfo
import tempfile
import webbrowser
import owlready2
from shapely import geometry
import numpy as np
from tqdm import tqdm
import time as pytime
import auto.auto
from criticality_recognition import phenomena_extraction
ng_property", "traffic_entity_property",
"automotive_urban_traffic_property", "L1_property", "L2_property", "L3_property",
"L4_property", "L5_property", "L6_property", "traffic_model_element_property",
"criticality_phenomenon_as_object_property", "has_positional_relation",
"has_spatial_relation", "has_dynamical_relation", "SF_spatial_relation",
"performance_spatial_relation", "EH_spatial_relation", "RCC8_spatial_relation", "rcc8dc",
"ehDisjoint"}
_MAX_PROPS_DISPLAY = 4
_AVOID_LABEL_COLLISIONS = False
logger = logging.getLogger(__name__)
def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(str(s))]
scene_css = """
<style>
svg * {
font-size: 4pt;
}
table {
border: solid 1px #DDEEEE;
border-collapse: collapse;
border-spacing: 0;
font: normal 8px, sans-serif;
}
thead th {
background-color: #DDEFEF;
border: solid 1px #DDEEEE;
color: #336B6B;
padding: 3px;
text-align: left;
text-shadow: 1px 1px 1px #fff;
font-size: 10pt;
}
tbody td {
background-color: #FFFFFF;
border: solid 1px #DDEEEE;
color: #333;
padding: 3px;
text-shadow: 1px 1px 1px #fff;
font-size: 8pt;
}
.cp-tooltip {}
</style>
"""
scenario_css = """
<style>
.slider {
-webkit-appearance: none; /* Override default CSS styles */
appearance: none;
width: 100%; /* Full-width */
height: 25px; /* Specified height */
background: #d3d3d3; /* Grey background */
outline: none; /* Remove outline */
opacity: 0.7; /* Set transparency (for mouse-over effects on hover) */
-webkit-transition: .2s; /* 0.2 seconds transition on hover */
transition: opacity .2s;
}
.slider:hover {
opacity: 1; /* Fully shown on mouse-over */
}
.slider::-webkit-slider-thumb {
-webkit-appearance: none; /* Override default look */
appearance: none;
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
.slider::-moz-range-thumb {
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
</style>"""
def visualize_scenario(scenario, cps=None):
pl_html = []
scenario_inst = None
if cps is None:
cps = []
if type(scenario) == list:
scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)
[0] for scene_world in scenario]
elif type(scenario) == owlready2.namespace.World or type(scenario) == owlready2.World:
tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario)
scenario_inst = scenario.search(type=tm.Scenario)[0]
scenes = list(filter(lambda x: tm.Scene in x.is_a, scenario_inst.has_traffic_model))
else:
raise ValueError
scenes = sorted(scenes, key=lambda x: x.inTimePosition[0].numericPosition[0])
title = "Scenario"
if scenario_inst and hasattr(scenario_inst, "identifier") and len(scenario_inst.identifier) > 0:
title += " " + str(scenario_inst.identifier[0])
scenario_info = "(" + str(len(scenes)) + " Scenes)"
html_body = """<!DOCTYPE html>
<html>
<head>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<meta charset="utf-8">""" + scenario_css + """
<title>""" + title + """</title>
</head>
<body>
<div class=\"d-flex flex-row justify-content-center\"><div class=\"mt-3 py-1 px-6 alert alert-info\" style=\"display: inline-block\" role=\"alert\"><center><h5>""" + title + """ """ + scenario_info + """</h5></center></div></div>
<div class="slidecontainer m-2">
<input type="range" min="1" max=\"""" + str(len(scenes)) + """\" value="1" class="slider" id="myRange">
</div>
<script>
var slider = document.getElementById("myRange");
var last_set = 1
var show_all_cps = true
slider.oninput = function() {
var output = document.getElementById("plt" + this.value);
var last_output = document.getElementById("plt" + last_set);
last_output.style.display = 'none';
output.style.display = 'block';
last_set = this.value
}
function toggle_cps_all_iframes() {
show_all_cps = !show_all_cps
$(".cp-all-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".cp-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".scene-plot").each(function(i) {
this.contentWindow.toggle_cps(show_all_cps)
})
}
function toggle_cp_class(ele, cp_cls_id) {
// 0. disable automatically checked checkbox (will be added again at step 3)
ele.checked = !ele.checked
// 1. find active scene plot
active_scene = $(".scene-plot-container").filter(function(i) {
return this.style.display !== "none"
})[0]
// 2. get CP pred. str for given cp_cls_id
cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]
// 3. Toggle all buttons for this CP pred
$("label > span:contains(" + cp_pred + ")").each(function(i) {
this.parentElement.classList.toggle("active")
this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked
})
// 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index
$(".scene-plot").each(function(k) {
cp_cls_id_scene = -1
for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {
if (cp_pred === this.contentWindow.cp_predicates[i]) {
cp_cls_id_scene = i
}
}
if (cp_cls_id_scene >= 0) {
this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)
}
})
}
</script>
"""
pl_html.append(html_body)
iframes = []
def get_color(p):
_LUMA_LIMIT = 170
color = 0
luma = _LUMA_LIMIT
while luma >= _LUMA_LIMIT:
color = random.randrange(0, 0xFFFFFF, 0xF)
luma = 0.2126 * ((color >> 16) & 0xff) + 0.7152 * ((color >> 8) & 0xff) + 0.0722 * ((color >> 0) & 0xff)
return "#" + "%06x" % color
for i, scene in enumerate(scenes):
logger.info("Plotting scene " + str(i + 1) + " / " + str(len(scenes)))
scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)]
cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects]))))
cp_color = 0
no_geo_entities = []
width = 24.5
height = 10
try:
primary_screens = list(filter(lambda x: x.is_primary, screeninfo.get_monitors()))
if len(primary_screens) > 0:
width = (primary_screens[0].width_mm / 25.4) * 0.73
height = (primary_screens[0].height_mm / 25.4) * 0.73
except screeninfo.common.ScreenInfoError:
logger.info("No screens found, using default plot size of " + str(width) + " in x " + str(height) + " in")
fig = plt.figure(figsize=(width, height))
plt.axis("equal")
entity_labels = []
entity_relations = []
relations_per_cp_class = dict()
cps_relations = []
cps_for_tooltips = []
centroids_x = []
centroids_y = []
plotted_labels = []
entity_points = dict()
traffic_entities = tqdm(scene.has_traffic_entity)
for entity in traffic_entities:
traffic_entities.set_description(str(entity))
if len(entity.hasGeometry) > 0:
for geo in entity.hasGeometry:
shape = wkt.loads(geo.asWKT[0])
entity_cp_relations = []
points = None
if hasattr(shape, "exterior"):
points = shape.exterior.xy
try:
points = shape.coords.xy
except NotImplementedError:
pass
if points:
if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y))\
.any():
x = shape.centroid.x + 0.0
y = shape.centroid.y + 0.8
plt.plot((shape.centroid.x, x), (shape.centroid.y, y), "k-")
else:
x = shape.centroid.x
y = shape.centroid.y
entity_points[entity] = (x, y)
centroids_x.append(x)
centroids_y.append(y)
plt.plot(*points, alpha=.6)
if auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in \
entity.INDIRECT_is_a:
plt.fill(*points, alpha=.3)
if entity.has_yaw is not None:
x_dir = (0.9 * math.cos(math.radians(entity.has_yaw)))
y_dir = (0.9 * math.sin(math.radians(entity.has_yaw)))
plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape="full",
length_includes_head=True, color="gray", alpha=0.6, head_width=1)
entity_labels.append(_describe_entity(entity))
entity_scene_cps = list(filter(lambda scp: entity in scp.subjects, scene_cps))
if len(entity_scene_cps) > 0:
plt.plot(x, y, "o", color="r", mec="k", markersize=3, alpha=1)
ent_color = "red"
else:
ent_color = "black"
if entity.identifier and len(entity.identifier) > 0 and not entity.is_persistent and not \
(isinstance(entity.identifier[0], str) and entity.identifier[0].startswith("repr")):
plt.annotate(entity.identifier[0], (x+0.2, y+0.2), color=ent_color)
already_drawn_cps = []
for cp in entity_scene_cps:
if cp.predicate not in relations_per_cp_class.keys():
relations_per_cp_class[cp.predicate] = []
for cp in entity_scene_cps:
if cp not in already_drawn_cps:
same_line_cps = [x for x in entity_scene_cps if
[y for z in x.objects.values() for y in z] ==
[y for z in cp.objects.values() for y in z]]
labels = [(x.predicate.split("(")[0],
(x.predicate.split("(")[1].replace(")", ""), str(x)))
for x in same_line_cps]
already_drawn_cps += same_line_cps
subj_x = x
subj_y = y
for objs in cp.objects.values():
for obj in objs:
if len(obj.hasGeometry) > 0:
if obj in entity_points.keys():
obj_x = entity_points[obj][0]
obj_y = entity_points[obj][1]
else:
geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0])
obj_x = geom_o.centroid.x
obj_y = geom_o.centroid.y
m = (obj_y - subj_y) / (obj_x - subj_x)
b = subj_y - m * subj_x
head_width = 0.2
head_length = 1.5 * head_width
arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y),
color=cp_colors[cp_color], shape="full",
length_includes_head=True, head_width=head_width,
head_length=head_length)
if len(labels[0]) > 1:
label_row = " ".join([label[0] for label in labels])
else:
label_row = labels[0]
x_offset = (len(label_row) * 0.055) / 2 - 0.055
if subj_x > obj_x:
label_x = obj_x + abs(subj_x - obj_x) / 2 - x_offset
else:
label_x = obj_x - abs(subj_x - obj_x) / 2 - x_offset
a = math.degrees(math.atan(m))
for l_i, label in enumerate(labels):
label_string = label[0].replace("CP_", "")
label_len = (len(label_string) * 0.09 + 0.1)
label_x_offset = abs(math.cos(math.atan(m)) * label_len)
while True:
label_y = m * label_x + b + 0.05
label_x_1 = label_x - label_x_offset / 2 + 0.05
label_y_1 = m * label_x_1 + b
label_x_2 = label_x + label_x_offset / 2 + 0.05
label_y_2 = m * label_x_2 + b
label_line1 = geometry.LineString([(label_x_1, label_y_1),
(label_x_2, label_y_2)])
new_bb = label_line1.buffer(0.1, cap_style=2)
new_bb_rect = list(zip(*new_bb.exterior.xy))[:-1]
if not _AVOID_LABEL_COLLISIONS or not \
_has_collision_with_bbs(plotted_labels, new_bb_rect):
break
label_x += label_x_offset / 10
annot = plt.annotate(label_string,
(label_x, label_y), color=cp_colors[cp_color],
rotation=a, fontsize=2, rotation_mode="anchor")
entity_cp_relations.append(annot)
cps_relations.append(annot)
relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow]
cps_for_tooltips.append(same_line_cps[l_i])
plotted_labels.append(new_bb_rect)
label_x += label_x_offset
subj_x = obj_x
subj_y = obj_y
entity_cp_relations += [arrow]
cp_color = (cp_color + 1) % len(cp_colors)
entity_relations.append(entity_cp_relations)
elif len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0:
no_geo_entities.append(_describe_entity(entity))
logger.info("Done with layout, creating MPLD3 plot, JS plugins, and HTML string")
pl2 = plt.plot(centroids_x, centroids_y, "o", color="b", mec="k", markersize=2, mew=1, alpha=.4)
tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations,
targets_per_cp=relations_per_cp_class)
fig.tight_layout()
mpld3.plugins.connect(fig, tooltip_individuals)
for h, cp_text in enumerate(cps_relations):
tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h])
mpld3.plugins.connect(fig, tooltip_cp)
html = "\n\t\t<div class=\"container-fluid scene-plot-container\" id=\"plt" + str(i + 1) + "\" style =\""
if i != 0:
html += "display: none;"
html += "\">"
html += """
<div class="row">
<div class="col-md-1">
"""
cp_count_total = len([x for x in cps if (isinstance(x.traffic_model, list) and scene in x.traffic_model) or
x.traffic_model == scenario_inst])
html += """<div class="">
<label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">
<input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>
<span>Show all criticality phenomena (%s)</span>
</label>""" % ("100%", str(i), str(cp_count_total))
for l, pred in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)):
cp_count = len([x for x in cps if x.predicate == pred and ((isinstance(x.traffic_model, list) and
scene in x.traffic_model) or x.traffic_model == scenario_inst)])
html += """
<br />
<label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">
<input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>
<span>%s (%s)</span>
</label>""" % ("100%", str(i), str(l), str(l), pred, str(cp_count))
html += """
</div>
</div>
<div class="col-md-11">
"""
html += "<div class=\"embed-responsive embed-responsive-16by9\">\n"
html += "\t\t\t\t\t\t<iframe class=\"scene-plot\" src=\"scene" + str(i + 1) + ".html\" class=\"embed-responsive-item\" style=\"width: 100%; height: " + str(height*1.27) + "in\" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n"
iframe_html = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
</head>
<body>"""
iframe_html += scene_css
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<div class="btn-group btn-group-toggle" data-bs-toggle="buttons">
<label class="btn btn-secondary active">
<input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals
</label>
<label class="btn btn-secondary active">
<input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip
</label>
</div>
</div>
<script>
var show_tooltips = true
var show_long_ind = true
cps = []
cp_targets = []
cp_targets_per_class = []
function toggle_tooltips(ele) {
ele.parentElement.classList.toggle("active")
show_tooltips = !show_tooltips
}
function toggle_all_ind_relations(ele) {
ele.parentElement.classList.toggle("active")
show_long_ind = !show_long_ind
}
function toggle_cp_targets(targets, state) {
for (let j = 0; j < targets.length; j++) {
var x = mpld3.get_element(targets[j])
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++) {
for (var l = 0; l < tog._groups[k].length; l++){
if (state) {
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
function toggle_cps(state) {
for (let i = 0; i < cp_targets.length; i++) {
toggle_cp_targets(cp_targets[i], state)
}
}
function toggle_cp_class(cp_class, state) {
targets = cp_targets_per_class[cp_class]
toggle_cp_targets(targets, state)
}
</script>
<div class="card m-2">
<div class="card-title d-flex flex-row justify-content-center m-1">
<h5>"""
if len(scene.inTimePosition) > 0 and len(scene.inTimePosition[0].numericPosition) > 0:
time = "%.2f s" % scene.inTimePosition[0].numericPosition[0]
if scenario_inst and len(scenario_inst.hasEnd) > 0 and len(scenario_inst.hasEnd[0].inTimePosition) > 0 and \
len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0:
time += " / %.2f s" % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]
else:
time += " / " + str(len(scenes))
else:
time = str(i) + " / " + str(len(scenes))
iframe_html += "Scene " + time + "<br />"
iframe_html += """
</h5>
</div>
<div class="card-body m-0 p-0 d-flex justify-content-center">
"""
scene_html = mpld3.fig_to_html(fig)
iframe_html += ''.join("\t\t"+line+"\n" for line in scene_html.splitlines())
iframe_html += """
</div>
</div>"""
if len(no_geo_entities) > 0:
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">
Show scene individuals with no geometric representation (%s)
</a>
</div>
<div class="container-fluid collapse" id="noGeoCollapse">
<div class="card card-body m-2">""" % str(len(no_geo_entities))
iframe_html += "".join(no_geo_entities)
iframe_html += """
</div>
</div>"""
iframe_html += "\t</body>\n</html>"
iframes.append(iframe_html)
html += "\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>"
pl_html.append(html)
pl_html.append("\n\t</body>\n</html>")
tmp_dir = tempfile.mkdtemp()
index_path = tmp_dir + "/index.html"
with open(index_path, "w") as file:
for html in pl_html:
file.write(html)
for i, iframe in enumerate(iframes):
frame_path = tmp_dir + "/scene" + str(i + 1) + ".html"
with open(frame_path, "w") as file:
for html in iframe:
file.write(html)
os.chdir(tmp_dir)
threading.Thread(target=socketserver.TCPServer(("", 8000),
http.server.SimpleHTTPRequestHandler).serve_forever).start()
logger.info("Visualization is available at: http://localhost:8000")
webbrowser.open("http://localhost:8000")
return tmp_dir
def _describe_entity(entity):
cls = phenomena_extraction.get_most_specific_classes([entity])
label = "<table class=\"m-2\"><thead><tr><th>Individual</th><th>" + str(entity)
label += " (" + ", ".join(cls[0][1]) + ")</th></tr></thead><tbody><tr><td>is_a</td><td>"
label += ", ".join([str(x) for x in entity.is_a])
label += "</td></tr>"
for prop in entity.get_properties():
if str(prop.python_name) not in _NO_PRINTING_PROPERTIES:
label += "<tr>"
label += "<td>"
label += str(prop.python_name)
label += "</td>"
label += "<td>"
label += ", ".join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]])
if len(prop[entity]) > _MAX_PROPS_DISPLAY:
label += "<text class=\"extended_ind_props\">"
label += ", ".join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + "</text>"
label += "<text class=\"extended_ind_props_dots\" style=\"display: none;\">...</text>"
label += "</td>"
label += "</tr>"
label += "</tbody></table>"
return label
def _describe_cp(cp):
label = "<table class=\"m-2\"><thead><tr><th>Criticality Phenomenon</th><th>" + \
str(cp.predicate).split("(")[1].replace(")", "")
label += "</th></tr></thead><tbody><tr><td>Start time</td><td>"
time = cp.at_time()
if isinstance(time, tuple):
label += str(time[0])
else:
label += str(time)
label += "</td></tr><tr><td>End time</td><td>"
if isinstance(time, tuple):
label += str(time[1])
else:
label += str(time)
label += "</td></tr><tr><td>Subject(s)</td><td>"
if len(cp.subjects) > 0:
subj_and_classes = phenomena_extraction.get_most_specific_classes(cp.subjects)
label += "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes])
label += "</td></tr><tr><td>Predicate</td><td>"
label += str(cp.predicate)
label += "</td></tr><tr><td>Object(s)</td><td>"
if len(cp.objects) > 0:
for obj_predicate in cp.objects.keys():
obj_and_classes = phenomena_extraction.get_most_specific_classes(cp.objects[obj_predicate])
label += obj_predicate + ":<br/>" + "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in
obj_and_classes])
if len(cp.objects.keys()) > 1:
label += "<br/>"
label += "</td></tr>"
label += "</tbody></table>"
return label
class ToolTipAndClickInfo(mpld3.plugins.PointHTMLTooltip):
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
targets_per_cp:null,
cps:null,
hoffset:0,
voffset:10,
targets:null};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id)
var labels = this.props.labels
cps = obj.elements()
cp_targets = this.props.targets
cp_targets_per_class = this.props.targets_per_cp
cp_predicates = this.props.cps
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
function show_cp(d, i) {
if (!window.event.ctrlKey) {
for (let j = 0; j < cp_targets[i].length; j++) {
var x = mpld3.get_element(cp_targets[i][j]);
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++){
for (var l = 0; l < tog._groups[k].length; l++){
if (tog._groups[k][l].style.display === "none"){
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
}
obj.elements()
.on("mouseover", function(d, i) {
if (show_tooltips) {
tooltip.html(labels[i]).style("visibility", "visible");
var long_descrs = document.getElementsByClassName("extended_ind_props")
var dots_descrs = document.getElementsByClassName("extended_ind_props_dots")
for (let i = 0; i < long_descrs.length; i++) {
if(!show_long_ind) {
long_descrs[i].style.display = "none";
} else {
long_descrs[i].style.display = "inline";
}
}
for (let i = 0; i < dots_descrs.length; i++) {
if(!show_long_ind) {
dots_descrs[i].style.display = "inline";
} else {
dots_descrs[i].style.display = "none";
}
}
}
})
.on("mousemove", function(d, i) {
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mousedown.callout", show_cp)
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");
})
.on("click", function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip.html(labels[i])._groups[0][0].innerHTML
);
}
});
};
"""
def __init__(self, points, labels=None, targets=None, targets_per_cp=None, hoffset=0, voffset=10, css=None):
targets_ = []
for x in targets or []:
x_ = []
for y in x:
x_.append(mpld3.utils.get_id(y))
targets_.append(x_)
self.targets_per_cp = []
self.cps = []
if targets_per_cp:
self.cps = sorted(targets_per_cp.keys(), key=natural_sort_key)
for cp in self.cps:
x_ = []
for y in targets_per_cp[cp]:
x_.append(mpld3.utils.get_id(y))
self.targets_per_cp.append(x_)
super().__init__(points, labels, targets_, hoffset, voffset, css)
self.dict_["targets_per_cp"] = self.targets_per_cp
self.dict_["cps"] = self.cps
class CPTooltip(mpld3.plugins.PluginBase):
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("cpstooltip", CPTooltip);
CPTooltip.prototype = Object.create(mpld3.Plugin.prototype);
CPTooltip.prototype.constructor = CPTooltip;
CPTooltip.prototype.requiredProps = ["id", "tooltip_html"];
function CPTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
CPTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var tooltip_html = this.props.tooltip_html;
var tooltip = d3.select("body").append("div")
.attr("class", "cp-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.obj._groups[0][0].onmouseover = function(d, i) {
tooltip.html(tooltip_html).style("visibility", "visible");
};
obj.obj._groups[0][0].onmousemove = function(d, i) {
tooltip
.style("top", d.clientY + 10 + "px")
.style("left", d.clientX + 0 + "px");
}.bind(this);
obj.obj._groups[0][0].onclick = function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip_html
);
}
};
obj.obj._groups[0][0].onmouseout = function(d, i) {
tooltip.style("visibility", "hidden");
};
}
"""
def __init__(self, text, cp):
tooltip_html = _describe_cp(cp)
self.dict_ = {"type": "cpstooltip",
"id": mpld3.utils.get_id(text),
"tooltip_html": tooltip_html}
def _has_collision_with_bbs(existing_bbs, new_bb):
a_left = min([x[0] for x in new_bb])
a_right = max([x[0] for x in new_bb])
a_bottom = min([x[1] for x in new_bb])
a_top = max([x[1] for x in new_bb])
for bb in existing_bbs:
b_left = min([x[0] for x in bb])
b_right = max([x[0] for x in bb])
b_bottom = min([x[1] for x in bb])
b_top = max([x[1] for x in bb])
if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
return True
return False
| true
| true
|
790d5992da630a40a86c8810a5401e44f3416c9f
| 693
|
py
|
Python
|
train.py
|
Wang-jiahao/SimDeblur
|
31d88e1fbec91d5cc9062f4a46538e4ba806ab29
|
[
"MIT"
] | 1
|
2021-04-30T16:47:40.000Z
|
2021-04-30T16:47:40.000Z
|
train.py
|
Wang-jiahao/SimDeblur
|
31d88e1fbec91d5cc9062f4a46538e4ba806ab29
|
[
"MIT"
] | null | null | null |
train.py
|
Wang-jiahao/SimDeblur
|
31d88e1fbec91d5cc9062f4a46538e4ba806ab29
|
[
"MIT"
] | null | null | null |
""" ************************************************
* fileName: train.py
* desc: The training entry point for SimDeblur;
pay close attention to your constructed configs.
* author: mingdeng_cao
* date: 2021/07/14 17:26
* last revised: Reformat the file
************************************************ """
from simdeblur.config import build_config, merge_args
from simdeblur.engine.parse_arguments import parse_arguments
from simdeblur.engine.trainer import Trainer
def main():
args = parse_arguments()
cfg = build_config(args.config_file)
cfg = merge_args(cfg, args)
cfg.args = args
trainer = Trainer(cfg)
trainer.train()
if __name__ == "__main__":
main()
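# Illustrative invocation (added; the exact CLI shape depends on parse_arguments(), and the
# config path below is only a placeholder):
# python train.py path/to/your_config.yaml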
| 23.896552
| 60
| 0.613276
|
from simdeblur.config import build_config, merge_args
from simdeblur.engine.parse_arguments import parse_arguments
from simdeblur.engine.trainer import Trainer
def main():
args = parse_arguments()
cfg = build_config(args.config_file)
cfg = merge_args(cfg, args)
cfg.args = args
trainer = Trainer(cfg)
trainer.train()
if __name__ == "__main__":
main()
| true
| true
|
790d59ca15f689024a7543b37ff6b13ba78de648
| 5,051
|
py
|
Python
|
buffer/in-vicinity-python/hci/PySide/TopPhonon/capabilities.py
|
zaqwes8811/coordinator-tasks
|
7f63fdf613eff5d441a3c2c7b52d2a3d02d9736a
|
[
"MIT"
] | null | null | null |
buffer/in-vicinity-python/hci/PySide/TopPhonon/capabilities.py
|
zaqwes8811/coordinator-tasks
|
7f63fdf613eff5d441a3c2c7b52d2a3d02d9736a
|
[
"MIT"
] | 15
|
2015-03-07T12:46:41.000Z
|
2015-04-11T09:08:36.000Z
|
buffer/in-vicinity-python/hci/PySide/TopPhonon/capabilities.py
|
zaqwes8811/micro-apps
|
7f63fdf613eff5d441a3c2c7b52d2a3d02d9736a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2007-2008 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact sales@trolltech.com if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting info@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
from PySide import QtCore, QtGui
try:
from PySide.phonon import Phonon
except ImportError:
app = QtGui.QApplication(sys.argv)
QtGui.QMessageBox.critical(None, "Phonon Capabilities",
"Your Qt installation does not have Phonon support.",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
sys.exit(1)
class Window(QtGui.QWidget):
def __init__(self):
        super(Window, self).__init__()
self.setupUi()
self.updateWidgets()
notifier = Phonon.BackendCapabilities.notifier()
notifier.capabilitiesChanged.connect(self.updateWidgets)
notifier.availableAudioOutputDevicesChanged.connect(self.updateWidgets)
def updateWidgets(self):
# Output devices.
devices = Phonon.BackendCapabilities.availableAudioOutputDevices()
model = Phonon.AudioOutputDeviceModel(devices)
self.devicesListView.setModel(model)
# MIME types.
self.mimeListWidget.clear()
for mimeType in Phonon.BackendCapabilities.availableMimeTypes():
item = QtGui.QListWidgetItem(self.mimeListWidget)
item.setText(mimeType)
# Effects.
self.effectsTreeWidget.clear()
for effect in Phonon.BackendCapabilities.availableAudioEffects():
item = QtGui.QTreeWidgetItem(self.effectsTreeWidget)
item.setText(0, "Effect")
item.setText(1, effect.name())
item.setText(2, effect.description())
# Effects parameters.
for parameter in Phonon.Effect(effect, self).parameters():
defaultValue = parameter.defaultValue()
minimumValue = parameter.minimumValue()
maximumValue = parameter.maximumValue()
valueString = "%s / %s / %s" % (defaultValue, minimumValue, maximumValue)
parameterItem = QtGui.QTreeWidgetItem(item)
parameterItem.setText(0, "Parameter")
parameterItem.setText(1, parameter.name())
parameterItem.setText(2, parameter.description())
parameterItem.setText(3, str(parameter.type()))
parameterItem.setText(4, valueString)
for i in range(self.effectsTreeWidget.columnCount()):
if i == 0:
self.effectsTreeWidget.setColumnWidth(0, 150)
elif i == 2:
self.effectsTreeWidget.setColumnWidth(2, 350)
else:
self.effectsTreeWidget.resizeColumnToContents(i)
def setupUi(self):
self.setupBackendBox()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.backendBox)
self.setLayout(layout)
self.setWindowTitle("Backend Capabilities Example")
def setupBackendBox(self):
self.devicesLabel = QtGui.QLabel("Available Audio Devices:")
self.devicesListView = QtGui.QListView()
self.mimeTypesLabel = QtGui.QLabel("Supported MIME Types:")
self.mimeListWidget = QtGui.QListWidget()
self.effectsLabel = QtGui.QLabel("Available Audio Effects:")
headerLabels = ("Type", "Name", "Description", "Value Type",
"Default/Min/Max Values")
self.effectsTreeWidget = QtGui.QTreeWidget()
self.effectsTreeWidget.setHeaderLabels(headerLabels)
self.effectsTreeWidget.setColumnCount(5)
layout = QtGui.QGridLayout()
layout.addWidget(self.devicesLabel, 0, 0)
layout.addWidget(self.devicesListView, 1, 0)
layout.addWidget(self.mimeTypesLabel, 0, 1)
layout.addWidget(self.mimeListWidget, 1, 1)
layout.addWidget(self.effectsLabel, 2, 0)
layout.addWidget(self.effectsTreeWidget, 3, 0, 2, 2)
layout.setRowStretch(3, 100)
self.backendBox = QtGui.QGroupBox("Backend Capabilities")
self.backendBox.setLayout(layout)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.setApplicationName("Phonon Capabilities Example")
window = Window()
window.show()
sys.exit(app.exec_())
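A smaller, GUI-free sketch of the same capability queries; it reuses only calls already shown in this file, so it should behave the same on any Qt build with Phonon support.

# Minimal non-GUI variant of the capability queries used above.
import sys
from PySide import QtGui
from PySide.phonon import Phonon

app = QtGui.QApplication(sys.argv)  # Phonon expects an application object to exist
for mime_type in Phonon.BackendCapabilities.availableMimeTypes():
    print(mime_type)
for effect in Phonon.BackendCapabilities.availableAudioEffects():
    print(effect.name(), "-", effect.description())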
| 35.822695 | 89 | 0.643833 | | true | true |
790d59e637abf096a630cb25b5eeb1af0ca229d7 | 3,474 | py | Python | trainings/workshop1/step13/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | ["BSD-3-Clause"] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | trainings/workshop1/step13/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | ["BSD-3-Clause"] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | trainings/workshop1/step13/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | ["BSD-3-Clause"] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z |
import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0: # ping operable AFTER any net loss
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
if outage_time > 3:
MolerTest.error("Network outage duration exceeded threshold")
else:
MolerTest.info("Network outage duration is acceptable")
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
#######################################################
# TEST GOAL: network outage should not exceed 3 seconds
#######################################################
# test setup
ping_times = {"lost_connection_time": 0,
"reconnection_time": 0}
# ensure network is up before running test
net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": net_up})
sudo_ensure_net_up()
# run event observing "network down/up"
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
no_ping.add_event_occurred_callback(callback=outage_callback,
callback_params={'device_name': 'MyMachine1',
'ping_times': ping_times})
no_ping.start()
ping_is_on = unix1.get_event(event_name="ping_response")
ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
callback_params={'ping_times': ping_times})
ping_is_on.start()
# run test
ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
ping.start(timeout=120)
time.sleep(3)
ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_down})
sudo_ifconfig_down()
time.sleep(5)
ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_up})
sudo_ifconfig_up()
time.sleep(3)
# test teardown
ping.cancel()
no_ping.cancel()
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** validate/assert network outage time - MolerTest.error() usage ***
1. run it
2. see logs - look for "Network outage duration"
3. note that the error does show up in the logs, but the test still doesn't fail
   (we expect an exception)
4. try to decorate the test function with @MolerTest.raise_background_exceptions()
"""
| 38.6 | 118 | 0.658031 |
import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0:
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
if outage_time > 3:
MolerTest.error("Network outage duration exceeded threshold")
else:
MolerTest.info("Network outage duration is acceptable")
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
| true | true |
790d5a216fcf8c3cfe59aec5e71b20db983da110 | 696 | py | Python | Modulo-03/ex105/ex105.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | ["MIT"] | null | null | null | Modulo-03/ex105/ex105.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | ["MIT"] | null | null | null | Modulo-03/ex105/ex105.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | ["MIT"] | null | null | null |
def notas(*n, sit=False):
"""
Função para analisar notas e situação de varios alunos.
:param n: Uma ou mais notas dos alunos (aceita varias)
:param sit: Valor opcional, indicando se deve ou não adicionar a situação.
:return: Dicionario com varias informações sobre a situação da turma.
"""
dic = dict()
dic["total"] = len(n)
dic["maior"] = max(n)
dic["menor"] = min(n)
dic["media"] = sum(n) / len(n)
if sit:
        if dic["media"] < 5:
dic["situação"] = "Critica"
        elif dic["media"] < 7:
dic["situação"] = "Rasoavel"
else:
dic["situação"] = "Boa"
return dic
resp = notas(5, 4, 3, sit=True)
print(resp)
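One more illustrative call, without the optional situation flag; the expected output follows from the dictionary built above.

# Without sit=True only the numeric summary keys are produced.
resp2 = notas(10, 7.5, 8)
print(resp2)  # {'total': 3, 'maior': 10, 'menor': 7.5, 'media': 8.5}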
| 25.777778 | 78 | 0.566092 |
def notas(*n, sit=False):
dic = dict()
dic["total"] = len(n)
dic["maior"] = max(n)
dic["menor"] = min(n)
dic["media"] = sum(n) / len(n)
if sit:
        if dic["media"] < 5:
dic["situação"] = "Critica"
        elif dic["media"] < 7:
dic["situação"] = "Rasoavel"
else:
dic["situação"] = "Boa"
return dic
resp = notas(5, 4, 3, sit=True)
print(resp)
| true | true |
790d5a28dd4f45cfc5d34c37f4be62843139231e | 2,482 | py | Python | src/wi/utils/__init__.py | cc1-cloud/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | ["Apache-2.0"] | 11 | 2015-05-06T14:16:54.000Z | 2022-02-08T23:21:31.000Z | src/wi/utils/__init__.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | ["Apache-2.0"] | 1 | 2015-10-30T21:08:11.000Z | 2015-10-30T21:08:11.000Z | src/wi/utils/__init__.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | ["Apache-2.0"] | 5 | 2016-02-12T22:01:38.000Z | 2021-12-06T16:56:54.000Z |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.utils
@author Piotr Wójcik
@date 24.03.2011
"""
import logging
import os
from time import time
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common.utils import ServerProxy
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import error, success
from wi.utils.messages_codes import get_error, auth_error_text
REDIRECT_FIELD_NAME = 'next'
CLM = ServerProxy(settings.CLOUD_MANAGER_ADDRESS)
def check_response_errors(response, session):
"""
    Checks the status of the response and throws an appropriate error.
"""
if response['status'] != 'ok':
from wi.utils.auth import logout
error_code = response['status']
error_msg = get_error(error_code)
raise RestErrorException(error_msg)
return response
def get_dict_from_list(list_of_dicts, key_value, key='id'):
"""
Returns dictionary with key: @prm{key} equal to @prm{key_value} from a
list of dictionaries: @prm{list_of_dicts}.
"""
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) == key_value:
return dictionary
return None
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
"""
Returns list of dictionaries with keys: @prm{key} equal to one from list
@prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.
"""
ret = []
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) in list_of_key_values:
ret.append(dictionary)
return ret
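A small usage sketch of the two lookup helpers above, with made-up records.

# Hypothetical usage of the lookup helpers defined above.
vms = [{'id': 1, 'name': 'web'}, {'id': 2, 'name': 'db'}, {'id': 3, 'name': 'cache'}]
print(get_dict_from_list(vms, 2))        # {'id': 2, 'name': 'db'}
print(get_dicts_from_list(vms, [1, 3]))  # the 'web' and 'cache' records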
| 30.268293 | 78 | 0.705077 |
import logging
import os
from time import time
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common.utils import ServerProxy
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import error, success
from wi.utils.messages_codes import get_error, auth_error_text
REDIRECT_FIELD_NAME = 'next'
CLM = ServerProxy(settings.CLOUD_MANAGER_ADDRESS)
def check_response_errors(response, session):
if response['status'] != 'ok':
from wi.utils.auth import logout
error_code = response['status']
error_msg = get_error(error_code)
raise RestErrorException(error_msg)
return response
def get_dict_from_list(list_of_dicts, key_value, key='id'):
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) == key_value:
return dictionary
return None
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
ret = []
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) in list_of_key_values:
ret.append(dictionary)
return ret
| true | true |
790d5a6dd53adfe9d64ffe524359d21bd2324394 | 1,658 | py | Python | ellcircle_detect.py | Thinkin99/intelligent_visionforce_assemble | bc3a443ae1c242b1bc83ec670630d46f7403a17f | ["Apache-2.0"] | null | null | null | ellcircle_detect.py | Thinkin99/intelligent_visionforce_assemble | bc3a443ae1c242b1bc83ec670630d46f7403a17f | ["Apache-2.0"] | null | null | null | ellcircle_detect.py | Thinkin99/intelligent_visionforce_assemble | bc3a443ae1c242b1bc83ec670630d46f7403a17f | ["Apache-2.0"] | null | null | null |
import time
import cv2
import numpy as np
j = 1
while 1:
path = 'Bearing/' + str(j) + '.jpg'
img = cv2.imread(path)
img_copy = img.copy()
img = cv2.blur(img, (1, 1))
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
# flag, img_copy = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    imgray = cv2.Canny(img_copy, 600, 100, 3)  # Canny edge detection; parameters can be tuned
# cv2.imshow("imgray",imgray)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
cv2.imshow("thresh", thresh)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # contours is the contour set; length, area, etc. can be computed from each contour
ux = 0
uy = 0
for cnt in contours:
if len(cnt) > 50:
            # S1 = cv2.contourArea(cnt)  # actual enclosed area via Green's theorem
            ell = cv2.fitEllipse(cnt)  # fit an ellipse: ellipse = [center (x, y), axes (a, b), angle]
            x = int(ell[0][0])
            y = int(ell[0][1])
            a = ell[1][0]
            b = ell[1][1]
            # S2 = math.pi * ell[1][0] * ell[1][1]  # theoretical ellipse area
            if (b / a) < 1.2:  # and a > 0 and b > 0 and a < 0 and b < 0:  # axis-ratio threshold
uy = y
ux = x
img = cv2.ellipse(img, ell, (0, 0, 200), 2)
cv2.circle(img, (x, y), 2, (255, 255, 255), 3)
cv2.putText(img, str((x, y)), (x + 20, y + 10), 0, 0.5,
[225, 255, 255], thickness=1, lineType=cv2.LINE_AA)
print("长轴: " + str(a) + " " + "短轴: " + str(b) + " " + str(ell[0][0]) + " " + str(ell[0][1]))
cv2.imshow("ell", img)
j+=1
if j==44:
j=1
time.sleep(0.5)
cv2.waitKey(20)
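The axis-ratio filter above can be factored into a helper; a sketch assuming the (center, axes, angle) tuple layout returned by cv2.fitEllipse.

# Sketch: the near-circularity test from the loop above as a reusable helper.
def is_near_circle(ellipse, max_axis_ratio=1.2):
    (cx, cy), (a, b), angle = ellipse  # layout as returned by cv2.fitEllipse
    return min(a, b) > 0 and (max(a, b) / min(a, b)) < max_axis_ratio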
| 34.541667 | 120 | 0.496984 |
import time
import cv2
import numpy as np
j = 1
while 1:
path = 'Bearing/' + str(j) + '.jpg'
img = cv2.imread(path)
img_copy = img.copy()
img = cv2.blur(img, (1, 1))
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
imgray = cv2.Canny(img_copy, 600, 100, 3)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
cv2.imshow("thresh", thresh)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ux = 0
uy = 0
for cnt in contours:
if len(cnt) > 50:
ell = cv2.fitEllipse(cnt)
x = int(ell[0][0])
y = int(ell[0][1])
a = ell[1][0]
b = ell[1][1]
            if (b / a) < 1.2:
                uy = y
                ux = x
img = cv2.ellipse(img, ell, (0, 0, 200), 2)
cv2.circle(img, (x, y), 2, (255, 255, 255), 3)
cv2.putText(img, str((x, y)), (x + 20, y + 10), 0, 0.5,
[225, 255, 255], thickness=1, lineType=cv2.LINE_AA)
print("长轴: " + str(a) + " " + "短轴: " + str(b) + " " + str(ell[0][0]) + " " + str(ell[0][1]))
cv2.imshow("ell", img)
j+=1
if j==44:
j=1
time.sleep(0.5)
cv2.waitKey(20)
| true | true |
790d5aed153b8ce9ec1d61f79005155787745b8a | 3,174 | py | Python | docker/nwchem/src/run.py | bnmajor/mongochemdeploy | 84179082889664140c4f0133c70bd839663dd307 | ["BSD-3-Clause"] | 9 | 2017-03-27T19:22:09.000Z | 2021-06-28T11:45:50.000Z | docker/nwchem/src/run.py | bnmajor/mongochemdeploy | 84179082889664140c4f0133c70bd839663dd307 | ["BSD-3-Clause"] | 50 | 2015-09-25T20:11:41.000Z | 2021-12-22T19:39:10.000Z | docker/nwchem/src/run.py | bnmajor/mongochemdeploy | 84179082889664140c4f0133c70bd839663dd307 | ["BSD-3-Clause"] | 7 | 2017-11-02T17:20:46.000Z | 2021-03-10T07:36:00.000Z |
import os
import subprocess
import jinja2
import json
import openchemistry as oc
def run_calculation(geometry_file, output_file, params, scratch_dir):
# Read in the geometry from the geometry file
# This container expects the geometry file to be in .xyz format
with open(geometry_file) as f:
xyz_structure = f.read()
# remove the first two lines in the xyz file
# (i.e. number of atom and optional comment)
xyz_structure = xyz_structure.split('\n')[2:]
xyz_structure = '\n '.join(xyz_structure)
# Read the input parameters
theory = params.get('theory', 'hf')
task = params.get('task', 'energy')
basis = params.get('basis', 'cc-pvdz')
functional = params.get('functional', 'b3lyp')
charge = params.get('charge', 0)
multiplicity = params.get('multiplicity', 1)
theory = theory.lower()
if theory == 'hf':
_theory = 'scf'
        # We update the multiplicity key when using scf: SCF accepts names,
        # not numbers.
multiplicities = {'1': 'singlet', '2': 'doublet', '3': 'triplet'}
_multiplicity = multiplicities.get(str(multiplicity), 'singlet')
else:
_theory = theory
_multiplicity = multiplicity
task = task.lower()
if task == 'frequencies':
_task = 'task {0} {1}\ntask {0} {2}'.format(_theory, 'optimize', 'freq')
elif task == 'optimize':
_task = 'task {0} {1}'.format(_theory, 'optimize')
else: # single point energy
_task = 'task {0}'.format(_theory)
context = {
'task': _task,
'theory': _theory,
'functional': functional,
'charge': charge,
'multiplicity': _multiplicity,
'basis': basis,
}
# Combine the input parameters and geometry into a concrete input file
# that can be executed by the simulation code
template_path = os.path.dirname(__file__)
jinja2_env = \
jinja2.Environment(loader=jinja2.FileSystemLoader(template_path),
trim_blocks=True)
os.makedirs(scratch_dir, exist_ok=True)
os.chdir(scratch_dir)
raw_input_file = os.path.join(scratch_dir, 'raw.in')
raw_output_file = os.path.join(scratch_dir, 'raw.json')
with open(raw_input_file, 'wb') as f:
if _theory == 'dft':
jinja2_env.get_template('nwchem.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
else:
jinja2_env.get_template('nwchem.sfc.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
# Execute the code and write to output
cpus = 4
subprocess.run(['mpirun', '-np', str(cpus), "/opt/nwchem/bin/LINUX64/nwchem",
raw_input_file, raw_output_file])
# Convert the raw output file generated by the code execution, into the
# output format declared in the container description (cjson)
with open(raw_output_file) as f:
cjson = oc.NWChemJsonReader(f).read()
# Save the calculation parameters in the cjson output for future reference
cjson['inputParameters'] = params
with open(output_file, 'w') as f:
json.dump(cjson, f)
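For clarity, the theory/multiplicity normalization above in isolation; the mapping values are taken directly from run_calculation.

# The SCF normalization from run_calculation, extracted: the scf block takes
# multiplicity names rather than numbers, so the integer is mapped to a name.
def normalize(theory, multiplicity):
    theory = theory.lower()
    if theory == 'hf':
        names = {'1': 'singlet', '2': 'doublet', '3': 'triplet'}
        return 'scf', names.get(str(multiplicity), 'singlet')
    return theory, multiplicity

print(normalize('HF', 2))   # ('scf', 'doublet')
print(normalize('dft', 1))  # ('dft', 1)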
| 36.068182 | 127 | 0.640832 |
import os
import subprocess
import jinja2
import json
import openchemistry as oc
def run_calculation(geometry_file, output_file, params, scratch_dir):
with open(geometry_file) as f:
xyz_structure = f.read()
xyz_structure = xyz_structure.split('\n')[2:]
xyz_structure = '\n '.join(xyz_structure)
theory = params.get('theory', 'hf')
task = params.get('task', 'energy')
basis = params.get('basis', 'cc-pvdz')
functional = params.get('functional', 'b3lyp')
charge = params.get('charge', 0)
multiplicity = params.get('multiplicity', 1)
theory = theory.lower()
if theory == 'hf':
_theory = 'scf'
multiplicities = {'1': 'singlet', '2': 'doublet', '3': 'triplet'}
_multiplicity = multiplicities.get(str(multiplicity), 'singlet')
else:
_theory = theory
_multiplicity = multiplicity
task = task.lower()
if task == 'frequencies':
_task = 'task {0} {1}\ntask {0} {2}'.format(_theory, 'optimize', 'freq')
elif task == 'optimize':
_task = 'task {0} {1}'.format(_theory, 'optimize')
else:
_task = 'task {0}'.format(_theory)
context = {
'task': _task,
'theory': _theory,
'functional': functional,
'charge': charge,
'multiplicity': _multiplicity,
'basis': basis,
}
template_path = os.path.dirname(__file__)
jinja2_env = \
jinja2.Environment(loader=jinja2.FileSystemLoader(template_path),
trim_blocks=True)
os.makedirs(scratch_dir, exist_ok=True)
os.chdir(scratch_dir)
raw_input_file = os.path.join(scratch_dir, 'raw.in')
raw_output_file = os.path.join(scratch_dir, 'raw.json')
with open(raw_input_file, 'wb') as f:
if _theory == 'dft':
jinja2_env.get_template('nwchem.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
else:
jinja2_env.get_template('nwchem.sfc.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
cpus = 4
subprocess.run(['mpirun', '-np', str(cpus), "/opt/nwchem/bin/LINUX64/nwchem",
raw_input_file, raw_output_file])
with open(raw_output_file) as f:
cjson = oc.NWChemJsonReader(f).read()
cjson['inputParameters'] = params
with open(output_file, 'w') as f:
json.dump(cjson, f)
| true | true |
790d5bdef23bef149e8eb1afa9cdecb9ce458e6e | 43,170 | py | Python | research/object_detection/metrics/coco_tools.py | bamdada/UdacityProj10FinaltfModels | 4701bfbc924539860f610fa4ceae484a7bf194c6 | ["Apache-2.0"] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | research/object_detection/metrics/coco_tools.py | akshayjaryal603/models | db39ef826193d0802f644ba30397242a7272676e | ["Apache-2.0"] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | research/object_detection/metrics/coco_tools.py | akshayjaryal603/models | db39ef826193d0802f644ba30397242a7272676e | ["Apache-2.0"] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox', oks_sigmas=None):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,
`keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if oks_sigmas is not None:
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Computes detection/keypoint metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
all_metrics_per_category: If true, include all the summery metrics for
each category in per_category_ap. Be careful with setting it to true if
you have more than handful of categories, because it will pollute
your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels). NOTE: not present for 'keypoints'
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections. NOTE: not present for 'keypoints'
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if self._iou_type in ['bbox', 'segm']:
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]),
('Precision/mAP@.50IOU', self.stats[1]),
('Precision/mAP@.75IOU', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])])
elif self._iou_type == 'keypoints':
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(
category_name)] = self.stats[0]
summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(
category_name)] = self.stats[1]
summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(
category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(
category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(
category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(
category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(
category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(
category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(
category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(
category_name)] = self.stats[9]
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
      # Kept for backward compatibility
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_keypoints=None,
groundtruth_keypoint_visibilities=None,
groundtruth_masks=None,
groundtruth_is_crowd=None,
groundtruth_area=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
      as an enum with 0=not labeled, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).'
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
has_keypoints = groundtruth_keypoints is not None
has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None
if has_keypoints and not has_keypoint_visibilities:
groundtruth_keypoint_visibilities = np.full(
(num_boxes, groundtruth_keypoints.shape[1]), 2)
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
export_dict = {
'id':
next_annotation_id + i,
'image_id':
image_id,
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': area,
'iscrowd':
iscrowd
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
if has_keypoints:
keypoints = groundtruth_keypoints[i]
visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])
coco_keypoints = []
num_valid_keypoints = 0
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
if int(visibility) > 0:
num_valid_keypoints = num_valid_keypoints + 1
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_valid_keypoints
groundtruth_list.append(export_dict)
return groundtruth_list
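A toy usage sketch of the per-image groundtruth export above; box coordinates follow the module's [ymin, xmin, ymax, xmax] convention, and the values are invented.

# Hypothetical call with a single toy box; numpy is already imported above.
import numpy as np
toy_boxes = np.array([[10., 20., 50., 80.]], dtype=np.float32)  # [ymin, xmin, ymax, xmax]
toy_classes = np.array([1], dtype=np.int32)
anns = ExportSingleImageGroundtruthToCoco(
    image_id='img_0', next_annotation_id=1, category_id_set={1},
    groundtruth_boxes=toy_boxes, groundtruth_classes=toy_classes)
# bbox is COCO-style [xmin, ymin, width, height]; area = 40 * 60 = 2400
print(anns[0]['bbox'], anns[0]['area'])  # [20.0, 10.0, 60.0, 40.0] 2400.0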
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes,
detection_keypoints=None,
detection_keypoint_visibilities=None):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
provided here must match the ones given to the
  ExportSingleImageDetectionBoxesToCoco. We assume that boxes and classes are in
correspondence - that is: boxes[i, :], and classes[i]
are associated with the same groundtruth annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
      treated as an enum with 0=not labeled, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
export_dict = {
'image_id':
image_id,
'category_id':
int(detection_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score':
float(detection_scores[i]),
}
if detection_keypoints is not None:
keypoints = detection_keypoints[i]
num_keypoints = keypoints.shape[0]
if detection_keypoint_visibilities is None:
detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),
2)
visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
coco_keypoints = []
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_keypoints
detections_list.append(export_dict)
return detections_list
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
'compatible lengths and shapes '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
Note this function is recommended to use for small dataset.
For large dataset, it should be used with a merge function
(e.g. in map reduce), otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in segment_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
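# --- Hedged usage sketch added for illustration; it is not part of the
# original module. A minimal call to ExportKeypointsToCOCO for one image with a
# single detection carrying two keypoints. All ids, coordinates and scores are
# illustrative; note that each category dict needs a 'num_keypoints' entry for
# its keypoints to be exported.
def _example_export_keypoints_to_coco():
  example_image_ids = ['image_0']
  # Keypoint array shape is [num_detection, num_keypoints, 2].
  example_keypoints = [np.array([[[10.0, 20.0], [30.0, 40.0]]], dtype=np.float32)]
  example_scores = [np.array([0.8], dtype=np.float32)]
  example_classes = [np.array([1], dtype=np.int32)]
  example_categories = [{'id': 1, 'name': 'object', 'num_keypoints': 2}]
  return ExportKeypointsToCOCO(example_image_ids, example_keypoints,
                               example_scores, example_classes,
                               example_categories)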
| 45.346639
| 80
| 0.668937
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
def __init__(self, dataset, detection_type='bbox'):
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
class COCOEvalWrapper(cocoeval.COCOeval):
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox', oks_sigmas=None):
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if oks_sigmas is not None:
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
def GetCategory(self, category_id):
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
return self.params.useCats == 0
def GetCategoryIdList(self):
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if self._iou_type in ['bbox', 'segm']:
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]),
('Precision/mAP@.50IOU', self.stats[1]),
('Precision/mAP@.75IOU', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])])
elif self._iou_type == 'keypoints':
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(
category_name)] = self.stats[0]
summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(
category_name)] = self.stats[1]
summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(
category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(
category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(
category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(
category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(
category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(
category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(
category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(
category_name)] = self.stats[9]
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
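# --- Hedged usage sketch added for illustration; it is not part of the
# original module. It outlines the typical wrapper flow: wrap a COCO-style
# groundtruth dict, load a list of detection dicts, then run the evaluator.
# The groundtruth_dict and detections_list arguments are assumed to be built
# elsewhere, e.g. with ExportGroundtruthToCOCO and ExportDetectionsToCOCO
# defined further below.
def _example_coco_evaluation(groundtruth_dict, detections_list):
  groundtruth = COCOWrapper(groundtruth_dict)
  detections = groundtruth.LoadAnnotations(detections_list)
  evaluator = COCOEvalWrapper(groundtruth, detections)
  summary_metrics, per_category_ap = evaluator.ComputeMetrics()
  return summary_metrics, per_category_ap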
def _ConvertBoxToCOCOFormat(box):
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
def _RleCompress(masks):
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_keypoints=None,
groundtruth_keypoint_visibilities=None,
groundtruth_masks=None,
groundtruth_is_crowd=None,
groundtruth_area=None):
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
has_keypoints = groundtruth_keypoints is not None
has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None
if has_keypoints and not has_keypoint_visibilities:
groundtruth_keypoint_visibilities = np.full(
(num_boxes, groundtruth_keypoints.shape[1]), 2)
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
export_dict = {
'id':
next_annotation_id + i,
'image_id':
image_id,
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': area,
'iscrowd':
iscrowd
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
if has_keypoints:
keypoints = groundtruth_keypoints[i]
visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])
coco_keypoints = []
num_valid_keypoints = 0
for keypoint, visibility in zip(keypoints, visibilities):
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
if int(visibility) > 0:
num_valid_keypoints = num_valid_keypoints + 1
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_valid_keypoints
groundtruth_list.append(export_dict)
return groundtruth_list
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes,
detection_keypoints=None,
detection_keypoint_visibilities=None):
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
export_dict = {
'image_id':
image_id,
'category_id':
int(detection_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score':
float(detection_scores[i]),
}
if detection_keypoints is not None:
keypoints = detection_keypoints[i]
num_keypoints = keypoints.shape[0]
if detection_keypoint_visibilities is None:
detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),
2)
visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
coco_keypoints = []
for keypoint, visibility in zip(keypoints, visibilities):
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_keypoints
detections_list.append(export_dict)
return detections_list
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
                     'compatible lengths and shapes. '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
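# --- Hedged usage sketch added for illustration; it is not part of the
# original module. A toy end-to-end export for a single image: one groundtruth
# box and one detection box, both for an assumed category id 1. Boxes follow
# the [ymin, xmin, ymax, xmax] convention implied by _ConvertBoxToCOCOFormat.
def _example_export_groundtruth_and_detections():
  example_image_ids = ['image_0']
  example_categories = [{'id': 1, 'name': 'object'}]
  example_gt_boxes = [np.array([[10.0, 10.0, 50.0, 50.0]], dtype=np.float32)]
  example_gt_classes = [np.array([1], dtype=np.int32)]
  example_det_boxes = [np.array([[12.0, 12.0, 48.0, 48.0]], dtype=np.float32)]
  example_det_scores = [np.array([0.9], dtype=np.float32)]
  example_det_classes = [np.array([1], dtype=np.int32)]
  groundtruth_dict = ExportGroundtruthToCOCO(
      example_image_ids, example_gt_boxes, example_gt_classes,
      example_categories)
  detections_list = ExportDetectionsToCOCO(
      example_image_ids, example_det_boxes, example_det_scores,
      example_det_classes, example_categories)
  return groundtruth_dict, detections_list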
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in segment_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
| true
| true
|
790d5bec496a69c428cf6f382ef3dd9adebad7fa
| 1,085
|
py
|
Python
|
learning/model/keras_model_resave.py
|
eugene-vasilev/Automatic-Tool-Annotation-for-CATARACT-Surgery
|
795be1dea5af28919e8696103f801d5d529f6067
|
[
"MIT"
] | 1
|
2020-02-22T17:39:09.000Z
|
2020-02-22T17:39:09.000Z
|
learning/model/keras_model_resave.py
|
eugene-vasilev/Automatic-Tool-Annotation-for-CATARACT-Surgery
|
795be1dea5af28919e8696103f801d5d529f6067
|
[
"MIT"
] | null | null | null |
learning/model/keras_model_resave.py
|
eugene-vasilev/Automatic-Tool-Annotation-for-CATARACT-Surgery
|
795be1dea5af28919e8696103f801d5d529f6067
|
[
"MIT"
] | null | null | null |
from keras.models import load_model
from glob import glob
from metrics import auc, precision, recall, f1
def save_json(model, path):
model_json = model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
def save_weights(model, path):
model.save_weights(path)
def resave_model(model_path, save_path):
model = load_model(model_path, custom_objects={"auc": auc,
"precision": precision,
"recall": recall,
"f1": f1})
save_json(model, save_path + '/model.json')
save_weights(model, save_path + '/model.h5')
if __name__ == '__main__':
model_folders = glob('./model/saved_models/*')
for model_folder in model_folders:
models = sorted(glob(model_folder + '/*.hdf5'))
last_model = models[-1]
resave_model(last_model, model_folder)
model_name = model_folder[model_folder.rfind('/') + 1:]
print('Model {} resaved!'.format(model_name))
| 33.90625
| 74
| 0.588018
|
from keras.models import load_model
from glob import glob
from metrics import auc, precision, recall, f1
def save_json(model, path):
model_json = model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
def save_weights(model, path):
model.save_weights(path)
def resave_model(model_path, save_path):
model = load_model(model_path, custom_objects={"auc": auc,
"precision": precision,
"recall": recall,
"f1": f1})
save_json(model, save_path + '/model.json')
save_weights(model, save_path + '/model.h5')
if __name__ == '__main__':
model_folders = glob('./model/saved_models/*')
for model_folder in model_folders:
models = sorted(glob(model_folder + '/*.hdf5'))
last_model = models[-1]
resave_model(last_model, model_folder)
model_name = model_folder[model_folder.rfind('/') + 1:]
print('Model {} resaved!'.format(model_name))
| true
| true
|
790d5c2e844e7a3211b9acadc48b08cf64d7732d
| 8,952
|
py
|
Python
|
tests/components/script/test_init.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 4
|
2019-01-10T14:47:54.000Z
|
2021-04-22T02:06:27.000Z
|
tests/components/script/test_init.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 6
|
2021-02-08T21:02:40.000Z
|
2022-03-12T00:52:16.000Z
|
tests/components/script/test_init.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""The tests for the Script component."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch, Mock
from homeassistant.components import script
from homeassistant.components.script import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, SERVICE_RELOAD, SERVICE_TOGGLE,
SERVICE_TURN_OFF, SERVICE_TURN_ON, EVENT_SCRIPT_STARTED)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.loader import bind_hass
from homeassistant.setup import setup_component, async_setup_component
from tests.common import get_test_home_assistant
ENTITY_ID = 'script.test'
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
_, object_id = split_entity_id(entity_id)
hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
"""Toggle the script.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def reload(hass):
"""Reload script component.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
"""Test the Script component."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_with_invalid_configs(self):
"""Test setup with invalid configs."""
for value in (
{'test': {}},
{
'test hello world': {
'sequence': [{'event': 'bla'}]
}
},
{
'test': {
'sequence': {
'event': 'test_event',
'service': 'homeassistant.turn_on',
}
}
},
):
assert not setup_component(self.hass, 'script', {
'script': value
}), 'Script loaded with wrong config {}'.format(value)
assert 0 == len(self.hass.states.entity_ids('script'))
def test_turn_on_service(self):
"""Verify that the turn_on service."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
# Calling turn_on a second time should not advance the script
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert 0 == len(events)
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
state = self.hass.states.get('group.all_scripts')
assert state is not None
assert state.attributes.get('entity_id') == (ENTITY_ID,)
def test_toggle_service(self):
"""Test the toggling of a service."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
def test_passing_variables(self):
"""Test different ways of passing in variables."""
calls = []
context = Context()
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': {
'service': 'test.script',
'data_template': {
'hello': '{{ greeting }}',
},
},
},
},
})
turn_on(self.hass, ENTITY_ID, {
'greeting': 'world'
}, context=context)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data['hello'] == 'world'
self.hass.services.call('script', 'test', {
'greeting': 'universe',
}, context=context)
self.hass.block_till_done()
assert len(calls) == 2
assert calls[1].context is context
assert calls[1].data['hello'] == 'universe'
def test_reload_service(self):
"""Verify that the turn_on service."""
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}
}
})
assert self.hass.states.get(ENTITY_ID) is not None
assert self.hass.services.has_service(script.DOMAIN, 'test')
with patch('homeassistant.config.load_yaml_config_file', return_value={
'script': {
'test2': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}}}):
with patch('homeassistant.config.find_config_file',
return_value=''):
reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get(ENTITY_ID) is None
assert not self.hass.services.has_service(script.DOMAIN, 'test')
assert self.hass.states.get("script.test2") is not None
assert self.hass.services.has_service(script.DOMAIN, 'test2')
async def test_shared_context(hass):
"""Test that the shared context is passed down the chain."""
event = 'test_event'
context = Context()
event_mock = Mock()
run_mock = Mock()
hass.bus.async_listen(event, event_mock)
hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)
assert await async_setup_component(hass, 'script', {
'script': {
'test': {
'sequence': [
{'event': event}
]
}
}
})
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
context=context)
await hass.async_block_till_done()
assert event_mock.call_count == 1
assert run_mock.call_count == 1
args, kwargs = run_mock.call_args
assert args[0].context == context
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) == 'test'
assert args[0].data.get(ATTR_ENTITY_ID) == 'script.test'
# Ensure context carries through the event
args, kwargs = event_mock.call_args
assert args[0].context == context
# Ensure the script state shares the same context
state = hass.states.get('script.test')
assert state is not None
assert state.context == context
| 29.544554
| 79
| 0.535188
|
import unittest
from unittest.mock import patch, Mock
from homeassistant.components import script
from homeassistant.components.script import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, SERVICE_RELOAD, SERVICE_TOGGLE,
SERVICE_TURN_OFF, SERVICE_TURN_ON, EVENT_SCRIPT_STARTED)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.loader import bind_hass
from homeassistant.setup import setup_component, async_setup_component
from tests.common import get_test_home_assistant
ENTITY_ID = 'script.test'
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
_, object_id = split_entity_id(entity_id)
hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def reload(hass):
hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_setup_with_invalid_configs(self):
for value in (
{'test': {}},
{
'test hello world': {
'sequence': [{'event': 'bla'}]
}
},
{
'test': {
'sequence': {
'event': 'test_event',
'service': 'homeassistant.turn_on',
}
}
},
):
assert not setup_component(self.hass, 'script', {
'script': value
}), 'Script loaded with wrong config {}'.format(value)
assert 0 == len(self.hass.states.entity_ids('script'))
def test_turn_on_service(self):
event = 'test_event'
events = []
@callback
def record_event(event):
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert 0 == len(events)
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
state = self.hass.states.get('group.all_scripts')
assert state is not None
assert state.attributes.get('entity_id') == (ENTITY_ID,)
def test_toggle_service(self):
event = 'test_event'
events = []
@callback
def record_event(event):
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
def test_passing_variables(self):
calls = []
context = Context()
@callback
def record_call(service):
calls.append(service)
self.hass.services.register('test', 'script', record_call)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': {
'service': 'test.script',
'data_template': {
'hello': '{{ greeting }}',
},
},
},
},
})
turn_on(self.hass, ENTITY_ID, {
'greeting': 'world'
}, context=context)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data['hello'] == 'world'
self.hass.services.call('script', 'test', {
'greeting': 'universe',
}, context=context)
self.hass.block_till_done()
assert len(calls) == 2
assert calls[1].context is context
assert calls[1].data['hello'] == 'universe'
def test_reload_service(self):
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}
}
})
assert self.hass.states.get(ENTITY_ID) is not None
assert self.hass.services.has_service(script.DOMAIN, 'test')
with patch('homeassistant.config.load_yaml_config_file', return_value={
'script': {
'test2': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}}}):
with patch('homeassistant.config.find_config_file',
return_value=''):
reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get(ENTITY_ID) is None
assert not self.hass.services.has_service(script.DOMAIN, 'test')
assert self.hass.states.get("script.test2") is not None
assert self.hass.services.has_service(script.DOMAIN, 'test2')
async def test_shared_context(hass):
event = 'test_event'
context = Context()
event_mock = Mock()
run_mock = Mock()
hass.bus.async_listen(event, event_mock)
hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)
assert await async_setup_component(hass, 'script', {
'script': {
'test': {
'sequence': [
{'event': event}
]
}
}
})
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
context=context)
await hass.async_block_till_done()
assert event_mock.call_count == 1
assert run_mock.call_count == 1
args, kwargs = run_mock.call_args
assert args[0].context == context
assert args[0].data.get(ATTR_NAME) == 'test'
assert args[0].data.get(ATTR_ENTITY_ID) == 'script.test'
args, kwargs = event_mock.call_args
assert args[0].context == context
state = hass.states.get('script.test')
assert state is not None
assert state.context == context
| true
| true
|
790d5c7a78474a22a866df13bf200475baa77d34
| 13,928
|
py
|
Python
|
tests/contrib/psycopg/test_psycopg.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/psycopg/test_psycopg.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2022-02-16T09:35:37.000Z
|
2022-03-04T16:48:45.000Z
|
tests/contrib/psycopg/test_psycopg.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-02-11T16:34:22.000Z
|
2022-02-11T16:34:22.000Z
|
# stdlib
import time
from unittest import skipIf
# 3p
import psycopg2
from psycopg2 import extensions
from psycopg2 import extras
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.psycopg.patch import PSYCOPG2_VERSION
from ddtrace.contrib.psycopg.patch import patch
from ddtrace.contrib.psycopg.patch import unpatch
from tests.contrib.config import POSTGRES_CONFIG
from tests.opentracer.utils import init_tracer
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from tests.utils import snapshot
if PSYCOPG2_VERSION >= (2, 7):
from psycopg2.sql import Identifier
from psycopg2.sql import Literal
from psycopg2.sql import SQL
TEST_PORT = POSTGRES_CONFIG["port"]
class PsycopgCore(TracerTestCase):
# default service
TEST_SERVICE = "postgres"
def setUp(self):
super(PsycopgCore, self).setUp()
patch()
def tearDown(self):
super(PsycopgCore, self).tearDown()
unpatch()
def _get_conn(self, service=None):
conn = psycopg2.connect(**POSTGRES_CONFIG)
pin = Pin.get_from(conn)
if pin:
pin.clone(service=service, tracer=self.tracer).onto(conn)
return conn
def test_patch_unpatch(self):
# Test patch idempotence
patch()
patch()
service = "fo"
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
self.reset()
# Test unpatch
unpatch()
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
# Test patch again
patch()
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
def assert_conn_is_traced(self, db, service):
        # ensure the traced psycopg client doesn't add non-standard
# methods
try:
db.execute("""select 'foobar'""")
except AttributeError:
pass
# Ensure we can run a query and it's correctly traced
q = """select 'foobarblah'"""
start = time.time()
cursor = db.cursor()
res = cursor.execute(q)
self.assertIsNone(res)
rows = cursor.fetchall()
end = time.time()
self.assertEquals(rows, [("foobarblah",)])
self.assert_structure(
dict(name="postgres.query", resource=q, service=service, error=0, span_type="sql"),
)
root = self.get_root_span()
self.assertIsNone(root.get_tag("sql.query"))
assert start <= root.start <= end
assert root.duration <= end - start
# confirm analytics disabled by default
self.reset()
# run a query with an error and ensure all is well
q = """select * from some_non_existant_table"""
cur = db.cursor()
try:
cur.execute(q)
except Exception:
pass
else:
assert 0, "should have an error"
self.assert_structure(
dict(
name="postgres.query",
resource=q,
service=service,
error=1,
span_type="sql",
meta={
"out.host": "127.0.0.1",
},
metrics={
"out.port": TEST_PORT,
},
),
)
root = self.get_root_span()
assert_is_measured(root)
self.assertIsNone(root.get_tag("sql.query"))
self.reset()
def test_opentracing_propagation(self):
# ensure OpenTracing plays well with our integration
query = """SELECT 'tracing'"""
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),),
)
assert_is_measured(self.get_spans()[1])
self.reset()
with self.override_config("psycopg", dict(trace_fetch_methods=True)):
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(
dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),
dict(name="postgres.query.fetchall", resource=query, service="postgres", error=0, span_type="sql"),
),
)
assert_is_measured(self.get_spans()[1])
@skipIf(PSYCOPG2_VERSION < (2, 5), "context manager not available in psycopg2==2.4")
def test_cursor_ctx_manager(self):
# ensure cursors work with context managers
# https://github.com/DataDog/dd-trace-py/issues/228
conn = self._get_conn()
t = type(conn.cursor())
with conn.cursor() as cur:
assert t == type(cur), "{} != {}".format(t, type(cur))
cur.execute(query="""select 'blah'""")
rows = cur.fetchall()
assert len(rows) == 1, rows
assert rows[0][0] == "blah"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query"),
)
def test_disabled_execute(self):
conn = self._get_conn()
self.tracer.enabled = False
# these calls were crashing with a previous version of the code.
conn.cursor().execute(query="""select 'blah'""")
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
@skipIf(PSYCOPG2_VERSION < (2, 5), "_json is not available in psycopg2==2.4")
def test_manual_wrap_extension_types(self):
conn = self._get_conn()
# NOTE: this will crash if it doesn't work.
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
# NOTE: this will crash if it doesn't work.
# _ext.register_default_json(conn)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_default_json(conn)
def test_manual_wrap_extension_adapt(self):
conn = self._get_conn()
# NOTE: this will crash if it doesn't work.
# items = _ext.adapt([1, 2, 3])
# items.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
items = extensions.adapt([1, 2, 3])
items.prepare(conn)
# NOTE: this will crash if it doesn't work.
        # binary = _ext.adapt(b'12345')
# binary.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
binary = extensions.adapt(b"12345")
binary.prepare(conn)
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident(self):
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# NOTE: this will crash if it doesn't work.
# TypeError: argument 2 must be a connection or a cursor
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
def test_connect_factory(self):
services = ["db", "another"]
for service in services:
conn = self._get_conn(service=service)
self.assert_conn_is_traced(conn, service)
def test_commit(self):
conn = self._get_conn()
conn.commit()
self.assert_structure(dict(name="postgres.connection.commit", service=self.TEST_SERVICE))
def test_rollback(self):
conn = self._get_conn()
conn.rollback()
self.assert_structure(dict(name="postgres.connection.rollback", service=self.TEST_SERVICE))
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query(self):
"""Checks whether execution of composed SQL string is traced"""
query = SQL(" union all ").join(
[SQL("""select {} as x""").format(Literal("one")), SQL("""select {} as x""").format(Literal("two"))]
)
db = self._get_conn()
with db.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_identifier(self):
"""Checks whether execution of composed SQL string is traced"""
db = self._get_conn()
with db.cursor() as cur:
# DEV: Use a temp table so it is removed after this session
cur.execute("CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);")
cur.execute("INSERT INTO test (name) VALUES (%s);", ("test_case",))
spans = self.get_spans()
assert len(spans) == 2
self.reset()
query = SQL("""select {}, {} from {}""").format(Identifier("id"), Identifier("name"), Identifier("test"))
cur.execute(query=query)
rows = cur.fetchall()
assert rows == [(1, "test_case")]
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@snapshot()
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_encoding(self):
"""Checks whether execution of composed SQL string is traced"""
import logging
logger = logging.getLogger()
logger.level = logging.DEBUG
query = SQL(" union all ").join([SQL("""select 'one' as x"""), SQL("""select 'two' as x""")])
conn = psycopg2.connect(**POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
def test_analytics_default(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_app_service(self):
"""
        When a user specifies a service for the app,
        the psycopg integration should not use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service != "mysvc"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_PSYCOPG_SERVICE="mysvc"))
def test_user_specified_service(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service == "mysvc"
@skipIf(PSYCOPG2_VERSION < (2, 5), "Connection context managers not defined in <2.5.")
def test_contextmanager_connection(self):
service = "fo"
with self._get_conn(service=service) as conn:
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident_standalone():
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# NOTE: this will crash if it doesn't work.
# TypeError: argument 2 must be a connection or a cursor
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
| 34.733167
| 119
| 0.603389
|
import time
from unittest import skipIf
import psycopg2
from psycopg2 import extensions
from psycopg2 import extras
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.psycopg.patch import PSYCOPG2_VERSION
from ddtrace.contrib.psycopg.patch import patch
from ddtrace.contrib.psycopg.patch import unpatch
from tests.contrib.config import POSTGRES_CONFIG
from tests.opentracer.utils import init_tracer
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from tests.utils import snapshot
if PSYCOPG2_VERSION >= (2, 7):
from psycopg2.sql import Identifier
from psycopg2.sql import Literal
from psycopg2.sql import SQL
TEST_PORT = POSTGRES_CONFIG["port"]
class PsycopgCore(TracerTestCase):
TEST_SERVICE = "postgres"
def setUp(self):
super(PsycopgCore, self).setUp()
patch()
def tearDown(self):
super(PsycopgCore, self).tearDown()
unpatch()
def _get_conn(self, service=None):
conn = psycopg2.connect(**POSTGRES_CONFIG)
pin = Pin.get_from(conn)
if pin:
pin.clone(service=service, tracer=self.tracer).onto(conn)
return conn
def test_patch_unpatch(self):
patch()
patch()
service = "fo"
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
self.reset()
unpatch()
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
patch()
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
def assert_conn_is_traced(self, db, service):
# methods
try:
db.execute("""select 'foobar'""")
except AttributeError:
pass
# Ensure we can run a query and it's correctly traced
q = """select 'foobarblah'"""
start = time.time()
cursor = db.cursor()
res = cursor.execute(q)
self.assertIsNone(res)
rows = cursor.fetchall()
end = time.time()
self.assertEquals(rows, [("foobarblah",)])
self.assert_structure(
dict(name="postgres.query", resource=q, service=service, error=0, span_type="sql"),
)
root = self.get_root_span()
self.assertIsNone(root.get_tag("sql.query"))
assert start <= root.start <= end
assert root.duration <= end - start
self.reset()
q = """select * from some_non_existant_table"""
cur = db.cursor()
try:
cur.execute(q)
except Exception:
pass
else:
assert 0, "should have an error"
self.assert_structure(
dict(
name="postgres.query",
resource=q,
service=service,
error=1,
span_type="sql",
meta={
"out.host": "127.0.0.1",
},
metrics={
"out.port": TEST_PORT,
},
),
)
root = self.get_root_span()
assert_is_measured(root)
self.assertIsNone(root.get_tag("sql.query"))
self.reset()
def test_opentracing_propagation(self):
query = """SELECT 'tracing'"""
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),),
)
assert_is_measured(self.get_spans()[1])
self.reset()
with self.override_config("psycopg", dict(trace_fetch_methods=True)):
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(
dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),
dict(name="postgres.query.fetchall", resource=query, service="postgres", error=0, span_type="sql"),
),
)
assert_is_measured(self.get_spans()[1])
@skipIf(PSYCOPG2_VERSION < (2, 5), "context manager not available in psycopg2==2.4")
def test_cursor_ctx_manager(self):
conn = self._get_conn()
t = type(conn.cursor())
with conn.cursor() as cur:
assert t == type(cur), "{} != {}".format(t, type(cur))
cur.execute(query="""select 'blah'""")
rows = cur.fetchall()
assert len(rows) == 1, rows
assert rows[0][0] == "blah"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query"),
)
def test_disabled_execute(self):
conn = self._get_conn()
self.tracer.enabled = False
conn.cursor().execute(query="""select 'blah'""")
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
@skipIf(PSYCOPG2_VERSION < (2, 5), "_json is not available in psycopg2==2.4")
def test_manual_wrap_extension_types(self):
conn = self._get_conn()
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
# NOTE: this will crash if it doesn't work.
extras.register_default_json(conn)
def test_manual_wrap_extension_adapt(self):
conn = self._get_conn()
# items = _ext.adapt([1, 2, 3])
# items.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
items = extensions.adapt([1, 2, 3])
items.prepare(conn)
# NOTE: this will crash if it doesn't work.
# binary.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
binary = extensions.adapt(b"12345")
binary.prepare(conn)
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident(self):
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# NOTE: this will crash if it doesn't work.
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
def test_connect_factory(self):
services = ["db", "another"]
for service in services:
conn = self._get_conn(service=service)
self.assert_conn_is_traced(conn, service)
def test_commit(self):
conn = self._get_conn()
conn.commit()
self.assert_structure(dict(name="postgres.connection.commit", service=self.TEST_SERVICE))
def test_rollback(self):
conn = self._get_conn()
conn.rollback()
self.assert_structure(dict(name="postgres.connection.rollback", service=self.TEST_SERVICE))
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query(self):
query = SQL(" union all ").join(
[SQL("""select {} as x""").format(Literal("one")), SQL("""select {} as x""").format(Literal("two"))]
)
db = self._get_conn()
with db.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_identifier(self):
db = self._get_conn()
with db.cursor() as cur:
cur.execute("CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);")
cur.execute("INSERT INTO test (name) VALUES (%s);", ("test_case",))
spans = self.get_spans()
assert len(spans) == 2
self.reset()
query = SQL("""select {}, {} from {}""").format(Identifier("id"), Identifier("name"), Identifier("test"))
cur.execute(query=query)
rows = cur.fetchall()
assert rows == [(1, "test_case")]
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@snapshot()
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_encoding(self):
import logging
logger = logging.getLogger()
logger.level = logging.DEBUG
query = SQL(" union all ").join([SQL("""select 'one' as x"""), SQL("""select 'two' as x""")])
conn = psycopg2.connect(**POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
def test_analytics_default(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_app_service(self):
from ddtrace import config
assert config.service == "mysvc"
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service != "mysvc"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_PSYCOPG_SERVICE="mysvc"))
def test_user_specified_service(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service == "mysvc"
@skipIf(PSYCOPG2_VERSION < (2, 5), "Connection context managers not defined in <2.5.")
def test_contextmanager_connection(self):
service = "fo"
with self._get_conn(service=service) as conn:
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident_standalone():
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# TypeError: argument 2 must be a connection or a cursor
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
| true
| true
|
790d5c7b4e13d0e262feacd50602645e6abc25e3
| 403
|
py
|
Python
|
exceptions.py
|
Adriantsh/astr-119
|
e4ffd18f62d47a06a89732294cdd425fe487a8b0
|
[
"MIT"
] | null | null | null |
exceptions.py
|
Adriantsh/astr-119
|
e4ffd18f62d47a06a89732294cdd425fe487a8b0
|
[
"MIT"
] | 3
|
2020-10-08T04:18:57.000Z
|
2020-10-08T23:05:59.000Z
|
exceptions.py
|
Adriantsh/astr-119
|
e4ffd18f62d47a06a89732294cdd425fe487a8b0
|
[
"MIT"
] | null | null | null |
#python exceptions let you deal with
#unexpected results
try:
print(a) #this will throw an exception since a is not found
except:
print("a is not defined!")
#there are specific errors in python
try:
print(a) #this will throw a NameError
except NameError:
print("a is still not defined")
except:
print("Something else went wrong.")
#this will break our program
#since a is not defined
print(a)
| 21.210526
| 61
| 0.744417
|
try:
print(a)
except:
print("a is not defined!")
try:
print(a)
except NameError:
print("a is still not defined")
except:
print("Something else went wrong.")
print(a)
| true
| true
|
790d5d1d5ad05b3877b8430984bb1e4af05d3d0e
| 8,799
|
py
|
Python
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 7
|
2016-07-17T02:34:54.000Z
|
2019-08-13T07:58:37.000Z
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
class ParticleData(object):
""" Class for holding particle data such as charge.
"""
def __init__(self, charge=0):
self.charge=charge
def __repr__(self):
return "charge="+str(self.charge)
class ParticleDataList(object):
""" Class for generic handling particle ids, names and properties.
Multiple ids can be mapped to multiple names of particle.
First name/id in the list is the default name. But additional names/ids can be given.
An examples can be found in the defaultParticleDataList.
"""
def __init__(self, list=None):
""" A list of particle ids and names can be given to the constructor.
"""
self._list = []
        if list is not None:
self._list = list
def setList(self, list):
self._list = list
def getList(self):
return self._list
def addParticle(self, ids, names, particleData):
""" Add a paricle with (multiple) ids and names to the list.
"""
if not (isinstance(ids,list) and isinstance(names,list)):
raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
self._list += [(ids, names, particleData)]
def getDefaultName(self, name):
""" Return the default (first in list) name given any of the particle's names.
"""
for items in self._list:
if name in items[1]:
return items[1][0]
return name
def getDefaultId(self, id):
""" Return the default (first in list) id given any of the particle's ids.
"""
for items in self._list:
if id in items[0]:
return items[0][0]
return id
def getIdFromName(self, name):
""" Return the default (first in list) id given any of the particle's names.
"""
for items in self._list:
if name in items[1]:
return items[0][0]
return 0
def getNameFromId(self, id):
""" Return the default (first in list) name given any of the particle's ids.
"""
for items in self._list:
if id in items[0]:
return items[1][0]
return "unknown"
def getParticleDataFromId(self, id):
for items in self._list:
if id in items[0]:
return items[2]
def isQuarkId(self, id):
return abs(id) in [1, 2, 3, 4, 5, 6]
def isLeptonId(self, id):
return abs(id) in [11, 12, 13, 14, 15, 16]
def isGluonId(self, id):
return abs(id) in [21, 9]
def isBosonId(self, id):
return abs(id) in [21, 9, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37]
def isPhotonId(self, id):
return id == 22
def isHiggsId(self, id):
return abs(id) in [25, 35, 36, 37]
def isSusyId(self, id):
return abs(id) in [1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000013, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039]
defaultQuarkDataList = ParticleDataList([
([1, - 1], ["d", "d_quark", "dbar"], ParticleData(1.0/3.0)),
([2, - 2], ["u", "u_quark", "ubar"], ParticleData(2.0/3.0)),
([3, - 3], ["s", "s_quark", "sbar"], ParticleData(1.0/3.0)),
([4, - 4], ["c", "c_quark", "cbar"], ParticleData(2.0/3.0)),
([5, - 5], ["b", "b_quark", "bbar"], ParticleData(1.0/3.0)),
([6, - 6], ["t", "t_quark", "tbar"], ParticleData(2.0/3.0))
])
defaultLeptonDataList = ParticleDataList([
([11, - 11], ["e","electron", "Electron", "e+", "e-"], ParticleData(1)),
([12, - 12], ["nu_e", "Electron_neutrino", "electron_neutrino", "nu_electron"], ParticleData(0)),
([13, - 13], ["mu", "Muon", "muon", "mu+", "mu-"], ParticleData(1)),
([14, - 14], ["nu_mu", "nu_muon", "Muon_neutrino", "muon_neutrino"], ParticleData(0)),
([15, - 15], ["tau", "Tau", "tau+", "tau-"], ParticleData(1)),
([16, - 16], ["nu_tau", "Tau_neutrino", "tau_neutrino"], ParticleData(0))
])
defaultBosonDataList = ParticleDataList([
([21, 9], ["g", "Gluon", "gluon"], ParticleData(0)),
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(0))
])
defaultHadronDataList = ParticleDataList([
([111], ["pi0", "Pi0"], ParticleData(0)),
([112], ["pi+", "Pi+"], ParticleData(1)),
([221], ["eta", "Eta"], ParticleData(0)),
([130], ["K0_L"], ParticleData(0)),
([310], ["K0_S"], ParticleData(0)),
([311], ["K0"], ParticleData(0)),
([321], ["K+"], ParticleData(1)),
([411], ["D0"], ParticleData(0)),
([421], ["D+"], ParticleData(1)),
([511], ["B0"], ParticleData(0)),
([521], ["B+"], ParticleData(1)),
([2212], ["p","Proton","proton"], ParticleData(1)),
([2112], ["n","Neutron","neutron"], ParticleData(0)),
([2224], ["Delta++"], ParticleData(2)),
([2214], ["Delta+"], ParticleData(1)),
([2114], ["Delta0"], ParticleData(0)),
([1114], ["Delta-"], ParticleData(1))
])
defaultExtensionDataList = ParticleDataList([
([32], ["Z'", "Z_prime"], ParticleData(0)),
([33], ["Z''", "Z_primeprime"], ParticleData(0)),
([34, - 34], ["W'", "W_prime", "W'+", "W'-"], ParticleData(1)),
([37, - 37], ["H+", "Charged_Higgs", "H+", "H-"], ParticleData(1)),
([35], ["H0", "Neutral_Higgs_H", "H"], ParticleData(0)),
([36], ["A0", "Neutral_Higgs_A", "A"], ParticleData(0))
])
defaultSusyDataList = ParticleDataList([
([1000001, - 1000001], ["d_squark_L", "d~_L", "d~_L_bar"], ParticleData(1.0/3.0)),
([1000002, - 1000002], ["u_squark_L", "u~_L", "u~_L_bar"], ParticleData(2.0/3.0)),
([1000003, - 1000003], ["s_squark_L", "s~_L", "s~_L_bar"], ParticleData(1.0/3.0)),
([1000004, - 1000004], ["c_squark_L", "c~_L", "c~_L_bar"], ParticleData(2.0/3.0)),
([1000005, - 1000005], ["sbottom_L", "b~_1", "b~_1_bar"], ParticleData(1.0/3.0)),
([1000006, - 1000006], ["stop_L", "t~_1", "t~_1_bar"], ParticleData(2.0/3.0)),
([1000011, - 1000011], ["Selectron_L", "selectron_L", "e~_L", "e~_L+", "e~_L-"], ParticleData(1)),
([1000012, - 1000012], ["Electron_sneutrino", "electron_sneutrino", "nu~_e_L"], ParticleData(0)),
([1000013, - 1000013], ["Smuon_L", "smuon_L", "mu~_L", "mu~_L+", "mu~_L-"], ParticleData(1)),
([1000014, - 1000014], ["Muon_sneutrino", "muon_sneutrino", "nu~_mu_L"], ParticleData(0)),
([1000015, - 1000015], ["Stau_1", "stau_1", "tau~_1+", "tau~_1-"], ParticleData(1)),
([1000016, - 1000016], ["Tau_sneutrino", "tau_sneutrino", "nu~_tau_L"], ParticleData(0)),
    ([2000001, - 2000001], ["d_squark_R", "d~_R", "d~_R_bar"], ParticleData(1.0/3.0)),
    ([2000002, - 2000002], ["u_squark_R", "u~_R", "u~_R_bar"], ParticleData(2.0/3.0)),
    ([2000003, - 2000003], ["s_squark_R", "s~_R", "s~_R_bar"], ParticleData(1.0/3.0)),
    ([2000004, - 2000004], ["c_squark_R", "c~_R", "c~_R_bar"], ParticleData(2.0/3.0)),
([2000005, - 2000005], ["sbottom_R", "b~_2", "b~_2_bar"], ParticleData(1.0/3.0)),
([2000006, - 2000006], ["stop_R", "t~_2", "t~_2_bar"], ParticleData(2.0/3.0)),
([2000011, - 2000011], ["Selectron_R", "selectron_R", "e~_R", "e~_R+", "e~_R-"], ParticleData(1)),
([1000013, - 1000013], ["Smuon_R", "smuon_R", "mu~_L", "mu~_R+", "mu~_R-"], ParticleData(1)),
([1000015, - 1000015], ["Stau_2", "stau_2", "tau~_2+", "tau~_2 -"], ParticleData(1)),
([1000021], ["Gluino", "gluino", "g~"], ParticleData(0)),
([1000022, - 1000022], ["Neutralino_1", "neutralino_1", "chi~_1"], ParticleData(0)),
([1000023, - 1000023], ["Neutralino_2", "neutralino_2", "chi~_2"], ParticleData(0)),
([1000025, - 1000025], ["Neutralino_3", "neutralino_3", "chi~_3"], ParticleData(0)),
    ([1000035, - 1000035], ["Neutralino_4", "neutralino_4", "chi~_4"], ParticleData(0)),
([1000024, - 1000024], ["Chargino_1", "chargino_1", "chi~_1+", "chi~_1-"], ParticleData(1)),
([1000037, - 1000037], ["Chargino_2", "chargino_2", "chi~_2+", "chi~_2-"], ParticleData(1)),
([1000039], ["Gravitino", "gravitino", "G"], ParticleData(0))
])
defaultParticleDataList = ParticleDataList(
defaultQuarkDataList.getList() +
defaultLeptonDataList.getList() +
defaultBosonDataList.getList() +
defaultHadronDataList.getList() +
defaultExtensionDataList.getList() +
defaultSusyDataList.getList())
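# Illustrative lookups against the combined list (a sketch, not part of the
# original module):
#   defaultParticleDataList.getNameFromId(11)                 # -> "e"
#   defaultParticleDataList.getIdFromName("electron")         # -> 11
#   defaultParticleDataList.getParticleDataFromId(11).charge  # -> 1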
partonParticleDataList = ParticleDataList([
([1, - 1, 2, - 2, 3, - 3, 4, - 4, 21, 9], ["parton", "d", "dbar", "u", "ubar", "s", "sbar", "c", "cbar", "b", "bbar", "t", "tbar", "gluon", "g"], ParticleData())
] +
defaultLeptonDataList.getList() + [
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(1))
])
| 42.921951
| 278
| 0.585521
|
class ParticleData(object):
def __init__(self, charge=0):
self.charge=charge
def __repr__(self):
return "charge="+str(self.charge)
class ParticleDataList(object):
def __init__(self, list=None):
self._list = []
        if list is not None:
self._list = list
def setList(self, list):
self._list = list
def getList(self):
return self._list
def addParticle(self, ids, names, particleData):
if not (isinstance(ids,list) and isinstance(names,list)):
raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
self._list += [(ids, names, particleData)]
def getDefaultName(self, name):
for items in self._list:
if name in items[1]:
return items[1][0]
return name
def getDefaultId(self, id):
for items in self._list:
if id in items[0]:
return items[0][0]
return id
def getIdFromName(self, name):
for items in self._list:
if name in items[1]:
return items[0][0]
return 0
def getNameFromId(self, id):
for items in self._list:
if id in items[0]:
return items[1][0]
return "unknown"
def getParticleDataFromId(self, id):
for items in self._list:
if id in items[0]:
return items[2]
def isQuarkId(self, id):
return abs(id) in [1, 2, 3, 4, 5, 6]
def isLeptonId(self, id):
return abs(id) in [11, 12, 13, 14, 15, 16]
def isGluonId(self, id):
return abs(id) in [21, 9]
def isBosonId(self, id):
return abs(id) in [21, 9, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37]
def isPhotonId(self, id):
return id == 22
def isHiggsId(self, id):
return abs(id) in [25, 35, 36, 37]
def isSusyId(self, id):
return abs(id) in [1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000013, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039]
defaultQuarkDataList = ParticleDataList([
([1, - 1], ["d", "d_quark", "dbar"], ParticleData(1.0/3.0)),
([2, - 2], ["u", "u_quark", "ubar"], ParticleData(2.0/3.0)),
([3, - 3], ["s", "s_quark", "sbar"], ParticleData(1.0/3.0)),
([4, - 4], ["c", "c_quark", "cbar"], ParticleData(2.0/3.0)),
([5, - 5], ["b", "b_quark", "bbar"], ParticleData(1.0/3.0)),
([6, - 6], ["t", "t_quark", "tbar"], ParticleData(2.0/3.0))
])
defaultLeptonDataList = ParticleDataList([
([11, - 11], ["e","electron", "Electron", "e+", "e-"], ParticleData(1)),
([12, - 12], ["nu_e", "Electron_neutrino", "electron_neutrino", "nu_electron"], ParticleData(0)),
([13, - 13], ["mu", "Muon", "muon", "mu+", "mu-"], ParticleData(1)),
([14, - 14], ["nu_mu", "nu_muon", "Muon_neutrino", "muon_neutrino"], ParticleData(0)),
([15, - 15], ["tau", "Tau", "tau+", "tau-"], ParticleData(1)),
([16, - 16], ["nu_tau", "Tau_neutrino", "tau_neutrino"], ParticleData(0))
])
defaultBosonDataList = ParticleDataList([
([21, 9], ["g", "Gluon", "gluon"], ParticleData(0)),
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(0))
])
defaultHadronDataList = ParticleDataList([
([111], ["pi0", "Pi0"], ParticleData(0)),
([112], ["pi+", "Pi+"], ParticleData(1)),
([221], ["eta", "Eta"], ParticleData(0)),
([130], ["K0_L"], ParticleData(0)),
([310], ["K0_S"], ParticleData(0)),
([311], ["K0"], ParticleData(0)),
([321], ["K+"], ParticleData(1)),
([411], ["D0"], ParticleData(0)),
([421], ["D+"], ParticleData(1)),
([511], ["B0"], ParticleData(0)),
([521], ["B+"], ParticleData(1)),
([2212], ["p","Proton","proton"], ParticleData(1)),
([2112], ["n","Neutron","neutron"], ParticleData(0)),
([2224], ["Delta++"], ParticleData(2)),
([2214], ["Delta+"], ParticleData(1)),
([2114], ["Delta0"], ParticleData(0)),
([1114], ["Delta-"], ParticleData(1))
])
defaultExtensionDataList = ParticleDataList([
([32], ["Z'", "Z_prime"], ParticleData(0)),
([33], ["Z''", "Z_primeprime"], ParticleData(0)),
([34, - 34], ["W'", "W_prime", "W'+", "W'-"], ParticleData(1)),
([37, - 37], ["H+", "Charged_Higgs", "H+", "H-"], ParticleData(1)),
([35], ["H0", "Neutral_Higgs_H", "H"], ParticleData(0)),
([36], ["A0", "Neutral_Higgs_A", "A"], ParticleData(0))
])
defaultSusyDataList = ParticleDataList([
([1000001, - 1000001], ["d_squark_L", "d~_L", "d~_L_bar"], ParticleData(1.0/3.0)),
([1000002, - 1000002], ["u_squark_L", "u~_L", "u~_L_bar"], ParticleData(2.0/3.0)),
([1000003, - 1000003], ["s_squark_L", "s~_L", "s~_L_bar"], ParticleData(1.0/3.0)),
([1000004, - 1000004], ["c_squark_L", "c~_L", "c~_L_bar"], ParticleData(2.0/3.0)),
([1000005, - 1000005], ["sbottom_L", "b~_1", "b~_1_bar"], ParticleData(1.0/3.0)),
([1000006, - 1000006], ["stop_L", "t~_1", "t~_1_bar"], ParticleData(2.0/3.0)),
([1000011, - 1000011], ["Selectron_L", "selectron_L", "e~_L", "e~_L+", "e~_L-"], ParticleData(1)),
([1000012, - 1000012], ["Electron_sneutrino", "electron_sneutrino", "nu~_e_L"], ParticleData(0)),
([1000013, - 1000013], ["Smuon_L", "smuon_L", "mu~_L", "mu~_L+", "mu~_L-"], ParticleData(1)),
([1000014, - 1000014], ["Muon_sneutrino", "muon_sneutrino", "nu~_mu_L"], ParticleData(0)),
([1000015, - 1000015], ["Stau_1", "stau_1", "tau~_1+", "tau~_1-"], ParticleData(1)),
([1000016, - 1000016], ["Tau_sneutrino", "tau_sneutrino", "nu~_tau_L"], ParticleData(0)),
    ([2000001, - 2000001], ["d_squark_R", "d~_R", "d~_R_bar"], ParticleData(1.0/3.0)),
    ([2000002, - 2000002], ["u_squark_R", "u~_R", "u~_R_bar"], ParticleData(2.0/3.0)),
    ([2000003, - 2000003], ["s_squark_R", "s~_R", "s~_R_bar"], ParticleData(1.0/3.0)),
    ([2000004, - 2000004], ["c_squark_R", "c~_R", "c~_R_bar"], ParticleData(2.0/3.0)),
([2000005, - 2000005], ["sbottom_R", "b~_2", "b~_2_bar"], ParticleData(1.0/3.0)),
([2000006, - 2000006], ["stop_R", "t~_2", "t~_2_bar"], ParticleData(2.0/3.0)),
([2000011, - 2000011], ["Selectron_R", "selectron_R", "e~_R", "e~_R+", "e~_R-"], ParticleData(1)),
([1000013, - 1000013], ["Smuon_R", "smuon_R", "mu~_L", "mu~_R+", "mu~_R-"], ParticleData(1)),
([1000015, - 1000015], ["Stau_2", "stau_2", "tau~_2+", "tau~_2 -"], ParticleData(1)),
([1000021], ["Gluino", "gluino", "g~"], ParticleData(0)),
([1000022, - 1000022], ["Neutralino_1", "neutralino_1", "chi~_1"], ParticleData(0)),
([1000023, - 1000023], ["Neutralino_2", "neutralino_2", "chi~_2"], ParticleData(0)),
([1000025, - 1000025], ["Neutralino_3", "neutralino_3", "chi~_3"], ParticleData(0)),
    ([1000035, - 1000035], ["Neutralino_4", "neutralino_4", "chi~_4"], ParticleData(0)),
([1000024, - 1000024], ["Chargino_1", "chargino_1", "chi~_1+", "chi~_1-"], ParticleData(1)),
([1000037, - 1000037], ["Chargino_2", "chargino_2", "chi~_2+", "chi~_2-"], ParticleData(1)),
([1000039], ["Gravitino", "gravitino", "G"], ParticleData(0))
])
defaultParticleDataList = ParticleDataList(
defaultQuarkDataList.getList() +
defaultLeptonDataList.getList() +
defaultBosonDataList.getList() +
defaultHadronDataList.getList() +
defaultExtensionDataList.getList() +
defaultSusyDataList.getList())
partonParticleDataList = ParticleDataList([
([1, - 1, 2, - 2, 3, - 3, 4, - 4, 21, 9], ["parton", "d", "dbar", "u", "ubar", "s", "sbar", "c", "cbar", "b", "bbar", "t", "tbar", "gluon", "g"], ParticleData())
] +
defaultLeptonDataList.getList() + [
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(1))
])
| true
| true
|
790d6040ee982da08b0b75a51da6af5a7a5cbee4
| 1,155
|
py
|
Python
|
script/eval_test.py
|
rozentill/Front2Back
|
c14e77d3cea923026129de9f04f32327d6ee4381
|
[
"Apache-2.0"
] | 5
|
2020-04-01T12:48:01.000Z
|
2022-03-29T07:43:27.000Z
|
script/eval_test.py
|
rozentill/Front2Back
|
c14e77d3cea923026129de9f04f32327d6ee4381
|
[
"Apache-2.0"
] | null | null | null |
script/eval_test.py
|
rozentill/Front2Back
|
c14e77d3cea923026129de9f04f32327d6ee4381
|
[
"Apache-2.0"
] | null | null | null |
import os
from os.path import join
import csv
def main_eval_gt():
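    # Run the locally built "metro" mesh-distance tool (path set below) over
    # every reconstructed .ply in results\<class> against its ground truth in
    # rot_gt\<class>, writing one "No, Error" row per item to a per-class CSV.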
metro = "metro\\metro"
cls_set = [
"02691156",
"02828884",
"02933112",
"02958343",
"03001627",
"03211117",
"03636649",
"03691459",
"04090263",
"04256520",
"04379243",
"04401088",
"04530566"
]
for c in range(0, 13):
cls_name = cls_set[c]
ref_dir = "rot_gt\\%s"%cls_name
res_dir = "results\\%s"%cls_name
header = ["No", "Error"]
with open(join(res_dir, "metro_%s.csv"%cls_name), 'w', newline="") as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
items = os.listdir(ref_dir)
for item in items:
if "samples" in item:
continue
print(item)
filename = join(res_dir, item[:-4]+".ply")
if not os.path.exists(filename):
continue
os.system("%s %s %s %s.txt -n10000"%(metro, filename, join(ref_dir, item), join(res_dir,"output", item[:-4])))
score = 0
with open(join(res_dir,"output", item[:-4]+".txt"), 'r') as f_score:
letter = f_score.read()
if letter == "":
continue
score = float(letter)
f_csv.writerow([item[:-4], score])
if __name__ == '__main__':
main_eval_gt()
| 19.576271
| 114
| 0.603463
|
import os
from os.path import join
import csv
def main_eval_gt():
metro = "metro\\metro"
cls_set = [
"02691156",
"02828884",
"02933112",
"02958343",
"03001627",
"03211117",
"03636649",
"03691459",
"04090263",
"04256520",
"04379243",
"04401088",
"04530566"
]
for c in range(0, 13):
cls_name = cls_set[c]
ref_dir = "rot_gt\\%s"%cls_name
res_dir = "results\\%s"%cls_name
header = ["No", "Error"]
with open(join(res_dir, "metro_%s.csv"%cls_name), 'w', newline="") as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
items = os.listdir(ref_dir)
for item in items:
if "samples" in item:
continue
print(item)
filename = join(res_dir, item[:-4]+".ply")
if not os.path.exists(filename):
continue
os.system("%s %s %s %s.txt -n10000"%(metro, filename, join(ref_dir, item), join(res_dir,"output", item[:-4])))
score = 0
with open(join(res_dir,"output", item[:-4]+".txt"), 'r') as f_score:
letter = f_score.read()
if letter == "":
continue
score = float(letter)
f_csv.writerow([item[:-4], score])
if __name__ == '__main__':
main_eval_gt()
| true
| true
|
790d604b881152b566a82646c92e8db868f9689f
| 6,830
|
py
|
Python
|
fcos_core/modeling/roi_heads/box_head/inference.py
|
qilei123/FCOS
|
53d355456460a2a45830e3953508f41173ddb9bf
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/modeling/roi_heads/box_head/inference.py
|
qilei123/FCOS
|
53d355456460a2a45830e3953508f41173ddb9bf
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/modeling/roi_heads/box_head/inference.py
|
qilei123/FCOS
|
53d355456460a2a45830e3953508f41173ddb9bf
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_nms
from fcos_core.structures.boxlist_ops import cat_boxlist
from fcos_core.modeling.box_coder import BoxCoder
class PostProcessor(nn.Module):
"""
From a set of classification scores, box regression and proposals,
computes the post-processed boxes, and applies NMS to obtain the
final results
"""
def __init__(
self,
score_thresh=0.05,
nms=0.5,
detections_per_img=100,
box_coder=None,
cls_agnostic_bbox_reg=False,
bbox_aug_enabled=False
):
"""
Arguments:
score_thresh (float)
nms (float)
detections_per_img (int)
box_coder (BoxCoder)
"""
super(PostProcessor, self).__init__()
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if box_coder is None:
box_coder = BoxCoder(weights=(10., 10., 5., 5.))
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.bbox_aug_enabled = bbox_aug_enabled
def forward(self, x, boxes):
"""
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
"""
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
# TODO think about a representation of batch of boxes
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, -4:]
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
if not self.bbox_aug_enabled: # If bbox aug is enabled, we will do it later
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
def prepare_boxlist(self, boxes, scores, image_shape):
"""
Returns BoxList from `boxes` and adds probability scores information
as an extra field
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
`scores` has shape (#detection, #classes), where each row represents a list
of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]` corresponds to the
box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
boxlist = BoxList(boxes, image_shape, mode="xyxy")
boxlist.add_field("scores", scores)
return boxlist
def filter_results(self, boxlist, num_classes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
"""
# unwrap the boxlist to avoid additional overhead.
# if we had multi-class NMS, we could perform this directly on the boxlist
boxes = boxlist.bbox.reshape(-1, num_classes * 4)
scores = boxlist.get_field("scores").reshape(-1, num_classes)
device = scores.device
result = []
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
inds_all = scores > self.score_thresh
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
boxlist_for_class.add_field("scores", scores_j)
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field(
"labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
)
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.detections_per_img > 0:
cls_scores = result.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
def make_roi_box_post_processor(cfg):
use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
nms_thresh = cfg.MODEL.ROI_HEADS.NMS
detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED
postprocessor = PostProcessor(
score_thresh,
nms_thresh,
detections_per_img,
box_coder,
cls_agnostic_bbox_reg,
bbox_aug_enabled
)
return postprocessor
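# Typical wiring (an illustrative sketch; cfg is the detectron-style config
# consumed above):
#   post_processor = make_roi_box_post_processor(cfg)
#   detections = post_processor((class_logits, box_regression), proposals)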
| 39.479769
| 89
| 0.623865
|
import torch
import torch.nn.functional as F
from torch import nn
from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_nms
from fcos_core.structures.boxlist_ops import cat_boxlist
from fcos_core.modeling.box_coder import BoxCoder
class PostProcessor(nn.Module):
def __init__(
self,
score_thresh=0.05,
nms=0.5,
detections_per_img=100,
box_coder=None,
cls_agnostic_bbox_reg=False,
bbox_aug_enabled=False
):
super(PostProcessor, self).__init__()
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if box_coder is None:
box_coder = BoxCoder(weights=(10., 10., 5., 5.))
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.bbox_aug_enabled = bbox_aug_enabled
def forward(self, x, boxes):
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, -4:]
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
if not self.bbox_aug_enabled:
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
def prepare_boxlist(self, boxes, scores, image_shape):
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
boxlist = BoxList(boxes, image_shape, mode="xyxy")
boxlist.add_field("scores", scores)
return boxlist
def filter_results(self, boxlist, num_classes):
boxes = boxlist.bbox.reshape(-1, num_classes * 4)
scores = boxlist.get_field("scores").reshape(-1, num_classes)
device = scores.device
result = []
inds_all = scores > self.score_thresh
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
boxlist_for_class.add_field("scores", scores_j)
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field(
"labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
)
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.detections_per_img > 0:
cls_scores = result.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
def make_roi_box_post_processor(cfg):
use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
nms_thresh = cfg.MODEL.ROI_HEADS.NMS
detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED
postprocessor = PostProcessor(
score_thresh,
nms_thresh,
detections_per_img,
box_coder,
cls_agnostic_bbox_reg,
bbox_aug_enabled
)
return postprocessor
| true
| true
|
790d60be3d2c7f08742921cb5bba0c23b86c21a9
| 1,672
|
py
|
Python
|
main.py
|
shihu/qr-reader
|
66a7526e31c854f4b067ebe8f3254ab579dbe464
|
[
"MIT"
] | null | null | null |
main.py
|
shihu/qr-reader
|
66a7526e31c854f4b067ebe8f3254ab579dbe464
|
[
"MIT"
] | null | null | null |
main.py
|
shihu/qr-reader
|
66a7526e31c854f4b067ebe8f3254ab579dbe464
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from flask import Flask, Response
from pyzbar import pyzbar
from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import numpy as np
import cv2
import time
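# Initialize the Pi camera, give the sensor a moment to warm up, then serve
# an MJPEG stream over HTTP with any decoded QR/barcodes highlighted.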
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
app = Flask(__name__)
@app.route('/stream')
def stream():
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def gen():
while True:
frame = get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def get_frame():
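    # Grab one BGR frame, decode any barcodes, draw their bounding boxes,
    # JPEG-encode the result, and truncate the capture buffer for reuse.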
camera.capture(rawCapture, format="bgr", use_video_port=True)
frame = rawCapture.array
decoded_objs = decode(frame)
frame = display(frame, decoded_objs)
ret, jpeg = cv2.imencode('.jpg', frame)
rawCapture.truncate(0)
return jpeg.tobytes()
def decode(frame):
decoded_objs = pyzbar.decode(frame, scan_locations=True)
for obj in decoded_objs:
print(datetime.now().strftime('%H:%M:%S.%f'))
print('Type: ', obj.type)
print('Data: ', obj.data)
return decoded_objs
def display(frame, decoded_objs):
for decoded_obj in decoded_objs:
left, top, width, height = decoded_obj.rect
frame = cv2.rectangle(frame,
(left, top),
(left + width, height + top),
(0, 255, 255), 2)
return frame
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=False, threaded=True)
| 26.539683
| 73
| 0.623206
|
from __future__ import print_function
from flask import Flask, Response
from pyzbar import pyzbar
from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import numpy as np
import cv2
import time
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
app = Flask(__name__)
@app.route('/stream')
def stream():
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def gen():
while True:
frame = get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def get_frame():
camera.capture(rawCapture, format="bgr", use_video_port=True)
frame = rawCapture.array
decoded_objs = decode(frame)
frame = display(frame, decoded_objs)
ret, jpeg = cv2.imencode('.jpg', frame)
rawCapture.truncate(0)
return jpeg.tobytes()
def decode(frame):
decoded_objs = pyzbar.decode(frame, scan_locations=True)
for obj in decoded_objs:
print(datetime.now().strftime('%H:%M:%S.%f'))
print('Type: ', obj.type)
print('Data: ', obj.data)
return decoded_objs
def display(frame, decoded_objs):
for decoded_obj in decoded_objs:
left, top, width, height = decoded_obj.rect
frame = cv2.rectangle(frame,
(left, top),
(left + width, height + top),
(0, 255, 255), 2)
return frame
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=False, threaded=True)
| true
| true
|
790d60c732d0cc1bb267357fc2cc662fad7ff447
| 1,745
|
py
|
Python
|
tortoise/exceptions.py
|
asitm9/tortoise-orm
|
0d14fc0b86852eed3b96989036938d77d248967c
|
[
"Apache-2.0"
] | 2
|
2020-06-24T09:30:52.000Z
|
2020-09-22T13:45:59.000Z
|
tortoise/exceptions.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
tortoise/exceptions.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
class BaseORMException(Exception):
"""
Base ORM Exception.
"""
class FieldError(BaseORMException):
"""
The FieldError exception is raised when there is a problem with a model field.
"""
class ParamsError(BaseORMException):
"""
    The ParamsError is raised when a function cannot be run with the given parameters.
"""
class ConfigurationError(BaseORMException):
"""
The ConfigurationError exception is raised when the configuration of the ORM is invalid.
"""
class TransactionManagementError(BaseORMException):
"""
The TransactionManagementError is raised when any transaction error occurs.
"""
class OperationalError(BaseORMException):
"""
The OperationalError exception is raised when an operational error occurs.
"""
class IntegrityError(OperationalError):
"""
The IntegrityError exception is raised when there is an integrity error.
"""
class NoValuesFetched(OperationalError):
"""
The NoValuesFetched exception is raised when the related model was never fetched.
"""
class MultipleObjectsReturned(OperationalError):
"""
The MultipleObjectsReturned exception is raised when doing a ``.get()`` operation,
and more than one object is returned.
"""
class DoesNotExist(OperationalError):
"""
    The DoesNotExist exception is raised when an expected object is not found, such as in a ``.get()`` operation.
"""
class IncompleteInstanceError(OperationalError):
"""
The IncompleteInstanceError exception is raised when a partial model is attempted to be persisted.
"""
class DBConnectionError(BaseORMException, ConnectionError):
"""
    The DBConnectionError is raised when a problem occurs while connecting to the database.
"""
| 24.236111
| 102
| 0.716905
|
class BaseORMException(Exception):
class FieldError(BaseORMException):
class ParamsError(BaseORMException):
class ConfigurationError(BaseORMException):
class TransactionManagementError(BaseORMException):
class OperationalError(BaseORMException):
class IntegrityError(OperationalError):
class NoValuesFetched(OperationalError):
class MultipleObjectsReturned(OperationalError):
class DoesNotExist(OperationalError):
class IncompleteInstanceError(OperationalError):
class DBConnectionError(BaseORMException, ConnectionError):
| true
| true
|
790d6164c9ea51e68107b6a91784b90f51447447
| 35,023
|
py
|
Python
|
jenkins/bootstrap.py
|
Acidburn0zzz/test-infra
|
ad19d04798049201a82c70639900bba593e740d6
|
[
"Apache-2.0"
] | 1
|
2018-05-25T17:02:06.000Z
|
2018-05-25T17:02:06.000Z
|
jenkins/bootstrap.py
|
Acidburn0zzz/test-infra
|
ad19d04798049201a82c70639900bba593e740d6
|
[
"Apache-2.0"
] | 3
|
2021-03-20T05:23:47.000Z
|
2021-03-20T05:35:10.000Z
|
jenkins/bootstrap.py
|
Acidburn0zzz/test-infra
|
ad19d04798049201a82c70639900bba593e740d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
  # check out repos defined in --repo
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
ORIG_CWD = os.getcwd() # Checkout changes cwd
def read_all(end, stream, append):
"""Read all buffered lines from a stream."""
while not end or time.time() < end:
line = stream.readline()
if not line:
return True # Read everything
# Strip \n at the end if any. Last line of file may not have one.
append(line.rstrip('\n'))
# Is there more on the buffer?
ret = select.select([stream.fileno()], [], [], 0.1)
if not ret[0]:
return False # Cleared buffer but not at the end
return False # Time expired
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60
def terminate(end, proc, kill):
"""Terminate or kill the process after end."""
if not end or time.time() <= end:
return False
if kill: # Process will not die, kill everything
pgid = os.getpgid(proc.pid)
logging.info(
'Kill %d and process group %d', proc.pid, pgid)
os.killpg(pgid, signal.SIGKILL)
proc.kill()
return True
logging.info(
'Terminate %d on timeout', proc.pid)
proc.terminate()
return True
def _call(end, cmd, stdin=None, check=True, output=None, log_failures=True):
"""Start a subprocess."""
logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
begin = time.time()
if end:
end = max(end, time.time() + 60) # Allow at least 60s per command
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE if stdin is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
)
if stdin:
proc.stdin.write(stdin)
proc.stdin.close()
out = []
code = None
timeout = False
reads = {
proc.stderr.fileno(): (proc.stderr, logging.warning),
proc.stdout.fileno(): (
proc.stdout, (out.append if output else logging.info)),
}
while reads:
if terminate(end, proc, timeout):
if timeout: # We killed everything
break
# Give subprocess some cleanup time before killing.
end = time.time() + 15 * 60
timeout = True
ret = select.select(reads, [], [], 0.1)
for fdesc in ret[0]:
if read_all(end, *reads[fdesc]):
reads.pop(fdesc)
if not ret[0] and proc.poll() is not None:
break # process exited without closing pipes (timeout?)
code = proc.wait()
if timeout:
code = code or 124
logging.error('Build timed out')
if code and log_failures:
logging.error('Command failed')
logging.info(
'process %d exited with code %d after %.1fm',
proc.pid, code, elapsed(begin))
out.append('')
lines = output and '\n'.join(out)
if check and code:
raise subprocess.CalledProcessError(code, cmd, lines)
return lines
def ref_has_shas(ref):
"""Determine if a reference specifies shas (contains ':')"""
return isinstance(ref, basestring) and ':' in ref
def pull_numbers(pull):
"""Turn a pull reference list into a list of PR numbers to merge."""
if ref_has_shas(pull):
return [r.split(':')[0] for r in pull.split(',')][1:]
return [str(pull)]
def pull_ref(pull):
"""Turn a PR number of list of refs into specific refs to fetch and check out."""
if isinstance(pull, int) or ',' not in pull:
return ['+refs/pull/%d/merge' % int(pull)], ['FETCH_HEAD']
pulls = pull.split(',')
refs = []
checkouts = []
for ref in pulls:
if ':' in ref: # master:abcd or 1234:abcd
name, sha = ref.split(':')
elif not refs: # master
name, sha = ref, 'FETCH_HEAD'
else:
name = ref
sha = 'refs/pr/%s' % ref
checkouts.append(sha)
if not refs: # First ref should be branch to merge into
refs.append(name)
else: # Subsequent refs should be PR numbers
num = int(name)
refs.append('+refs/pull/%d/head:refs/pr/%d' % (num, num))
return refs, checkouts
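# For example (a sketch of the mapping, not executed here):
#   pull_ref('master:abcd,123:ef01')
#   -> (['master', '+refs/pull/123/head:refs/pr/123'], ['abcd', 'ef01'])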
def branch_ref(branch):
"""Split branch:sha if necessary."""
if ref_has_shas(branch):
split_refs = branch.split(':')
return [split_refs[0]], [split_refs[1]]
return [branch], ['FETCH_HEAD']
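# e.g. branch_ref('release-1.8:deadbeef') -> (['release-1.8'], ['deadbeef'])
#      branch_ref('master')               -> (['master'], ['FETCH_HEAD'])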
def repository(repo, ssh):
"""Return the url associated with the repo."""
if repo.startswith('k8s.io/'):
repo = 'github.com/kubernetes/%s' % (repo[len('k8s.io/'):])
if ssh:
if ":" not in repo:
parts = repo.split('/', 1)
repo = '%s:%s' % (parts[0], parts[1])
return 'git@%s' % repo
return 'https://%s' % repo
def random_sleep(attempt):
"""Sleep 2**attempt seconds with a random fractional offset."""
time.sleep(random.random() + attempt ** 2)
def checkout(call, repo, branch, pull, ssh='', git_cache='', clean=False):
"""Fetch and checkout the repository at the specified branch/pull."""
# pylint: disable=too-many-locals
if bool(branch) == bool(pull):
raise ValueError('Must specify one of --branch or --pull')
if pull:
refs, checkouts = pull_ref(pull)
else:
refs, checkouts = branch_ref(branch)
git = 'git'
if git_cache:
cache_dir = '%s/%s' % (git_cache, repo)
try:
os.makedirs(cache_dir)
except OSError:
pass
call([git, 'init', repo, '--separate-git-dir=%s' % cache_dir])
call(['rm', '-f', '%s/index.lock' % cache_dir])
else:
call([git, 'init', repo])
os.chdir(repo)
if clean:
call([git, 'clean', '-dfx'])
call([git, 'reset', '--hard'])
# To make a merge commit, a user needs to be set. It's okay to use a dummy
# user here, since we're not exporting the history.
call([git, 'config', '--local', 'user.name', 'K8S Bootstrap'])
call([git, 'config', '--local', 'user.email', 'k8s_bootstrap@localhost'])
retries = 3
for attempt in range(retries):
try:
call([git, 'fetch', '--quiet', '--tags', repository(repo, ssh)] + refs)
break
except subprocess.CalledProcessError as cpe:
if attempt >= retries - 1:
raise
if cpe.returncode != 128:
raise
logging.warning('git fetch failed')
random_sleep(attempt)
call([git, 'checkout', '-B', 'test', checkouts[0]])
for ref, head in zip(refs, checkouts)[1:]:
call(['git', 'merge', '--no-ff', '-m', 'Merge %s' % ref, head])
def repos_dict(repos):
"""Returns {"repo1": "branch", "repo2": "pull"}."""
return {r: b or p for (r, (b, p)) in repos.items()}
def start(gsutil, paths, stamp, node_name, version, repos):
"""Construct and upload started.json."""
data = {
'timestamp': int(stamp),
'jenkins-node': node_name,
'node': node_name,
}
if version:
data['repo-version'] = version
data['version'] = version # TODO(fejta): retire
if repos:
pull = repos[repos.main]
if ref_has_shas(pull[1]):
data['pull'] = pull[1]
data['repos'] = repos_dict(repos)
gsutil.upload_json(paths.started, data)
# Upload a link to the build path in the directory
if paths.pr_build_link:
gsutil.upload_text(
paths.pr_build_link,
paths.pr_path,
additional_headers=['-h', 'x-goog-meta-link: %s' % paths.pr_path]
)
class GSUtil(object):
"""A helper class for making gsutil commands."""
gsutil = 'gsutil'
def __init__(self, call):
self.call = call
def stat(self, path):
"""Return metadata about the object, such as generation."""
cmd = [self.gsutil, 'stat', path]
return self.call(cmd, output=True, log_failures=False)
def ls(self, path):
"""List a bucket or subdir."""
cmd = [self.gsutil, 'ls', path]
return self.call(cmd, output=True)
def upload_json(self, path, jdict, generation=None):
"""Upload the dictionary object to path."""
if generation is not None: # generation==0 means object does not exist
gen = ['-h', 'x-goog-if-generation-match:%s' % generation]
else:
gen = []
cmd = [
self.gsutil, '-q',
'-h', 'Content-Type:application/json'] + gen + [
'cp', '-', path]
self.call(cmd, stdin=json.dumps(jdict, indent=2))
def copy_file(self, dest, orig):
"""Copy the file to the specified path using compressed encoding."""
cmd = [self.gsutil, '-q', 'cp', '-Z', orig, dest]
self.call(cmd)
def upload_text(self, path, txt, additional_headers=None, cached=True):
"""Copy the text to path, optionally disabling caching."""
headers = ['-h', 'Content-Type:text/plain']
if not cached:
headers += ['-h', 'Cache-Control:private, max-age=0, no-transform']
if additional_headers:
headers += additional_headers
cmd = [self.gsutil, '-q'] + headers + ['cp', '-', path]
self.call(cmd, stdin=txt)
def cat(self, path, generation):
"""Return contents of path#generation"""
cmd = [self.gsutil, '-q', 'cat', '%s#%s' % (path, generation)]
return self.call(cmd, output=True)
def upload_artifacts(self, gsutil, path, artifacts):
"""Upload artifacts to the specified path."""
# Upload artifacts
if not os.path.isdir(artifacts):
return
try:
# If remote path exists, it will create .../_artifacts subdir instead
gsutil.ls(path)
# Success means remote path exists
remote_base = os.path.basename(path)
local_base = os.path.basename(artifacts)
if remote_base != local_base:
# if basename are different, need to copy things over first.
localpath = artifacts.replace(local_base, remote_base)
os.rename(artifacts, localpath)
artifacts = localpath
path = path[:-len(remote_base + '/')]
except subprocess.CalledProcessError:
            logging.warning('Remote dir %s does not exist yet', path)
cmd = [
self.gsutil, '-m', '-q',
'-o', 'GSUtil:use_magicfile=True',
'cp', '-r', '-c', '-z', 'log,txt,xml',
artifacts, path,
]
self.call(cmd)
def append_result(gsutil, path, build, version, passed):
"""Download a json list and append metadata about this build to it."""
# TODO(fejta): delete the clone of this logic in upload-to-gcs.sh
# (this is update_job_result_cache)
end = time.time() + 300 # try for up to five minutes
errors = 0
while time.time() < end:
if errors:
random_sleep(min(errors, 3))
try:
out = gsutil.stat(path)
gen = re.search(r'Generation:\s+(\d+)', out).group(1)
except subprocess.CalledProcessError:
gen = 0
if gen:
try:
cache = json.loads(gsutil.cat(path, gen))
if not isinstance(cache, list):
raise ValueError(cache)
except ValueError as exc:
logging.warning('Failed to decode JSON: %s', exc)
cache = []
except subprocess.CalledProcessError: # gen doesn't exist
errors += 1
continue
else:
cache = []
cache.append({
'version': version, # TODO(fejta): retire
'job-version': version,
'buildnumber': build,
'passed': bool(passed),
'result': 'SUCCESS' if passed else 'FAILURE',
})
cache = cache[-300:]
try:
gsutil.upload_json(path, cache, generation=gen)
return
except subprocess.CalledProcessError:
logging.warning('Failed to append to %s#%s', path, gen)
errors += 1
def metadata(repos, artifacts, call):
"""Return metadata associated for the build, including inside artifacts."""
path = os.path.join(artifacts or '', 'metadata.json')
meta = None
if os.path.isfile(path):
try:
with open(path) as fp:
meta = json.loads(fp.read())
except (IOError, ValueError):
pass
if not meta or not isinstance(meta, dict):
meta = {}
if repos:
meta['repo'] = repos.main
meta['repos'] = repos_dict(repos)
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['repo-commit'] = commit.strip()
except subprocess.CalledProcessError:
pass
cwd = os.getcwd()
os.chdir(test_infra('.'))
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['infra-commit'] = commit.strip()[:9]
except subprocess.CalledProcessError:
pass
os.chdir(cwd)
return meta
def finish(gsutil, paths, success, artifacts, build, version, repos, call):
"""
Args:
paths: a Paths instance.
success: the build passed if true.
artifacts: a dir containing artifacts to upload.
build: identifier of this build.
version: identifies what version of the code the build tested.
repo: the target repo
"""
if os.path.isdir(artifacts) and any(f for _, _, f in os.walk(artifacts)):
try:
gsutil.upload_artifacts(gsutil, paths.artifacts, artifacts)
except subprocess.CalledProcessError:
logging.warning('Failed to upload artifacts')
meta = metadata(repos, artifacts, call)
if not version:
version = meta.get('job-version')
if not version: # TODO(fejta): retire
version = meta.get('version')
# github.com/kubernetes/release/find_green_build depends on append_result()
# TODO(fejta): reconsider whether this is how we want to solve this problem.
append_result(gsutil, paths.result_cache, build, version, success)
if paths.pr_result_cache:
append_result(gsutil, paths.pr_result_cache, build, version, success)
data = {
# TODO(fejta): update utils.go in contrib to accept a float
'timestamp': int(time.time()),
'result': 'SUCCESS' if success else 'FAILURE',
'passed': bool(success),
'metadata': meta,
}
if version:
data['job-version'] = version
data['version'] = version # TODO(fejta): retire
gsutil.upload_json(paths.finished, data)
# Upload the latest build for the job.
# Do this last, since other tools expect the rest of the data to be
# published when this file is created.
for path in {paths.latest, paths.pr_latest}:
if path:
try:
gsutil.upload_text(path, str(build), cached=False)
except subprocess.CalledProcessError:
logging.warning('Failed to update %s', path)
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def node():
"""Return the name of the node running the build."""
# TODO(fejta): jenkins sets the node name and our infra expect this value.
# TODO(fejta): Consider doing something different here.
if NODE_ENV not in os.environ:
os.environ[NODE_ENV] = ''.join(socket.gethostname().split('.')[:1])
return os.environ[NODE_ENV]
def find_version(call):
"""Determine and return the version of the build."""
# TODO(fejta): once job-version is functional switch this to
# git rev-parse [--short=N] HEAD^{commit}
version_file = 'version'
if os.path.isfile(version_file):
# e2e tests which download kubernetes use this path:
with open(version_file) as fp:
return fp.read().strip()
version_script = 'hack/lib/version.sh'
if os.path.isfile(version_script):
cmd = [
'bash', '-c', (
"""
set -o errexit
set -o nounset
export KUBE_ROOT=.
source %s
kube::version::get_version_vars
echo $KUBE_GIT_VERSION
""" % version_script)
]
return call(cmd, output=True).strip()
return 'unknown'
class Paths(object): # pylint: disable=too-many-instance-attributes,too-few-public-methods
"""Links to remote gcs-paths for uploading results."""
def __init__( # pylint: disable=too-many-arguments
self,
artifacts, # artifacts folder (in build)
build_log, # build-log.txt (in build)
pr_path, # path to build
finished, # finished.json (metadata from end of build)
latest, # latest-build.txt (in job)
pr_build_link, # file containng pr_path (in job directory)
pr_latest, # latest-build.txt (in pr job)
pr_result_cache, # jobResultsCache.json (in pr job)
result_cache, # jobResultsCache.json (cache of latest results in job)
started, # started.json (metadata from start of build)
):
self.artifacts = artifacts
self.build_log = build_log
self.pr_path = pr_path
self.finished = finished
self.latest = latest
self.pr_build_link = pr_build_link
self.pr_latest = pr_latest
self.pr_result_cache = pr_result_cache
self.result_cache = result_cache
self.started = started
def ci_paths(base, job, build):
"""Return a Paths() instance for a continuous build."""
latest = os.path.join(base, job, 'latest-build.txt')
return Paths(
artifacts=os.path.join(base, job, build, 'artifacts'),
build_log=os.path.join(base, job, build, 'build-log.txt'),
pr_path=None,
finished=os.path.join(base, job, build, 'finished.json'),
latest=latest,
pr_build_link=None,
pr_latest=None,
pr_result_cache=None,
result_cache=os.path.join(base, job, 'jobResultsCache.json'),
started=os.path.join(base, job, build, 'started.json'),
)
def pr_paths(base, repos, job, build):
"""Return a Paths() instance for a PR."""
if not repos:
raise ValueError('repos is empty')
repo = repos.main
pull = str(repos[repo][1])
if repo in ['k8s.io/kubernetes', 'kubernetes/kubernetes']:
prefix = ''
elif repo.startswith('k8s.io/'):
prefix = repo[len('k8s.io/'):]
elif repo.startswith('kubernetes/'):
prefix = repo[len('kubernetes/'):]
elif repo.startswith('github.com/'):
prefix = repo[len('github.com/'):].replace('/', '_')
else:
prefix = repo.replace('/', '_')
# Batch merges are those with more than one PR specified.
pr_nums = pull_numbers(pull)
if len(pr_nums) > 1:
pull = os.path.join(prefix, 'batch')
else:
pull = os.path.join(prefix, pr_nums[0])
pr_path = os.path.join(base, 'pull', pull, job, build)
result_cache = os.path.join(
base, 'directory', job, 'jobResultsCache.json')
pr_result_cache = os.path.join(
base, 'pull', pull, job, 'jobResultsCache.json')
return Paths(
artifacts=os.path.join(pr_path, 'artifacts'),
build_log=os.path.join(pr_path, 'build-log.txt'),
pr_path=pr_path,
finished=os.path.join(pr_path, 'finished.json'),
latest=os.path.join(base, 'directory', job, 'latest-build.txt'),
pr_build_link=os.path.join(base, 'directory', job, '%s.txt' % build),
pr_latest=os.path.join(base, 'pull', pull, job, 'latest-build.txt'),
pr_result_cache=pr_result_cache,
result_cache=result_cache,
started=os.path.join(pr_path, 'started.json'),
)
BUILD_ENV = 'BUILD_NUMBER'
BOOTSTRAP_ENV = 'BOOTSTRAP_MIGRATION'
CLOUDSDK_ENV = 'CLOUDSDK_CONFIG'
GCE_KEY_ENV = 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE'
GUBERNATOR = 'https://k8s-gubernator.appspot.com/build'
HOME_ENV = 'HOME'
JOB_ENV = 'JOB_NAME'
NODE_ENV = 'NODE_NAME'
SERVICE_ACCOUNT_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
WORKSPACE_ENV = 'WORKSPACE'
GCS_ARTIFACTS_ENV = 'GCS_ARTIFACTS_DIR'
def build_name(started):
"""Return the unique(ish) string representing this build."""
# TODO(fejta): right now jenkins sets the BUILD_NUMBER and does this
# in an environment variable. Consider migrating this to a
# bootstrap.py flag
if BUILD_ENV not in os.environ:
# Automatically generate a build number if none is set
uniq = '%x-%d' % (hash(node()), os.getpid())
autogen = time.strftime('%Y%m%d-%H%M%S-' + uniq, time.gmtime(started))
os.environ[BUILD_ENV] = autogen
return os.environ[BUILD_ENV]
def setup_credentials(call, robot, upload):
"""Activate the service account unless robot is none."""
# TODO(fejta): stop activating inside the image
# TODO(fejta): allow use of existing gcloud auth
if robot:
os.environ[SERVICE_ACCOUNT_ENV] = robot
if not os.getenv(SERVICE_ACCOUNT_ENV) and upload:
logging.warning('Cannot --upload=%s, no active gcloud account.', upload)
raise ValueError('--upload requires --service-account')
if not os.getenv(SERVICE_ACCOUNT_ENV) and not upload:
logging.info('Will not upload results.')
return
if not os.path.isfile(os.environ[SERVICE_ACCOUNT_ENV]):
raise IOError(
'Cannot find service account credentials',
os.environ[SERVICE_ACCOUNT_ENV],
'Create service account and then create key at '
'https://console.developers.google.com/iam-admin/serviceaccounts/project', # pylint: disable=line-too-long
)
call([
'gcloud',
'auth',
'activate-service-account',
'--key-file=%s' % os.environ[SERVICE_ACCOUNT_ENV],
])
try: # Old versions of gcloud may not support this value
account = call(
['gcloud', 'config', 'get-value', 'account'], output=True).strip()
except subprocess.CalledProcessError:
account = 'unknown'
logging.info('Will upload results to %s using %s', upload, account)
def setup_logging(path):
"""Initialize logging to screen and path."""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
build_log = logging.FileHandler(filename=path, mode='w')
build_log.setLevel(logging.INFO)
formatter = logging.Formatter(fmt, datefmt=datefmt)
build_log.setFormatter(formatter)
logging.getLogger('').addHandler(build_log)
return build_log
def setup_magic_environment(job):
"""Set magic environment variables scripts currently expect."""
home = os.environ[HOME_ENV]
# TODO(fejta): jenkins sets these values. Consider migrating to using
# a secret volume instead and passing the path to this volume
# into bootstrap.py as a flag.
os.environ.setdefault(
GCE_KEY_ENV,
os.path.join(home, '.ssh/google_compute_engine'),
)
os.environ.setdefault(
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/google_compute_engine.pub'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PRIVATE_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa.pub'),
)
cwd = os.getcwd()
# TODO(fejta): jenkins sets WORKSPACE and pieces of our infra expect this
# value. Consider doing something else in the future.
os.environ[WORKSPACE_ENV] = cwd
# TODO(fejta): Previously dockerized-e2e-runner.sh also sets HOME to WORKSPACE,
# probably to minimize leakage between jobs.
# Consider accomplishing this another way.
os.environ[HOME_ENV] = cwd
# TODO(fejta): jenkins sets JOB_ENV and pieces of our infra expect this
# value. Consider making everything below here agnostic to the
# job name.
if JOB_ENV not in os.environ:
os.environ[JOB_ENV] = job
elif os.environ[JOB_ENV] != job:
logging.warning('%s=%s (overrides %s)', JOB_ENV, job, os.environ[JOB_ENV])
os.environ[JOB_ENV] = job
# TODO(fejta): Magic value to tell our test code not to upload started.json
# TODO(fejta): delete upload-to-gcs.sh and then this value.
os.environ[BOOTSTRAP_ENV] = 'yes'
# This helps prevent reuse of cloudsdk configuration. It also reduces the
# risk that running a job on a workstation corrupts the user's config.
os.environ[CLOUDSDK_ENV] = '%s/.config/gcloud' % cwd
def job_args(args):
"""Converts 'a ${FOO} $bar' into 'a wildly different string'."""
return [os.path.expandvars(a) for a in args]
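# Illustrative sketch (variable name hypothetical): with FOO=bar exported,
#   >>> job_args(['--logdir=${FOO}', 'plain'])
#   ['--logdir=bar', 'plain']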
def job_script(job):
"""Return path to script for job."""
with open(test_infra('jobs/config.json')) as fp:
config = json.loads(fp.read())
job_config = config[job]
cmd = test_infra('scenarios/%s.py' % job_config['scenario'])
return [cmd] + job_args(job_config.get('args', []))
def gubernator_uri(paths):
"""Return a gubernator link for this build."""
job = os.path.dirname(paths.build_log)
if job.startswith('gs:/'):
return job.replace('gs:/', GUBERNATOR, 1)
return job
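# Illustrative sketch (bucket name hypothetical): a build log at
# gs://k8s-bucket/logs/ci-job/42/build-log.txt has its parent dir rewritten
# onto the Gubernator endpoint:
#   >>> gubernator_uri(paths)
#   'https://k8s-gubernator.appspot.com/build/k8s-bucket/logs/ci-job/42'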
@contextlib.contextmanager
def choose_ssh_key(ssh):
"""Creates a script for GIT_SSH that uses -i ssh if set."""
if not ssh: # Nothing to do
yield
return
# Create a script for use with GIT_SSH, which defines the program git uses
# during git fetch. In the future change this to GIT_SSH_COMMAND
# https://superuser.com/questions/232373/how-to-tell-git-which-private-key-to-use
with tempfile.NamedTemporaryFile(prefix='ssh', delete=False) as fp:
fp.write('#!/bin/sh\nssh -o StrictHostKeyChecking=no -i \'%s\' -F /dev/null "${@}"\n' % ssh)
try:
os.chmod(fp.name, 0500)
had = 'GIT_SSH' in os.environ
old = os.getenv('GIT_SSH')
os.environ['GIT_SSH'] = fp.name
yield
del os.environ['GIT_SSH']
if had:
os.environ['GIT_SSH'] = old
finally:
os.unlink(fp.name)
def setup_root(call, root, repos, ssh, git_cache, clean):
"""Create root dir, checkout repo and cd into resulting dir."""
if not os.path.exists(root):
os.makedirs(root)
root_dir = os.path.realpath(root)
logging.info('Root: %s', root_dir)
os.chdir(root_dir)
logging.info('cd to %s', root_dir)
with choose_ssh_key(ssh):
for repo, (branch, pull) in repos.items():
os.chdir(root_dir)
logging.info(
'Checkout: %s %s',
os.path.join(root_dir, repo),
pull and pull or branch)
checkout(call, repo, branch, pull, ssh, git_cache, clean)
if len(repos) > 1: # cd back into the primary repo
os.chdir(root_dir)
os.chdir(repos.main)
class Repos(dict):
"""{"repo": (branch, pull)} dict with a .main attribute."""
main = ''
def __setitem__(self, k, v):
if not self:
self.main = k
return super(Repos, self).__setitem__(k, v)
def parse_repos(args):
"""Convert --repo=foo=this,123:abc,555:ddd into a Repos()."""
repos = args.repo or {}
if not repos and not args.bare:
raise ValueError('--bare or --repo required')
ret = Repos()
if len(repos) != 1:
if args.pull:
raise ValueError('Multi --repo does not support --pull, use --repo=R=branch,p1,p2')
if args.branch:
raise ValueError('Multi --repo does not support --branch, use --repo=R=branch')
elif len(repos) == 1 and (args.branch or args.pull):
repo = repos[0]
if '=' in repo or ':' in repo:
raise ValueError('--repo cannot contain = or : with --branch or --pull')
ret[repo] = (args.branch, args.pull)
return ret
for repo in repos:
mat = re.match(r'([^=]+)(=([^:,~^\s]+(:[0-9a-fA-F]+)?(,|$))+)?$', repo)
if not mat:
raise ValueError('bad repo', repo, repos)
this_repo = mat.group(1)
if not mat.group(2):
ret[this_repo] = ('master', '')
continue
commits = mat.group(2)[1:].split(',')
if len(commits) == 1:
# Checking out a branch, possibly at a specific commit
ret[this_repo] = (commits[0], '')
continue
# Checking out one or more PRs
ret[this_repo] = ('', ','.join(commits))
return ret
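# Illustrative sketches (repo names and shas hypothetical):
#   --repo=k8s.io/foo                  -> {'k8s.io/foo': ('master', '')}
#   --repo=k8s.io/foo=release-1.6      -> {'k8s.io/foo': ('release-1.6', '')}
#   --repo=k8s.io/foo=master:aa,12:bb  -> {'k8s.io/foo': ('', 'master:aa,12:bb')}
# The first --repo becomes Repos().main, the checkout that bootstrap cds into.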
def bootstrap(args):
"""Clone repo at pull/branch into root and run job script."""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
job = args.job
repos = parse_repos(args)
upload = args.upload
build_log_path = os.path.abspath('build-log.txt')
build_log = setup_logging(build_log_path)
started = time.time()
if args.timeout:
end = started + args.timeout * 60
else:
end = 0
call = lambda *a, **kw: _call(end, *a, **kw)
gsutil = GSUtil(call)
logging.info('Bootstrap %s...', job)
build = build_name(started)
if upload:
if repos and repos[repos.main][1]: # merging commits, a pr
paths = pr_paths(upload, repos, job, build)
else:
paths = ci_paths(upload, job, build)
logging.info('Gubernator results at %s', gubernator_uri(paths))
# TODO(fejta): Replace env var below with a flag eventually.
os.environ[GCS_ARTIFACTS_ENV] = paths.artifacts
version = 'unknown'
exc_type = None
setup_creds = False
try:
setup_root(call, args.root, repos, args.ssh, args.git_cache, args.clean)
logging.info('Configure environment...')
if repos:
version = find_version(call)
else:
version = ''
setup_magic_environment(job)
setup_credentials(call, args.service_account, upload)
setup_creds = True
logging.info('Start %s at %s...', build, version)
if upload:
start(gsutil, paths, started, node(), version, repos)
success = False
try:
call(job_script(job))
logging.info('PASS: %s', job)
success = True
except subprocess.CalledProcessError:
logging.error('FAIL: %s', job)
except Exception: # pylint: disable=broad-except
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('unexpected error')
success = False
if not setup_creds:
setup_credentials(call, args.service_account, upload)
if upload:
logging.info('Upload result and artifacts...')
logging.info('Gubernator results at %s', gubernator_uri(paths))
try:
finish(gsutil, paths, success, '_artifacts', build, version, repos, call)
except subprocess.CalledProcessError: # Still try to upload build log
success = False
logging.getLogger('').removeHandler(build_log)
build_log.close()
if upload:
gsutil.copy_file(paths.build_log, build_log_path)
if exc_type:
raise exc_type, exc_value, exc_traceback # pylint: disable=raising-bad-type
if not success:
# TODO(fejta/spxtr): we should distinguish infra and non-infra problems
# by exit code and automatically retrigger after an infra-problem.
sys.exit(1)
def parse_args(arguments=None):
"""Parse arguments or sys.argv[1:]."""
parser = argparse.ArgumentParser()
parser.add_argument('--root', default='.', help='Root dir to work with')
parser.add_argument(
'--timeout', type=float, default=0, help='Timeout in minutes if set')
parser.add_argument(
'--repo',
action='append',
help='Fetch the specified repositories, with the first one considered primary')
parser.add_argument(
'--bare',
action='store_true',
help='Do not check out a repository')
parser.add_argument('--job', required=True, help='Name of the job to run')
parser.add_argument(
'--upload',
help='Upload results here if set, requires --service-account')
parser.add_argument(
'--service-account',
help='Activate and use path/to/service-account.json if set.')
parser.add_argument(
'--ssh',
help='Use the ssh key to fetch the repository instead of https if set.')
parser.add_argument(
'--git-cache',
help='Location of the git cache.')
parser.add_argument(
'--clean',
action='store_true',
help='Clean the git repo before running tests.')
args = parser.parse_args(arguments)
# --pull is deprecated, use --repo=k8s.io/foo=master:abcd,12:ef12,45:ff65
setattr(args, 'pull', None)
# --branch is deprecated, use --repo=k8s.io/foo=master
setattr(args, 'branch', None)
if bool(args.repo) == bool(args.bare):
raise argparse.ArgumentTypeError(
'Expected --repo xor --bare:', args.repo, args.bare)
return args
if __name__ == '__main__':
ARGS = parse_args()
bootstrap(ARGS)
| 35.163655
| 119
| 0.608115
|
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
# check out repoes defined in --repo
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
ORIG_CWD = os.getcwd()
def read_all(end, stream, append):
"""Read all buffered lines from a stream."""
while not end or time.time() < end:
line = stream.readline()
if not line:
return True
append(line.rstrip('\n'))
ret = select.select([stream.fileno()], [], [], 0.1)
if not ret[0]:
return False
return False
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60
def terminate(end, proc, kill):
"""Terminate or kill the process after end."""
if not end or time.time() <= end:
return False
if kill:
pgid = os.getpgid(proc.pid)
logging.info(
'Kill %d and process group %d', proc.pid, pgid)
os.killpg(pgid, signal.SIGKILL)
proc.kill()
return True
logging.info(
'Terminate %d on timeout', proc.pid)
proc.terminate()
return True
def _call(end, cmd, stdin=None, check=True, output=None, log_failures=True):
"""Start a subprocess."""
logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
begin = time.time()
if end:
end = max(end, time.time() + 60)
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE if stdin is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
)
if stdin:
proc.stdin.write(stdin)
proc.stdin.close()
out = []
code = None
timeout = False
reads = {
proc.stderr.fileno(): (proc.stderr, logging.warning),
proc.stdout.fileno(): (
proc.stdout, (out.append if output else logging.info)),
}
while reads:
if terminate(end, proc, timeout):
if timeout:
break
end = time.time() + 15 * 60
timeout = True
ret = select.select(reads, [], [], 0.1)
for fdesc in ret[0]:
if read_all(end, *reads[fdesc]):
reads.pop(fdesc)
if not ret[0] and proc.poll() is not None:
break
code = proc.wait()
if timeout:
code = code or 124
logging.error('Build timed out')
if code and log_failures:
logging.error('Command failed')
logging.info(
'process %d exited with code %d after %.1fm',
proc.pid, code, elapsed(begin))
out.append('')
lines = output and '\n'.join(out)
if check and code:
raise subprocess.CalledProcessError(code, cmd, lines)
return lines
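# Illustrative sketch (command hypothetical): with no deadline (end=0),
#   >>> _call(0, ['echo', 'hello'], output=True)
#   'hello\n'
# stdout is returned when output=True; otherwise each line is logged as info,
# and stderr is always logged as warnings.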
def ref_has_shas(ref):
"""Determine if a reference specifies shas (contains ':')"""
return isinstance(ref, basestring) and ':' in ref
def pull_numbers(pull):
"""Turn a pull reference list into a list of PR numbers to merge."""
if ref_has_shas(pull):
return [r.split(':')[0] for r in pull.split(',')][1:]
return [str(pull)]
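# Illustrative sketch (shas hypothetical): the first ref is the base branch,
# so only the remaining entries are PR numbers:
#   >>> pull_numbers('master:deadbeef,123:aaa,456:bbb')
#   ['123', '456']
#   >>> pull_numbers(789)
#   ['789']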
def pull_ref(pull):
"""Turn a PR number of list of refs into specific refs to fetch and check out."""
if isinstance(pull, int) or ',' not in pull:
return ['+refs/pull/%d/merge' % int(pull)], ['FETCH_HEAD']
pulls = pull.split(',')
refs = []
checkouts = []
for ref in pulls:
if ':' in ref:
name, sha = ref.split(':')
elif not refs:
name, sha = ref, 'FETCH_HEAD'
else:
name = ref
sha = 'refs/pr/%s' % ref
checkouts.append(sha)
if not refs:
refs.append(name)
else:
num = int(name)
refs.append('+refs/pull/%d/head:refs/pr/%d' % (num, num))
return refs, checkouts
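# Illustrative sketch (shas hypothetical):
#   >>> pull_ref(123)
#   (['+refs/pull/123/merge'], ['FETCH_HEAD'])
#   >>> pull_ref('master:deadbeef,123:abc123')
#   (['master', '+refs/pull/123/head:refs/pr/123'], ['deadbeef', 'abc123'])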
def branch_ref(branch):
"""Split branch:sha if necessary."""
if ref_has_shas(branch):
split_refs = branch.split(':')
return [split_refs[0]], [split_refs[1]]
return [branch], ['FETCH_HEAD']
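# Illustrative sketch (sha hypothetical):
#   >>> branch_ref('master')
#   (['master'], ['FETCH_HEAD'])
#   >>> branch_ref('release-1.6:deadbeef')
#   (['release-1.6'], ['deadbeef'])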
def repository(repo, ssh):
"""Return the url associated with the repo."""
if repo.startswith('k8s.io/'):
repo = 'github.com/kubernetes/%s' % (repo[len('k8s.io/'):])
if ssh:
if ":" not in repo:
parts = repo.split('/', 1)
repo = '%s:%s' % (parts[0], parts[1])
return 'git@%s' % repo
return 'https://%s' % repo
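# Illustrative sketch: k8s.io/ paths map onto the kubernetes GitHub org, and
# the ssh flag switches the scheme:
#   >>> repository('k8s.io/test-infra', '')
#   'https://github.com/kubernetes/test-infra'
#   >>> repository('k8s.io/test-infra', '/path/to/key')
#   'git@github.com:kubernetes/test-infra'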
def random_sleep(attempt):
"""Sleep 2**attempt seconds with a random fractional offset."""
time.sleep(random.random() + attempt ** 2)
def checkout(call, repo, branch, pull, ssh='', git_cache='', clean=False):
"""Fetch and checkout the repository at the specified branch/pull."""
if bool(branch) == bool(pull):
raise ValueError('Must specify one of --branch or --pull')
if pull:
refs, checkouts = pull_ref(pull)
else:
refs, checkouts = branch_ref(branch)
git = 'git'
if git_cache:
cache_dir = '%s/%s' % (git_cache, repo)
try:
os.makedirs(cache_dir)
except OSError:
pass
call([git, 'init', repo, '--separate-git-dir=%s' % cache_dir])
call(['rm', '-f', '%s/index.lock' % cache_dir])
else:
call([git, 'init', repo])
os.chdir(repo)
if clean:
call([git, 'clean', '-dfx'])
call([git, 'reset', '--hard'])
# To make a merge commit, a user needs to be set. It's okay to use a dummy
# user here, since we're not exporting the history.
call([git, 'config', '--local', 'user.name', 'K8S Bootstrap'])
call([git, 'config', '--local', 'user.email', 'k8s_bootstrap@localhost'])
retries = 3
for attempt in range(retries):
try:
call([git, 'fetch', '--quiet', '--tags', repository(repo, ssh)] + refs)
break
except subprocess.CalledProcessError as cpe:
if attempt >= retries - 1:
raise
if cpe.returncode != 128:
raise
logging.warning('git fetch failed')
random_sleep(attempt)
call([git, 'checkout', '-B', 'test', checkouts[0]])
for ref, head in zip(refs, checkouts)[1:]:
call(['git', 'merge', '--no-ff', '-m', 'Merge %s' % ref, head])
def repos_dict(repos):
"""Returns {"repo1": "branch", "repo2": "pull"}."""
return {r: b or p for (r, (b, p)) in repos.items()}
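# Illustrative sketch (values hypothetical):
#   >>> repos_dict({'k8s.io/kubernetes': ('', '123:abc'),
#   ...             'k8s.io/release': ('master', '')})
#   {'k8s.io/kubernetes': '123:abc', 'k8s.io/release': 'master'}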
def start(gsutil, paths, stamp, node_name, version, repos):
"""Construct and upload started.json."""
data = {
'timestamp': int(stamp),
'jenkins-node': node_name,
'node': node_name,
}
if version:
data['repo-version'] = version
data['version'] = version
if repos:
pull = repos[repos.main]
if ref_has_shas(pull[1]):
data['pull'] = pull[1]
data['repos'] = repos_dict(repos)
gsutil.upload_json(paths.started, data)
if paths.pr_build_link:
gsutil.upload_text(
paths.pr_build_link,
paths.pr_path,
additional_headers=['-h', 'x-goog-meta-link: %s' % paths.pr_path]
)
class GSUtil(object):
"""A helper class for making gsutil commands."""
gsutil = 'gsutil'
def __init__(self, call):
self.call = call
def stat(self, path):
"""Return metadata about the object, such as generation."""
cmd = [self.gsutil, 'stat', path]
return self.call(cmd, output=True, log_failures=False)
def ls(self, path):
"""List a bucket or subdir."""
cmd = [self.gsutil, 'ls', path]
return self.call(cmd, output=True)
def upload_json(self, path, jdict, generation=None):
"""Upload the dictionary object to path."""
if generation is not None:
gen = ['-h', 'x-goog-if-generation-match:%s' % generation]
else:
gen = []
cmd = [
self.gsutil, '-q',
'-h', 'Content-Type:application/json'] + gen + [
'cp', '-', path]
self.call(cmd, stdin=json.dumps(jdict, indent=2))
def copy_file(self, dest, orig):
"""Copy the file to the specified path using compressed encoding."""
cmd = [self.gsutil, '-q', 'cp', '-Z', orig, dest]
self.call(cmd)
def upload_text(self, path, txt, additional_headers=None, cached=True):
"""Copy the text to path, optionally disabling caching."""
headers = ['-h', 'Content-Type:text/plain']
if not cached:
headers += ['-h', 'Cache-Control:private, max-age=0, no-transform']
if additional_headers:
headers += additional_headers
cmd = [self.gsutil, '-q'] + headers + ['cp', '-', path]
self.call(cmd, stdin=txt)
def cat(self, path, generation):
"""Return contents of path#generation"""
cmd = [self.gsutil, '-q', 'cat', '%s#%s' % (path, generation)]
return self.call(cmd, output=True)
def upload_artifacts(self, gsutil, path, artifacts):
"""Upload artifacts to the specified path."""
if not os.path.isdir(artifacts):
return
try:
gsutil.ls(path)
remote_base = os.path.basename(path)
local_base = os.path.basename(artifacts)
if remote_base != local_base:
localpath = artifacts.replace(local_base, remote_base)
os.rename(artifacts, localpath)
artifacts = localpath
path = path[:-len(remote_base + '/')]
except subprocess.CalledProcessError:
logging.warning('Remote dir %s does not exist yet', path)
cmd = [
self.gsutil, '-m', '-q',
'-o', 'GSUtil:use_magicfile=True',
'cp', '-r', '-c', '-z', 'log,txt,xml',
artifacts, path,
]
self.call(cmd)
def append_result(gsutil, path, build, version, passed):
"""Download a json list and append metadata about this build to it."""
end = time.time() + 300
errors = 0
while time.time() < end:
if errors:
random_sleep(min(errors, 3))
try:
out = gsutil.stat(path)
gen = re.search(r'Generation:\s+(\d+)', out).group(1)
except subprocess.CalledProcessError:
gen = 0
if gen:
try:
cache = json.loads(gsutil.cat(path, gen))
if not isinstance(cache, list):
raise ValueError(cache)
except ValueError as exc:
logging.warning('Failed to decode JSON: %s', exc)
cache = []
except subprocess.CalledProcessError:
errors += 1
continue
else:
cache = []
cache.append({
'version': version, # TODO(fejta): retire
'job-version': version,
'buildnumber': build,
'passed': bool(passed),
'result': 'SUCCESS' if passed else 'FAILURE',
})
cache = cache[-300:]
try:
gsutil.upload_json(path, cache, generation=gen)
return
except subprocess.CalledProcessError:
logging.warning('Failed to append to %s#%s', path, gen)
errors += 1
def metadata(repos, artifacts, call):
"""Return metadata associated for the build, including inside artifacts."""
path = os.path.join(artifacts or '', 'metadata.json')
meta = None
if os.path.isfile(path):
try:
with open(path) as fp:
meta = json.loads(fp.read())
except (IOError, ValueError):
pass
if not meta or not isinstance(meta, dict):
meta = {}
if repos:
meta['repo'] = repos.main
meta['repos'] = repos_dict(repos)
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['repo-commit'] = commit.strip()
except subprocess.CalledProcessError:
pass
cwd = os.getcwd()
os.chdir(test_infra('.'))
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['infra-commit'] = commit.strip()[:9]
except subprocess.CalledProcessError:
pass
os.chdir(cwd)
return meta
def finish(gsutil, paths, success, artifacts, build, version, repos, call):
"""
Args:
gsutil: a GSUtil instance used for uploads.
paths: a Paths instance.
success: the build passed if true.
artifacts: a dir containing artifacts to upload.
build: identifier of this build.
version: identifies what version of the code the build tested.
repos: the Repos dict of target repositories.
call: a callable used to run commands while gathering metadata.
"""
if os.path.isdir(artifacts) and any(f for _, _, f in os.walk(artifacts)):
try:
gsutil.upload_artifacts(gsutil, paths.artifacts, artifacts)
except subprocess.CalledProcessError:
logging.warning('Failed to upload artifacts')
meta = metadata(repos, artifacts, call)
if not version:
version = meta.get('job-version')
if not version: # TODO(fejta): retire
version = meta.get('version')
# github.com/kubernetes/release/find_green_build depends on append_result()
# TODO(fejta): reconsider whether this is how we want to solve this problem.
append_result(gsutil, paths.result_cache, build, version, success)
if paths.pr_result_cache:
append_result(gsutil, paths.pr_result_cache, build, version, success)
data = {
# TODO(fejta): update utils.go in contrib to accept a float
'timestamp': int(time.time()),
'result': 'SUCCESS' if success else 'FAILURE',
'passed': bool(success),
'metadata': meta,
}
if version:
data['job-version'] = version
data['version'] = version # TODO(fejta): retire
gsutil.upload_json(paths.finished, data)
# Upload the latest build for the job.
# Do this last, since other tools expect the rest of the data to be
# published when this file is created.
for path in {paths.latest, paths.pr_latest}:
if path:
try:
gsutil.upload_text(path, str(build), cached=False)
except subprocess.CalledProcessError:
logging.warning('Failed to update %s', path)
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def node():
"""Return the name of the node running the build."""
# TODO(fejta): jenkins sets the node name and our infra expect this value.
# TODO(fejta): Consider doing something different here.
if NODE_ENV not in os.environ:
os.environ[NODE_ENV] = ''.join(socket.gethostname().split('.')[:1])
return os.environ[NODE_ENV]
def find_version(call):
"""Determine and return the version of the build."""
# TODO(fejta): once job-version is functional switch this to
# git rev-parse [--short=N] HEAD^{commit}
version_file = 'version'
if os.path.isfile(version_file):
# e2e tests which download kubernetes use this path:
with open(version_file) as fp:
return fp.read().strip()
version_script = 'hack/lib/version.sh'
if os.path.isfile(version_script):
cmd = [
'bash', '-c', (
"""
set -o errexit
set -o nounset
export KUBE_ROOT=.
source %s
kube::version::get_version_vars
echo $KUBE_GIT_VERSION
""" % version_script)
]
return call(cmd, output=True).strip()
return 'unknown'
class Paths(object): # pylint: disable=too-many-instance-attributes,too-few-public-methods
"""Links to remote gcs-paths for uploading results."""
def __init__( # pylint: disable=too-many-arguments
self,
artifacts, # artifacts folder (in build)
build_log, # build-log.txt (in build)
pr_path, # path to build
finished, # finished.json (metadata from end of build)
latest, # latest-build.txt (in job)
pr_build_link, # file containing pr_path (in job directory)
pr_latest, # latest-build.txt (in pr job)
pr_result_cache, # jobResultsCache.json (in pr job)
result_cache, # jobResultsCache.json (cache of latest results in job)
started, # started.json (metadata from start of build)
):
self.artifacts = artifacts
self.build_log = build_log
self.pr_path = pr_path
self.finished = finished
self.latest = latest
self.pr_build_link = pr_build_link
self.pr_latest = pr_latest
self.pr_result_cache = pr_result_cache
self.result_cache = result_cache
self.started = started
def ci_paths(base, job, build):
"""Return a Paths() instance for a continuous build."""
latest = os.path.join(base, job, 'latest-build.txt')
return Paths(
artifacts=os.path.join(base, job, build, 'artifacts'),
build_log=os.path.join(base, job, build, 'build-log.txt'),
pr_path=None,
finished=os.path.join(base, job, build, 'finished.json'),
latest=latest,
pr_build_link=None,
pr_latest=None,
pr_result_cache=None,
result_cache=os.path.join(base, job, 'jobResultsCache.json'),
started=os.path.join(base, job, build, 'started.json'),
)
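# Illustrative sketch (bucket hypothetical): ci_paths('gs://k8s-bucket/logs',
# 'ci-job', '42') yields, among others:
#   build_log  gs://k8s-bucket/logs/ci-job/42/build-log.txt
#   started    gs://k8s-bucket/logs/ci-job/42/started.json
#   latest     gs://k8s-bucket/logs/ci-job/latest-build.txt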
def pr_paths(base, repos, job, build):
"""Return a Paths() instance for a PR."""
if not repos:
raise ValueError('repos is empty')
repo = repos.main
pull = str(repos[repo][1])
if repo in ['k8s.io/kubernetes', 'kubernetes/kubernetes']:
prefix = ''
elif repo.startswith('k8s.io/'):
prefix = repo[len('k8s.io/'):]
elif repo.startswith('kubernetes/'):
prefix = repo[len('kubernetes/'):]
elif repo.startswith('github.com/'):
prefix = repo[len('github.com/'):].replace('/', '_')
else:
prefix = repo.replace('/', '_')
# Batch merges are those with more than one PR specified.
pr_nums = pull_numbers(pull)
if len(pr_nums) > 1:
pull = os.path.join(prefix, 'batch')
else:
pull = os.path.join(prefix, pr_nums[0])
pr_path = os.path.join(base, 'pull', pull, job, build)
result_cache = os.path.join(
base, 'directory', job, 'jobResultsCache.json')
pr_result_cache = os.path.join(
base, 'pull', pull, job, 'jobResultsCache.json')
return Paths(
artifacts=os.path.join(pr_path, 'artifacts'),
build_log=os.path.join(pr_path, 'build-log.txt'),
pr_path=pr_path,
finished=os.path.join(pr_path, 'finished.json'),
latest=os.path.join(base, 'directory', job, 'latest-build.txt'),
pr_build_link=os.path.join(base, 'directory', job, '%s.txt' % build),
pr_latest=os.path.join(base, 'pull', pull, job, 'latest-build.txt'),
pr_result_cache=pr_result_cache,
result_cache=result_cache,
started=os.path.join(pr_path, 'started.json'),
)
BUILD_ENV = 'BUILD_NUMBER'
BOOTSTRAP_ENV = 'BOOTSTRAP_MIGRATION'
CLOUDSDK_ENV = 'CLOUDSDK_CONFIG'
GCE_KEY_ENV = 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE'
GUBERNATOR = 'https://k8s-gubernator.appspot.com/build'
HOME_ENV = 'HOME'
JOB_ENV = 'JOB_NAME'
NODE_ENV = 'NODE_NAME'
SERVICE_ACCOUNT_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
WORKSPACE_ENV = 'WORKSPACE'
GCS_ARTIFACTS_ENV = 'GCS_ARTIFACTS_DIR'
def build_name(started):
"""Return the unique(ish) string representing this build."""
# TODO(fejta): right now jenkins sets the BUILD_NUMBER and does this
# in an environment variable. Consider migrating this to a
# bootstrap.py flag
if BUILD_ENV not in os.environ:
# Automatically generate a build number if none is set
uniq = '%x-%d' % (hash(node()), os.getpid())
autogen = time.strftime('%Y%m%d-%H%M%S-' + uniq, time.gmtime(started))
os.environ[BUILD_ENV] = autogen
return os.environ[BUILD_ENV]
def setup_credentials(call, robot, upload):
"""Activate the service account unless robot is none."""
# TODO(fejta): stop activating inside the image
# TODO(fejta): allow use of existing gcloud auth
if robot:
os.environ[SERVICE_ACCOUNT_ENV] = robot
if not os.getenv(SERVICE_ACCOUNT_ENV) and upload:
logging.warning('Cannot --upload=%s, no active gcloud account.', upload)
raise ValueError('--upload requires --service-account')
if not os.getenv(SERVICE_ACCOUNT_ENV) and not upload:
logging.info('Will not upload results.')
return
if not os.path.isfile(os.environ[SERVICE_ACCOUNT_ENV]):
raise IOError(
'Cannot find service account credentials',
os.environ[SERVICE_ACCOUNT_ENV],
'Create service account and then create key at '
'https://console.developers.google.com/iam-admin/serviceaccounts/project', # pylint: disable=line-too-long
)
call([
'gcloud',
'auth',
'activate-service-account',
'--key-file=%s' % os.environ[SERVICE_ACCOUNT_ENV],
])
try: # Old versions of gcloud may not support this value
account = call(
['gcloud', 'config', 'get-value', 'account'], output=True).strip()
except subprocess.CalledProcessError:
account = 'unknown'
logging.info('Will upload results to %s using %s', upload, account)
def setup_logging(path):
"""Initialize logging to screen and path."""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
build_log = logging.FileHandler(filename=path, mode='w')
build_log.setLevel(logging.INFO)
formatter = logging.Formatter(fmt, datefmt=datefmt)
build_log.setFormatter(formatter)
logging.getLogger('').addHandler(build_log)
return build_log
def setup_magic_environment(job):
"""Set magic environment variables scripts currently expect."""
home = os.environ[HOME_ENV]
# TODO(fejta): jenkins sets these values. Consider migrating to using
# a secret volume instead and passing the path to this volume
# into bootstrap.py as a flag.
os.environ.setdefault(
GCE_KEY_ENV,
os.path.join(home, '.ssh/google_compute_engine'),
)
os.environ.setdefault(
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/google_compute_engine.pub'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PRIVATE_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa.pub'),
)
cwd = os.getcwd()
# TODO(fejta): jenkins sets WORKSPACE and pieces of our infra expect this
# value. Consider doing something else in the future.
os.environ[WORKSPACE_ENV] = cwd
# TODO(fejta): previously, dockerized-e2e-runner.sh also set HOME to WORKSPACE,
# probably to minimize leakage between jobs.
# Consider accomplishing this another way.
os.environ[HOME_ENV] = cwd
# TODO(fejta): jenkins sets JOB_ENV and pieces of our infra expect this
# value. Consider making everything below here agnostic to the
# job name.
if JOB_ENV not in os.environ:
os.environ[JOB_ENV] = job
elif os.environ[JOB_ENV] != job:
logging.warning('%s=%s (overrides %s)', JOB_ENV, job, os.environ[JOB_ENV])
os.environ[JOB_ENV] = job
# TODO(fejta): Magic value to tell our test code not to upload started.json
# TODO(fejta): delete upload-to-gcs.sh and then this value.
os.environ[BOOTSTRAP_ENV] = 'yes'
# This helps prevent reuse of cloudsdk configuration. It also reduces the
# risk that running a job on a workstation corrupts the user's config.
os.environ[CLOUDSDK_ENV] = '%s/.config/gcloud' % cwd
def job_args(args):
"""Converts 'a ${FOO} $bar' into 'a wildly different string'."""
return [os.path.expandvars(a) for a in args]
def job_script(job):
"""Return path to script for job."""
with open(test_infra('jobs/config.json')) as fp:
config = json.loads(fp.read())
job_config = config[job]
cmd = test_infra('scenarios/%s.py' % job_config['scenario'])
return [cmd] + job_args(job_config.get('args', []))
def gubernator_uri(paths):
"""Return a gubernator link for this build."""
job = os.path.dirname(paths.build_log)
if job.startswith('gs:/'):
return job.replace('gs:/', GUBERNATOR, 1)
return job
@contextlib.contextmanager
def choose_ssh_key(ssh):
"""Creates a script for GIT_SSH that uses -i ssh if set."""
if not ssh:
yield
return
with tempfile.NamedTemporaryFile(prefix='ssh', delete=False) as fp:
fp.write('#!/bin/sh\nssh -o StrictHostKeyChecking=no -i \'%s\' -F /dev/null "${@}"\n' % ssh)
try:
os.chmod(fp.name, 0500)
had = 'GIT_SSH' in os.environ
old = os.getenv('GIT_SSH')
os.environ['GIT_SSH'] = fp.name
yield
del os.environ['GIT_SSH']
if had:
os.environ['GIT_SSH'] = old
finally:
os.unlink(fp.name)
def setup_root(call, root, repos, ssh, git_cache, clean):
"""Create root dir, checkout repo and cd into resulting dir."""
if not os.path.exists(root):
os.makedirs(root)
root_dir = os.path.realpath(root)
logging.info('Root: %s', root_dir)
os.chdir(root_dir)
logging.info('cd to %s', root_dir)
with choose_ssh_key(ssh):
for repo, (branch, pull) in repos.items():
os.chdir(root_dir)
logging.info(
'Checkout: %s %s',
os.path.join(root_dir, repo),
pull and pull or branch)
checkout(call, repo, branch, pull, ssh, git_cache, clean)
if len(repos) > 1:
os.chdir(root_dir)
os.chdir(repos.main)
class Repos(dict):
"""{"repo": (branch, pull)} dict with a .main attribute."""
main = ''
def __setitem__(self, k, v):
if not self:
self.main = k
return super(Repos, self).__setitem__(k, v)
def parse_repos(args):
"""Convert --repo=foo=this,123:abc,555:ddd into a Repos()."""
repos = args.repo or {}
if not repos and not args.bare:
raise ValueError('--bare or --repo required')
ret = Repos()
if len(repos) != 1:
if args.pull:
raise ValueError('Multi --repo does not support --pull, use --repo=R=branch,p1,p2')
if args.branch:
raise ValueError('Multi --repo does not support --branch, use --repo=R=branch')
elif len(repos) == 1 and (args.branch or args.pull):
repo = repos[0]
if '=' in repo or ':' in repo:
raise ValueError('--repo cannot contain = or : with --branch or --pull')
ret[repo] = (args.branch, args.pull)
return ret
for repo in repos:
mat = re.match(r'([^=]+)(=([^:,~^\s]+(:[0-9a-fA-F]+)?(,|$))+)?$', repo)
if not mat:
raise ValueError('bad repo', repo, repos)
this_repo = mat.group(1)
if not mat.group(2):
ret[this_repo] = ('master', '')
continue
commits = mat.group(2)[1:].split(',')
if len(commits) == 1:
ret[this_repo] = (commits[0], '')
continue
ret[this_repo] = ('', ','.join(commits))
return ret
def bootstrap(args):
"""Clone repo at pull/branch into root and run job script."""
job = args.job
repos = parse_repos(args)
upload = args.upload
build_log_path = os.path.abspath('build-log.txt')
build_log = setup_logging(build_log_path)
started = time.time()
if args.timeout:
end = started + args.timeout * 60
else:
end = 0
call = lambda *a, **kw: _call(end, *a, **kw)
gsutil = GSUtil(call)
logging.info('Bootstrap %s...', job)
build = build_name(started)
if upload:
if repos and repos[repos.main][1]:
paths = pr_paths(upload, repos, job, build)
else:
paths = ci_paths(upload, job, build)
logging.info('Gubernator results at %s', gubernator_uri(paths))
os.environ[GCS_ARTIFACTS_ENV] = paths.artifacts
version = 'unknown'
exc_type = None
setup_creds = False
try:
setup_root(call, args.root, repos, args.ssh, args.git_cache, args.clean)
logging.info('Configure environment...')
if repos:
version = find_version(call)
else:
version = ''
setup_magic_environment(job)
setup_credentials(call, args.service_account, upload)
setup_creds = True
logging.info('Start %s at %s...', build, version)
if upload:
start(gsutil, paths, started, node(), version, repos)
success = False
try:
call(job_script(job))
logging.info('PASS: %s', job)
success = True
except subprocess.CalledProcessError:
logging.error('FAIL: %s', job)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('unexpected error')
success = False
if not setup_creds:
setup_credentials(call, args.service_account, upload)
if upload:
logging.info('Upload result and artifacts...')
logging.info('Gubernator results at %s', gubernator_uri(paths))
try:
finish(gsutil, paths, success, '_artifacts', build, version, repos, call)
except subprocess.CalledProcessError:
success = False
logging.getLogger('').removeHandler(build_log)
build_log.close()
if upload:
gsutil.copy_file(paths.build_log, build_log_path)
if exc_type:
raise exc_type, exc_value, exc_traceback
if not success:
sys.exit(1)
def parse_args(arguments=None):
"""Parse arguments or sys.argv[1:]."""
parser = argparse.ArgumentParser()
parser.add_argument('--root', default='.', help='Root dir to work with')
parser.add_argument(
'--timeout', type=float, default=0, help='Timeout in minutes if set')
parser.add_argument(
'--repo',
action='append',
help='Fetch the specified repositories, with the first one considered primary')
parser.add_argument(
'--bare',
action='store_true',
help='Do not check out a repository')
parser.add_argument('--job', required=True, help='Name of the job to run')
parser.add_argument(
'--upload',
help='Upload results here if set, requires --service-account')
parser.add_argument(
'--service-account',
help='Activate and use path/to/service-account.json if set.')
parser.add_argument(
'--ssh',
help='Use the ssh key to fetch the repository instead of https if set.')
parser.add_argument(
'--git-cache',
help='Location of the git cache.')
parser.add_argument(
'--clean',
action='store_true',
help='Clean the git repo before running tests.')
args = parser.parse_args(arguments)
setattr(args, 'pull', None)
setattr(args, 'branch', None)
if bool(args.repo) == bool(args.bare):
raise argparse.ArgumentTypeError(
'Expected --repo xor --bare:', args.repo, args.bare)
return args
if __name__ == '__main__':
ARGS = parse_args()
bootstrap(ARGS)
| false
| true
|
790d629d64a74f9cb75fd24c59994a2a6d1221e7
| 39,787
|
py
|
Python
|
test/dialect/mysql/test_reflection.py
|
lxl0928/timi_sqlalchemy
|
ebd3abc1e7bc23f211ef11ed05ef821233d066cc
|
[
"MIT"
] | 1
|
2021-09-04T18:25:05.000Z
|
2021-09-04T18:25:05.000Z
|
test/dialect/mysql/test_reflection.py
|
lxl0928/timi_sqlalchemy
|
ebd3abc1e7bc23f211ef11ed05ef821233d066cc
|
[
"MIT"
] | null | null | null |
test/dialect/mysql/test_reflection.py
|
lxl0928/timi_sqlalchemy
|
ebd3abc1e7bc23f211ef11ed05ef821233d066cc
|
[
"MIT"
] | 21
|
2017-11-13T13:23:27.000Z
|
2019-10-07T02:00:52.000Z
|
# coding: utf-8
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import DDL
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import NCHAR
from sqlalchemy import select
from sqlalchemy import SmallInteger
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import TIMESTAMP
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.mysql import reflection as _reflection
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "mysql"
__backend__ = True
@testing.provide_metadata
def _run_test(self, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
# Early 5.0 releases seem to report more "general" types for columns
# in a view, e.g. char -> varchar, tinyblob -> mediumblob
use_views = testing.db.dialect.server_version_info > (5, 0, 10)
m = self.metadata
Table("mysql_types", m, *columns)
if use_views:
event.listen(
m,
"after_create",
DDL(
"CREATE OR REPLACE VIEW mysql_types_v "
"AS SELECT * from mysql_types"
),
)
event.listen(
m, "before_drop", DDL("DROP VIEW IF EXISTS mysql_types_v")
)
m.create_all()
m2 = MetaData(testing.db)
tables = [Table("mysql_types", m2, autoload=True)]
if use_views:
tables.append(Table("mysql_types_v", m2, autoload=True))
for table in tables:
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
def test_time_types(self):
specs = []
if testing.requires.mysql_fsp.enabled:
fsps = [None, 0, 5]
else:
fsps = [None]
for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
# MySQL defaults fsp to 0, and if 0 does not report it.
# we don't actually render 0 right now in DDL but even if we do,
# it comes back blank
for fsp in fsps:
if fsp:
specs.append((type_(fsp=fsp), type_(fsp=fsp)))
else:
specs.append((type_(), type_()))
specs.extend(
[(TIMESTAMP(), mysql.TIMESTAMP()), (DateTime(), mysql.DATETIME())]
)
# note 'timezone' should always be None on both
self._run_test(specs, ["fsp", "timezone"])
def test_year_types(self):
specs = [
(mysql.YEAR(), mysql.YEAR(display_width=4)),
(mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
]
self._run_test(specs, ["display_width"])
def test_string_types(self):
specs = [
(String(1), mysql.MSString(1)),
(String(3), mysql.MSString(3)),
(Text(), mysql.MSText()),
(Unicode(1), mysql.MSString(1)),
(Unicode(3), mysql.MSString(3)),
(UnicodeText(), mysql.MSText()),
(mysql.MSChar(1), mysql.MSChar(1)),
(mysql.MSChar(3), mysql.MSChar(3)),
(NCHAR(2), mysql.MSChar(2)),
(mysql.MSNChar(2), mysql.MSChar(2)),
(mysql.MSNVarChar(22), mysql.MSString(22)),
]
self._run_test(specs, ["length"])
def test_integer_types(self):
specs = []
for type_ in [
mysql.TINYINT,
mysql.SMALLINT,
mysql.MEDIUMINT,
mysql.INTEGER,
mysql.BIGINT,
]:
for display_width in [None, 4, 7]:
for unsigned in [False, True]:
for zerofill in [None, True]:
kw = {}
if display_width:
kw["display_width"] = display_width
if unsigned is not None:
kw["unsigned"] = unsigned
if zerofill is not None:
kw["zerofill"] = zerofill
zerofill = bool(zerofill)
source_type = type_(**kw)
if display_width is None:
display_width = {
mysql.MEDIUMINT: 9,
mysql.SMALLINT: 6,
mysql.TINYINT: 4,
mysql.INTEGER: 11,
mysql.BIGINT: 20,
}[type_]
if zerofill:
unsigned = True
expected_type = type_(
display_width=display_width,
unsigned=unsigned,
zerofill=zerofill,
)
specs.append((source_type, expected_type))
specs.extend(
[
(SmallInteger(), mysql.SMALLINT(display_width=6)),
(Integer(), mysql.INTEGER(display_width=11)),
(BigInteger, mysql.BIGINT(display_width=20)),
]
)
self._run_test(specs, ["display_width", "unsigned", "zerofill"])
def test_binary_types(self):
specs = [
(LargeBinary(3), mysql.TINYBLOB()),
(LargeBinary(), mysql.BLOB()),
(mysql.MSBinary(3), mysql.MSBinary(3)),
(mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
(mysql.MSTinyBlob(), mysql.MSTinyBlob()),
(mysql.MSBlob(), mysql.MSBlob()),
(mysql.MSBlob(1234), mysql.MSBlob()),
(mysql.MSMediumBlob(), mysql.MSMediumBlob()),
(mysql.MSLongBlob(), mysql.MSLongBlob()),
]
self._run_test(specs, [])
@testing.uses_deprecated("Manually quoting ENUM value literals")
def test_legacy_enum_types(self):
specs = [(mysql.ENUM("''", "'fleem'"), mysql.ENUM("''", "'fleem'"))]
self._run_test(specs, ["enums"])
class ReflectionTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mysql"
__backend__ = True
def test_default_reflection(self):
"""Test reflection of column defaults."""
from sqlalchemy.dialects.mysql import VARCHAR
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column(
"c1",
VARCHAR(10, collation="utf8_unicode_ci"),
DefaultClause(""),
nullable=False,
),
Column("c2", String(10), DefaultClause("0")),
Column("c3", String(10), DefaultClause("abc")),
Column("c4", TIMESTAMP, DefaultClause("2009-04-05 12:00:00")),
Column("c5", TIMESTAMP),
Column(
"c6",
TIMESTAMP,
DefaultClause(
sql.text(
"CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"
)
),
),
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.c.c1.server_default.arg == ""
assert def_table.c.c2.server_default.arg == "0"
assert def_table.c.c3.server_default.arg == "abc"
assert def_table.c.c4.server_default.arg == "2009-04-05 12:00:00"
assert str(reflected.c.c1.server_default.arg) == "''"
assert str(reflected.c.c2.server_default.arg) == "'0'"
assert str(reflected.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
reflected.create()
try:
reflected2 = Table(
"mysql_def", MetaData(testing.db), autoload=True
)
finally:
reflected.drop()
assert str(reflected2.c.c1.server_default.arg) == "''"
assert str(reflected2.c.c2.server_default.arg) == "'0'"
assert str(reflected2.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected2.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
def test_reflection_with_table_options(self):
comment = r"""Comment types type speedily ' " \ '' Fun!"""
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column("c1", Integer()),
mysql_engine="MEMORY",
comment=comment,
mysql_default_charset="utf8",
mysql_auto_increment="5",
mysql_avg_row_length="3",
mysql_password="secret",
mysql_connection="fish",
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.kwargs["mysql_engine"] == "MEMORY"
assert def_table.comment == comment
assert def_table.kwargs["mysql_default_charset"] == "utf8"
assert def_table.kwargs["mysql_auto_increment"] == "5"
assert def_table.kwargs["mysql_avg_row_length"] == "3"
assert def_table.kwargs["mysql_password"] == "secret"
assert def_table.kwargs["mysql_connection"] == "fish"
assert reflected.kwargs["mysql_engine"] == "MEMORY"
assert reflected.comment == comment
assert reflected.kwargs["mysql_comment"] == comment
assert reflected.kwargs["mysql_default charset"] == "utf8"
assert reflected.kwargs["mysql_avg_row_length"] == "3"
assert reflected.kwargs["mysql_connection"] == "fish"
# This field doesn't seem to be returned by mysql itself.
# assert reflected.kwargs['mysql_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
# assert reflected.kwargs['mysql_auto_increment'] == '5'
def test_reflection_on_include_columns(self):
"""Test reflection of include_columns to be sure they respect case."""
case_table = Table(
"mysql_case",
MetaData(testing.db),
Column("c1", String(10)),
Column("C2", String(10)),
Column("C3", String(10)),
)
try:
case_table.create()
reflected = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "C2"],
)
for t in case_table, reflected:
assert "c1" in t.c.keys()
assert "C2" in t.c.keys()
reflected2 = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "c2"],
)
assert "c1" in reflected2.c.keys()
for c in ["c2", "C2", "C3"]:
assert c not in reflected2.c.keys()
finally:
case_table.drop()
def test_autoincrement(self):
meta = MetaData(testing.db)
try:
Table(
"ai_1",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_2",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_3",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_4",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column(
"int_n2",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_5",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_6",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_7",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_8",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
mysql_engine="MyISAM",
)
meta.create_all()
table_names = [
"ai_1",
"ai_2",
"ai_3",
"ai_4",
"ai_5",
"ai_6",
"ai_7",
"ai_8",
]
mr = MetaData(testing.db)
mr.reflect(only=table_names)
for tbl in [mr.tables[name] for name in table_names]:
for c in tbl.c:
if c.name.startswith("int_y"):
assert c.autoincrement
elif c.name.startswith("int_n"):
assert not c.autoincrement
tbl.insert().execute()
if "int_y" in tbl.c:
assert select([tbl.c.int_y]).scalar() == 1
assert list(tbl.select().execute().first()).count(1) == 1
else:
assert 1 not in list(tbl.select().execute().first())
finally:
meta.drop_all()
@testing.provide_metadata
def test_view_reflection(self):
Table(
"x", self.metadata, Column("a", Integer), Column("b", String(50))
)
self.metadata.create_all()
with testing.db.connect() as conn:
conn.execute("CREATE VIEW v1 AS SELECT * FROM x")
conn.execute("CREATE ALGORITHM=MERGE VIEW v2 AS SELECT * FROM x")
conn.execute(
"CREATE ALGORITHM=UNDEFINED VIEW v3 AS SELECT * FROM x"
)
conn.execute(
"CREATE DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM x"
)
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
for v in ["v1", "v2", "v3", "v4"]:
conn.execute("DROP VIEW %s" % v)
insp = inspect(testing.db)
for v in ["v1", "v2", "v3", "v4"]:
eq_(
[
(col["name"], col["type"].__class__)
for col in insp.get_columns(v)
],
[("a", mysql.INTEGER), ("b", mysql.VARCHAR)],
)
@testing.provide_metadata
def test_skip_not_describable(self):
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
conn.execute("DROP TABLE IF EXISTS test_t1")
conn.execute("DROP TABLE IF EXISTS test_t2")
conn.execute("DROP VIEW IF EXISTS test_v")
with testing.db.connect() as conn:
conn.execute("CREATE TABLE test_t1 (id INTEGER)")
conn.execute("CREATE TABLE test_t2 (id INTEGER)")
conn.execute("CREATE VIEW test_v AS SELECT id FROM test_t1")
conn.execute("DROP TABLE test_t1")
m = MetaData()
with expect_warnings(
"Skipping .* Table or view named .?test_v.? could not be "
"reflected: .* references invalid table"
):
m.reflect(views=True, bind=conn)
eq_(m.tables["test_t2"].name, "test_t2")
assert_raises_message(
exc.UnreflectableTableError,
"references invalid table",
Table,
"test_v",
MetaData(),
autoload_with=conn,
)
@testing.exclude("mysql", "<", (5, 0, 0), "no information_schema support")
def test_system_views(self):
dialect = testing.db.dialect
connection = testing.db.connect()
view_names = dialect.get_view_names(connection, "information_schema")
self.assert_("TABLES" in view_names)
@testing.provide_metadata
def test_nullable_reflection(self):
"""test reflection of NULL/NOT NULL, in particular with TIMESTAMP
defaults where MySQL is inconsistent in how it reports CREATE TABLE.
"""
meta = self.metadata
# this is ideally one table, but older MySQL versions choke
# on the multiple TIMESTAMP columns
row = testing.db.execute(
"show variables like '%%explicit_defaults_for_timestamp%%'"
).first()
explicit_defaults_for_timestamp = row[1].lower() in ("on", "1", "true")
reflected = []
for idx, cols in enumerate(
[
[
"x INTEGER NULL",
"y INTEGER NOT NULL",
"z INTEGER",
"q TIMESTAMP NULL",
],
["p TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP"],
["r TIMESTAMP NOT NULL"],
["s TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP"],
["t TIMESTAMP"],
["u TIMESTAMP DEFAULT CURRENT_TIMESTAMP"],
]
):
Table("nn_t%d" % idx, meta) # to allow DROP
testing.db.execute(
"""
CREATE TABLE nn_t%d (
%s
)
"""
% (idx, ", \n".join(cols))
)
reflected.extend(
{
"name": d["name"],
"nullable": d["nullable"],
"default": d["default"],
}
for d in inspect(testing.db).get_columns("nn_t%d" % idx)
)
if testing.db.dialect._is_mariadb_102:
current_timestamp = "current_timestamp()"
else:
current_timestamp = "CURRENT_TIMESTAMP"
eq_(
reflected,
[
{"name": "x", "nullable": True, "default": None},
{"name": "y", "nullable": False, "default": None},
{"name": "z", "nullable": True, "default": None},
{"name": "q", "nullable": True, "default": None},
{"name": "p", "nullable": True, "default": current_timestamp},
{
"name": "r",
"nullable": False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{"name": "s", "nullable": False, "default": current_timestamp},
{
"name": "t",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{
"name": "u",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": current_timestamp,
},
],
)
@testing.provide_metadata
def test_reflection_with_unique_constraint(self):
insp = inspect(testing.db)
meta = self.metadata
uc_table = Table(
"mysql_uc",
meta,
Column("a", String(10)),
UniqueConstraint("a", name="uc_a"),
)
uc_table.create()
# MySQL converts unique constraints into unique indexes.
# Separately, we get both.
indexes = dict((i["name"], i) for i in insp.get_indexes("mysql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("mysql_uc")
)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"]["unique"])
self.assert_("uc_a" in constraints)
# reflection here favors the unique index, as that's the
# more "official" MySQL construct
reflected = Table("mysql_uc", MetaData(testing.db), autoload=True)
indexes = dict((i.name, i) for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"].unique)
self.assert_("uc_a" not in constraints)
@testing.provide_metadata
def test_reflect_fulltext(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index("textdata_ix", mt.c.textdata, mysql_prefix="FULLTEXT")
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)",
)
@testing.requires.mysql_ngram_fulltext
@testing.provide_metadata
def test_reflect_fulltext_comment(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
mt.c.textdata,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
)
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
eq_(idx.dialect_options["mysql"]["with_parser"], "ngram")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable "
"(textdata) WITH PARSER ngram",
)
@testing.provide_metadata
def test_non_column_index(self):
m1 = self.metadata
t1 = Table(
"add_ix", m1, Column("x", String(50)), mysql_engine="InnoDB"
)
Index("foo_idx", t1.c.x.desc())
m1.create_all()
insp = inspect(testing.db)
eq_(
insp.get_indexes("add_ix"),
[{"name": "foo_idx", "column_names": ["x"], "unique": False}],
)
def _bug_88718_casing_0(self):
fkeys_casing_0 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_0 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_0, ischema_casing_0
def _bug_88718_casing_1(self):
fkeys_casing_1 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_1 = [
(util.u("test"), util.u("Track"), "TrackID"),
(util.u("test_schema"), util.u("Track"), "TrackID"),
]
return fkeys_casing_1, ischema_casing_1
def _bug_88718_casing_2(self):
fkeys_casing_2 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_2 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_2, ischema_casing_2
def test_correct_for_mysql_bug_88718(self):
dialect = mysql.dialect()
for casing, (fkeys, ischema) in [
(0, self._bug_88718_casing_0()),
(1, self._bug_88718_casing_1()),
(2, self._bug_88718_casing_2()),
]:
dialect._casing = casing
dialect.default_schema_name = "test"
connection = mock.Mock(
dialect=dialect, execute=lambda stmt, **params: ischema
)
dialect._correct_for_mysql_bug_88718(fkeys, connection)
eq_(
fkeys,
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.provide_metadata
def test_case_sensitive_column_constraint_reflection(self):
# test for issue #4344 which works around
# MySQL 8.0 bug https://bugs.mysql.com/bug.php?id=88718
m1 = self.metadata
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
schema=testing.config.test_schema,
mysql_engine="InnoDB",
)
Table(
"PlaylistTrack",
m1,
Column("id", Integer, primary_key=True),
Column(
"TrackID",
ForeignKey("Track.TrackID", name="FK_PlaylistTrackId"),
),
Column(
"TTrackID",
ForeignKey(
"%s.Track.TrackID" % (testing.config.test_schema,),
name="FK_PlaylistTTrackId",
),
),
mysql_engine="InnoDB",
)
m1.create_all()
if testing.db.dialect._casing in (1, 2):
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
else:
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.requires.mysql_fully_case_sensitive
@testing.provide_metadata
def test_case_sensitive_reflection_dual_case_references(self):
# this tests that within the fix we do for MySQL bug
# 88718, we don't do case-insensitive logic if the backend
# is case sensitive
m = self.metadata
Table(
"t1",
m,
Column("some_id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"T1",
m,
Column("Some_Id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"t2",
m,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.some_id", name="t1id_fk")),
Column("cap_t1id", ForeignKey("T1.Some_Id", name="cap_t1id_fk")),
mysql_engine="InnoDB",
)
m.create_all(testing.db)
eq_(
dict(
(rec["name"], rec)
for rec in inspect(testing.db).get_foreign_keys("t2")
),
{
"cap_t1id_fk": {
"name": "cap_t1id_fk",
"constrained_columns": ["cap_t1id"],
"referred_schema": None,
"referred_table": "T1",
"referred_columns": ["Some_Id"],
"options": {},
},
"t1id_fk": {
"name": "t1id_fk",
"constrained_columns": ["t1id"],
"referred_schema": None,
"referred_table": "t1",
"referred_columns": ["some_id"],
"options": {},
},
},
)
class RawReflectionTest(fixtures.TestBase):
__backend__ = True
def setup(self):
dialect = mysql.dialect()
self.parser = _reflection.MySQLTableDefinitionParser(
dialect, dialect.identifier_preparer
)
def test_key_reflection(self):
regex = self.parser._re_key
assert regex.match(" PRIMARY KEY (`id`),")
assert regex.match(" PRIMARY KEY USING BTREE (`id`),")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE,")
assert regex.match(" PRIMARY KEY (`id`)")
assert regex.match(" PRIMARY KEY USING BTREE (`id`)")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE")
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16"
)
assert not regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16"
)
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'")
# `SHOW CREATE TABLE` returns COMMENT '''comment'
# after creating table with COMMENT '\'comment'
assert regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'''")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'prefix''suffix'")
assert regex.match(
" KEY (`id`) USING BTREE COMMENT 'prefix''text''suffix'"
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# "It means if the MySQL version >= 501, execute what's in the comment"
assert regex.match(
" FULLTEXT KEY `ix_fulltext_oi_g_name` (`oi_g_name`) "
"/*!50100 WITH PARSER `ngram` */ "
)
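        # Background (hedged): the doubled quotes come from the server --
        # SHOW CREATE TABLE escapes a literal single quote in an index COMMENT
        # by doubling it, e.g. COMMENT '\'comment' is echoed back as
        # COMMENT '''comment'.  The /*!50100 ... */ wrapper is a MySQL
        # "versioned comment": servers at version 5.1.0 or later execute the
        # enclosed text (WITH PARSER `ngram`), older servers ignore it, and
        # the key regex has to tolerate it in SHOW CREATE TABLE output.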
def test_key_reflection_columns(self):
regex = self.parser._re_key
exprs = self.parser._re_keyexprs
m = regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
eq_(m.group("columns"), "`id`")
m = regex.match(" KEY (`x`, `y`) USING BTREE")
eq_(m.group("columns"), "`x`, `y`")
eq_(exprs.findall(m.group("columns")), [("x", "", ""), ("y", "", "")])
m = regex.match(" KEY (`x`(25), `y`(15)) USING BTREE")
eq_(m.group("columns"), "`x`(25), `y`(15)")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", ""), ("y", "15", "")],
)
m = regex.match(" KEY (`x`(25) DESC, `y`(15) ASC) USING BTREE")
eq_(m.group("columns"), "`x`(25) DESC, `y`(15) ASC")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", "DESC"), ("y", "15", "ASC")],
)
m = regex.match(" KEY `foo_idx` (`x` DESC)")
eq_(m.group("columns"), "`x` DESC")
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
m = regex.match(" KEY `foo_idx` (`x` DESC, `y` ASC)")
eq_(m.group("columns"), "`x` DESC, `y` ASC")
def test_fk_reflection(self):
regex = self.parser._re_fk_constraint
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE CASCADE"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"CASCADE",
),
)
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE SET NULL"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"SET NULL",
),
)
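        # Reading of the groups (hedged, inferred from these samples only):
        # (constraint name, constrained columns, referred table, referred
        # columns, an optional clause that is None in both samples,
        # ON DELETE rule, ON UPDATE rule).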
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import DDL
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import NCHAR
from sqlalchemy import select
from sqlalchemy import SmallInteger
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import TIMESTAMP
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.mysql import reflection as _reflection
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "mysql"
__backend__ = True
@testing.provide_metadata
def _run_test(self, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
use_views = testing.db.dialect.server_version_info > (5, 0, 10)
m = self.metadata
Table("mysql_types", m, *columns)
if use_views:
event.listen(
m,
"after_create",
DDL(
"CREATE OR REPLACE VIEW mysql_types_v "
"AS SELECT * from mysql_types"
),
)
event.listen(
m, "before_drop", DDL("DROP VIEW IF EXISTS mysql_types_v")
)
m.create_all()
m2 = MetaData(testing.db)
tables = [Table("mysql_types", m2, autoload=True)]
if use_views:
tables.append(Table("mysql_types_v", m2, autoload=True))
for table in tables:
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
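        # How this helper reads (hedged): each entry of `specs` is a pair of
        # (type emitted in CREATE TABLE, type expected back from reflection),
        # and `attributes` names the type attributes that must round-trip.
        # On servers newer than 5.0.10 the same assertions are also run
        # against a view over the table, so view reflection is covered too.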
def test_time_types(self):
specs = []
if testing.requires.mysql_fsp.enabled:
fsps = [None, 0, 5]
else:
fsps = [None]
for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
            # fsp=0 comes back blank from the server, so 0 is treated the
            # same as "no fsp" here
for fsp in fsps:
if fsp:
specs.append((type_(fsp=fsp), type_(fsp=fsp)))
else:
specs.append((type_(), type_()))
specs.extend(
[(TIMESTAMP(), mysql.TIMESTAMP()), (DateTime(), mysql.DATETIME())]
)
# note 'timezone' should always be None on both
self._run_test(specs, ["fsp", "timezone"])
def test_year_types(self):
specs = [
(mysql.YEAR(), mysql.YEAR(display_width=4)),
(mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
]
self._run_test(specs, ["display_width"])
def test_string_types(self):
specs = [
(String(1), mysql.MSString(1)),
(String(3), mysql.MSString(3)),
(Text(), mysql.MSText()),
(Unicode(1), mysql.MSString(1)),
(Unicode(3), mysql.MSString(3)),
(UnicodeText(), mysql.MSText()),
(mysql.MSChar(1), mysql.MSChar(1)),
(mysql.MSChar(3), mysql.MSChar(3)),
(NCHAR(2), mysql.MSChar(2)),
(mysql.MSNChar(2), mysql.MSChar(2)),
(mysql.MSNVarChar(22), mysql.MSString(22)),
]
self._run_test(specs, ["length"])
def test_integer_types(self):
specs = []
for type_ in [
mysql.TINYINT,
mysql.SMALLINT,
mysql.MEDIUMINT,
mysql.INTEGER,
mysql.BIGINT,
]:
for display_width in [None, 4, 7]:
for unsigned in [False, True]:
for zerofill in [None, True]:
kw = {}
if display_width:
kw["display_width"] = display_width
if unsigned is not None:
kw["unsigned"] = unsigned
if zerofill is not None:
kw["zerofill"] = zerofill
zerofill = bool(zerofill)
source_type = type_(**kw)
if display_width is None:
display_width = {
mysql.MEDIUMINT: 9,
mysql.SMALLINT: 6,
mysql.TINYINT: 4,
mysql.INTEGER: 11,
mysql.BIGINT: 20,
}[type_]
if zerofill:
unsigned = True
expected_type = type_(
display_width=display_width,
unsigned=unsigned,
zerofill=zerofill,
)
specs.append((source_type, expected_type))
specs.extend(
[
(SmallInteger(), mysql.SMALLINT(display_width=6)),
(Integer(), mysql.INTEGER(display_width=11)),
(BigInteger, mysql.BIGINT(display_width=20)),
]
)
self._run_test(specs, ["display_width", "unsigned", "zerofill"])
def test_binary_types(self):
specs = [
(LargeBinary(3), mysql.TINYBLOB()),
(LargeBinary(), mysql.BLOB()),
(mysql.MSBinary(3), mysql.MSBinary(3)),
(mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
(mysql.MSTinyBlob(), mysql.MSTinyBlob()),
(mysql.MSBlob(), mysql.MSBlob()),
(mysql.MSBlob(1234), mysql.MSBlob()),
(mysql.MSMediumBlob(), mysql.MSMediumBlob()),
(mysql.MSLongBlob(), mysql.MSLongBlob()),
]
self._run_test(specs, [])
@testing.uses_deprecated("Manually quoting ENUM value literals")
def test_legacy_enum_types(self):
specs = [(mysql.ENUM("''", "'fleem'"), mysql.ENUM("''", "'fleem'"))]
self._run_test(specs, ["enums"])
class ReflectionTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mysql"
__backend__ = True
def test_default_reflection(self):
from sqlalchemy.dialects.mysql import VARCHAR
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column(
"c1",
VARCHAR(10, collation="utf8_unicode_ci"),
DefaultClause(""),
nullable=False,
),
Column("c2", String(10), DefaultClause("0")),
Column("c3", String(10), DefaultClause("abc")),
Column("c4", TIMESTAMP, DefaultClause("2009-04-05 12:00:00")),
Column("c5", TIMESTAMP),
Column(
"c6",
TIMESTAMP,
DefaultClause(
sql.text(
"CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"
)
),
),
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.c.c1.server_default.arg == ""
assert def_table.c.c2.server_default.arg == "0"
assert def_table.c.c3.server_default.arg == "abc"
assert def_table.c.c4.server_default.arg == "2009-04-05 12:00:00"
assert str(reflected.c.c1.server_default.arg) == "''"
assert str(reflected.c.c2.server_default.arg) == "'0'"
assert str(reflected.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
reflected.create()
try:
reflected2 = Table(
"mysql_def", MetaData(testing.db), autoload=True
)
finally:
reflected.drop()
assert str(reflected2.c.c1.server_default.arg) == "''"
assert str(reflected2.c.c2.server_default.arg) == "'0'"
assert str(reflected2.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected2.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
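        # What the assertions show (hedged): string server defaults reflect
        # back as quoted SQL text ("''", "'0'", "'abc'") rather than the bare
        # values given at definition time, and CURRENT_TIMESTAMP may reflect
        # with or without trailing parentheses depending on server version,
        # hence the regex match instead of a literal comparison.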
def test_reflection_with_table_options(self):
comment = r"""Comment types type speedily ' " \ '' Fun!"""
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column("c1", Integer()),
mysql_engine="MEMORY",
comment=comment,
mysql_default_charset="utf8",
mysql_auto_increment="5",
mysql_avg_row_length="3",
mysql_password="secret",
mysql_connection="fish",
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.kwargs["mysql_engine"] == "MEMORY"
assert def_table.comment == comment
assert def_table.kwargs["mysql_default_charset"] == "utf8"
assert def_table.kwargs["mysql_auto_increment"] == "5"
assert def_table.kwargs["mysql_avg_row_length"] == "3"
assert def_table.kwargs["mysql_password"] == "secret"
assert def_table.kwargs["mysql_connection"] == "fish"
assert reflected.kwargs["mysql_engine"] == "MEMORY"
assert reflected.comment == comment
assert reflected.kwargs["mysql_comment"] == comment
assert reflected.kwargs["mysql_default charset"] == "utf8"
assert reflected.kwargs["mysql_avg_row_length"] == "3"
assert reflected.kwargs["mysql_connection"] == "fish"
# This field doesn't seem to be returned by mysql itself.
# assert reflected.kwargs['mysql_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
# assert reflected.kwargs['mysql_auto_increment'] == '5'
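        # Note (hedged): the reflected keyword is spelled
        # "mysql_default charset" (with a space) because reflection appears to
        # derive option names from the tokens MySQL prints in
        # SHOW CREATE TABLE, e.g. "DEFAULT CHARSET=utf8", rather than from the
        # mysql_default_charset keyword used to create the table.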
def test_reflection_on_include_columns(self):
case_table = Table(
"mysql_case",
MetaData(testing.db),
Column("c1", String(10)),
Column("C2", String(10)),
Column("C3", String(10)),
)
try:
case_table.create()
reflected = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "C2"],
)
for t in case_table, reflected:
assert "c1" in t.c.keys()
assert "C2" in t.c.keys()
reflected2 = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "c2"],
)
assert "c1" in reflected2.c.keys()
for c in ["c2", "C2", "C3"]:
assert c not in reflected2.c.keys()
finally:
case_table.drop()
def test_autoincrement(self):
meta = MetaData(testing.db)
try:
Table(
"ai_1",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_2",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_3",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_4",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column(
"int_n2",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_5",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_6",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_7",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_8",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
mysql_engine="MyISAM",
)
meta.create_all()
table_names = [
"ai_1",
"ai_2",
"ai_3",
"ai_4",
"ai_5",
"ai_6",
"ai_7",
"ai_8",
]
mr = MetaData(testing.db)
mr.reflect(only=table_names)
for tbl in [mr.tables[name] for name in table_names]:
for c in tbl.c:
if c.name.startswith("int_y"):
assert c.autoincrement
elif c.name.startswith("int_n"):
assert not c.autoincrement
tbl.insert().execute()
if "int_y" in tbl.c:
assert select([tbl.c.int_y]).scalar() == 1
assert list(tbl.select().execute().first()).count(1) == 1
else:
assert 1 not in list(tbl.select().execute().first())
finally:
meta.drop_all()
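        # Background (hedged): MyISAM is used here because it allows
        # AUTO_INCREMENT on a column that does not lead the composite primary
        # key (see ai_3 / ai_5), which InnoDB rejects unless the column leads
        # some index; reflection must still flag only the int_y columns as
        # autoincrement.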
@testing.provide_metadata
def test_view_reflection(self):
Table(
"x", self.metadata, Column("a", Integer), Column("b", String(50))
)
self.metadata.create_all()
with testing.db.connect() as conn:
conn.execute("CREATE VIEW v1 AS SELECT * FROM x")
conn.execute("CREATE ALGORITHM=MERGE VIEW v2 AS SELECT * FROM x")
conn.execute(
"CREATE ALGORITHM=UNDEFINED VIEW v3 AS SELECT * FROM x"
)
conn.execute(
"CREATE DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM x"
)
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
for v in ["v1", "v2", "v3", "v4"]:
conn.execute("DROP VIEW %s" % v)
insp = inspect(testing.db)
for v in ["v1", "v2", "v3", "v4"]:
eq_(
[
(col["name"], col["type"].__class__)
for col in insp.get_columns(v)
],
[("a", mysql.INTEGER), ("b", mysql.VARCHAR)],
)
@testing.provide_metadata
def test_skip_not_describable(self):
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
conn.execute("DROP TABLE IF EXISTS test_t1")
conn.execute("DROP TABLE IF EXISTS test_t2")
conn.execute("DROP VIEW IF EXISTS test_v")
with testing.db.connect() as conn:
conn.execute("CREATE TABLE test_t1 (id INTEGER)")
conn.execute("CREATE TABLE test_t2 (id INTEGER)")
conn.execute("CREATE VIEW test_v AS SELECT id FROM test_t1")
conn.execute("DROP TABLE test_t1")
m = MetaData()
with expect_warnings(
"Skipping .* Table or view named .?test_v.? could not be "
"reflected: .* references invalid table"
):
m.reflect(views=True, bind=conn)
eq_(m.tables["test_t2"].name, "test_t2")
assert_raises_message(
exc.UnreflectableTableError,
"references invalid table",
Table,
"test_v",
MetaData(),
autoload_with=conn,
)
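        # What is being exercised (hedged): dropping test_t1 leaves test_v
        # referencing a table that no longer exists, so describing the view
        # fails server-side; reflect(views=True) should warn and skip it,
        # while reflecting test_v directly raises UnreflectableTableError.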
@testing.exclude("mysql", "<", (5, 0, 0), "no information_schema support")
def test_system_views(self):
dialect = testing.db.dialect
connection = testing.db.connect()
view_names = dialect.get_view_names(connection, "information_schema")
self.assert_("TABLES" in view_names)
@testing.provide_metadata
def test_nullable_reflection(self):
meta = self.metadata
# this is ideally one table, but older MySQL versions choke
# on the multiple TIMESTAMP columns
row = testing.db.execute(
"show variables like '%%explicit_defaults_for_timestamp%%'"
).first()
explicit_defaults_for_timestamp = row[1].lower() in ("on", "1", "true")
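        # Background (hedged): with explicit_defaults_for_timestamp OFF (the
        # historical default) MySQL makes bare TIMESTAMP columns NOT NULL and
        # gives the first TIMESTAMP column an implicit DEFAULT
        # CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP; with it ON, TIMESTAMP
        # behaves like any other type.  The expected rows below branch on
        # that setting.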
reflected = []
for idx, cols in enumerate(
[
[
"x INTEGER NULL",
"y INTEGER NOT NULL",
"z INTEGER",
"q TIMESTAMP NULL",
],
["p TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP"],
["r TIMESTAMP NOT NULL"],
["s TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP"],
["t TIMESTAMP"],
["u TIMESTAMP DEFAULT CURRENT_TIMESTAMP"],
]
):
Table("nn_t%d" % idx, meta) # to allow DROP
testing.db.execute(
"""
CREATE TABLE nn_t%d (
%s
)
"""
% (idx, ", \n".join(cols))
)
reflected.extend(
{
"name": d["name"],
"nullable": d["nullable"],
"default": d["default"],
}
for d in inspect(testing.db).get_columns("nn_t%d" % idx)
)
if testing.db.dialect._is_mariadb_102:
current_timestamp = "current_timestamp()"
else:
current_timestamp = "CURRENT_TIMESTAMP"
eq_(
reflected,
[
{"name": "x", "nullable": True, "default": None},
{"name": "y", "nullable": False, "default": None},
{"name": "z", "nullable": True, "default": None},
{"name": "q", "nullable": True, "default": None},
{"name": "p", "nullable": True, "default": current_timestamp},
{
"name": "r",
"nullable": False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{"name": "s", "nullable": False, "default": current_timestamp},
{
"name": "t",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{
"name": "u",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": current_timestamp,
},
],
)
@testing.provide_metadata
def test_reflection_with_unique_constraint(self):
insp = inspect(testing.db)
meta = self.metadata
uc_table = Table(
"mysql_uc",
meta,
Column("a", String(10)),
UniqueConstraint("a", name="uc_a"),
)
uc_table.create()
# MySQL converts unique constraints into unique indexes.
# separately we get both
indexes = dict((i["name"], i) for i in insp.get_indexes("mysql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("mysql_uc")
)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"]["unique"])
self.assert_("uc_a" in constraints)
# reflection here favors the unique index, as that's the
# more "official" MySQL construct
reflected = Table("mysql_uc", MetaData(testing.db), autoload=True)
indexes = dict((i.name, i) for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"].unique)
self.assert_("uc_a" not in constraints)
@testing.provide_metadata
def test_reflect_fulltext(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index("textdata_ix", mt.c.textdata, mysql_prefix="FULLTEXT")
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)",
)
@testing.requires.mysql_ngram_fulltext
@testing.provide_metadata
def test_reflect_fulltext_comment(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
mt.c.textdata,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
)
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
eq_(idx.dialect_options["mysql"]["with_parser"], "ngram")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable "
"(textdata) WITH PARSER ngram",
)
@testing.provide_metadata
def test_non_column_index(self):
m1 = self.metadata
t1 = Table(
"add_ix", m1, Column("x", String(50)), mysql_engine="InnoDB"
)
Index("foo_idx", t1.c.x.desc())
m1.create_all()
insp = inspect(testing.db)
eq_(
insp.get_indexes("add_ix"),
[{"name": "foo_idx", "column_names": ["x"], "unique": False}],
)
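        # Note (hedged): the DESC ordering applied via t1.c.x.desc() is not
        # part of what gets reflected back for the index, so the expected
        # result above carries only the column name.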
def _bug_88718_casing_0(self):
fkeys_casing_0 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_0 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_0, ischema_casing_0
def _bug_88718_casing_1(self):
fkeys_casing_1 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_1 = [
(util.u("test"), util.u("Track"), "TrackID"),
(util.u("test_schema"), util.u("Track"), "TrackID"),
]
return fkeys_casing_1, ischema_casing_1
def _bug_88718_casing_2(self):
fkeys_casing_2 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_2 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_2, ischema_casing_2
def test_correct_for_mysql_bug_88718(self):
dialect = mysql.dialect()
for casing, (fkeys, ischema) in [
(0, self._bug_88718_casing_0()),
(1, self._bug_88718_casing_1()),
(2, self._bug_88718_casing_2()),
]:
dialect._casing = casing
dialect.default_schema_name = "test"
connection = mock.Mock(
dialect=dialect, execute=lambda stmt, **params: ischema
)
dialect._correct_for_mysql_bug_88718(fkeys, connection)
eq_(
fkeys,
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.provide_metadata
def test_case_sensitive_column_constraint_reflection(self):
# test for issue #4344 which works around
# MySQL 8.0 bug https://bugs.mysql.com/bug.php?id=88718
m1 = self.metadata
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
schema=testing.config.test_schema,
mysql_engine="InnoDB",
)
Table(
"PlaylistTrack",
m1,
Column("id", Integer, primary_key=True),
Column(
"TrackID",
ForeignKey("Track.TrackID", name="FK_PlaylistTrackId"),
),
Column(
"TTrackID",
ForeignKey(
"%s.Track.TrackID" % (testing.config.test_schema,),
name="FK_PlaylistTTrackId",
),
),
mysql_engine="InnoDB",
)
m1.create_all()
if testing.db.dialect._casing in (1, 2):
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
else:
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.requires.mysql_fully_case_sensitive
@testing.provide_metadata
def test_case_sensitive_reflection_dual_case_references(self):
# this tests that within the fix we do for MySQL bug
# 88718, we don't do case-insensitive logic if the backend
# is case sensitive
m = self.metadata
Table(
"t1",
m,
Column("some_id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"T1",
m,
Column("Some_Id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"t2",
m,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.some_id", name="t1id_fk")),
Column("cap_t1id", ForeignKey("T1.Some_Id", name="cap_t1id_fk")),
mysql_engine="InnoDB",
)
m.create_all(testing.db)
eq_(
dict(
(rec["name"], rec)
for rec in inspect(testing.db).get_foreign_keys("t2")
),
{
"cap_t1id_fk": {
"name": "cap_t1id_fk",
"constrained_columns": ["cap_t1id"],
"referred_schema": None,
"referred_table": "T1",
"referred_columns": ["Some_Id"],
"options": {},
},
"t1id_fk": {
"name": "t1id_fk",
"constrained_columns": ["t1id"],
"referred_schema": None,
"referred_table": "t1",
"referred_columns": ["some_id"],
"options": {},
},
},
)
class RawReflectionTest(fixtures.TestBase):
__backend__ = True
def setup(self):
dialect = mysql.dialect()
self.parser = _reflection.MySQLTableDefinitionParser(
dialect, dialect.identifier_preparer
)
def test_key_reflection(self):
regex = self.parser._re_key
assert regex.match(" PRIMARY KEY (`id`),")
assert regex.match(" PRIMARY KEY USING BTREE (`id`),")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE,")
assert regex.match(" PRIMARY KEY (`id`)")
assert regex.match(" PRIMARY KEY USING BTREE (`id`)")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE")
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16"
)
assert not regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16"
)
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'")
# `SHOW CREATE TABLE` returns COMMENT '''comment'
# after creating table with COMMENT '\'comment'
assert regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'''")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'prefix''suffix'")
assert regex.match(
" KEY (`id`) USING BTREE COMMENT 'prefix''text''suffix'"
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# "It means if the MySQL version >= 501, execute what's in the comment"
assert regex.match(
" FULLTEXT KEY `ix_fulltext_oi_g_name` (`oi_g_name`) "
"/*!50100 WITH PARSER `ngram` */ "
)
def test_key_reflection_columns(self):
regex = self.parser._re_key
exprs = self.parser._re_keyexprs
m = regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
eq_(m.group("columns"), "`id`")
m = regex.match(" KEY (`x`, `y`) USING BTREE")
eq_(m.group("columns"), "`x`, `y`")
eq_(exprs.findall(m.group("columns")), [("x", "", ""), ("y", "", "")])
m = regex.match(" KEY (`x`(25), `y`(15)) USING BTREE")
eq_(m.group("columns"), "`x`(25), `y`(15)")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", ""), ("y", "15", "")],
)
m = regex.match(" KEY (`x`(25) DESC, `y`(15) ASC) USING BTREE")
eq_(m.group("columns"), "`x`(25) DESC, `y`(15) ASC")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", "DESC"), ("y", "15", "ASC")],
)
m = regex.match(" KEY `foo_idx` (`x` DESC)")
eq_(m.group("columns"), "`x` DESC")
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
m = regex.match(" KEY `foo_idx` (`x` DESC, `y` ASC)")
eq_(m.group("columns"), "`x` DESC, `y` ASC")
def test_fk_reflection(self):
regex = self.parser._re_fk_constraint
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE CASCADE"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"CASCADE",
),
)
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE SET NULL"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"SET NULL",
),
)