| id | content |
|---|---|
406598
|
import os
import errno
import random
import socket
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def get_random_free_port(interface='0.0.0.0'):
while True:
port = random.randint(20000, 65535)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind((interface, port))
sock.close()
return port
        except OSError:
            # port already in use; close the socket and try another random port
            sock.close()
            continue
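# A minimal usage sketch of the helpers above (hypothetical directory path):
if __name__ == '__main__':
    mkdir_p('/tmp/demo/nested')        # idempotent: no error if the path already exists
    port = get_random_free_port()      # returns a currently bindable port in [20000, 65535]
    print('free port:', port)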
|
406607
|
from collections import defaultdict
from urllib.parse import urlparse
from datetime import datetime, timedelta
import pymysql
import html2text
import subprocess
import yaml
h = html2text.HTML2Text()
full_html_str = ''
num_for_each_category = 10
with open('config.yml', 'r') as config_file:
    config = yaml.safe_load(config_file)
conn = pymysql.connect(
host='127.0.0.1',
charset='utf8',
use_unicode=True,
unix_socket='/tmp/mysql.sock',
user=config['database']['user'],
passwd=None,
db=config['database']['db'],
autocommit=True
)
cur = conn.cursor()
repo_table_name = 'github_repos_' + datetime.now().strftime('%Y_%m_%d')
# repo_table_name = 'github_repos_2016_04_17'
time_format = '%Y-%m-%d'
current_time = datetime.now()
current_day = current_time.strftime(time_format)
previous_seven_days = (current_time - timedelta(days=7)).strftime(time_format)
previous_month = (current_time - timedelta(days=30)).strftime(time_format)
def determine_stats(stats_type):
if stats_type == 'star':
return ['WatchEvent', 'starred', ':star:']
elif stats_type == 'active':
return ['PushEvent', 'active', 'Pushes']
elif stats_type == 'pull request':
return ['PullRequestEvent', 'pull requests', 'PRs']
def determine_time_period(time_period):
if time_period == 'week':
return previous_seven_days
elif time_period == 'month':
return previous_month
def generating_stats(stats_type, time_type, current, num=None):
event_type, header_text, table_header_text = determine_stats(stats_type)
    time_period = determine_time_period(time_type)
query = "select repo_name, a.repo_url, count(*) as cnt, stargazers_count, description from awesome_go_activities as a, %s as b where a.repo_url=b.url and type='%s' and (a.created_at >= '%s' and a.created_at < '%s') group by repo_url order by cnt desc" % (repo_table_name, event_type, time_period, current)
if num:
query += ' limit ' + str(num)
cur.execute(query)
table_list = []
for row in cur:
repo_name, repo_api_url, increased_star_count, stars_count, description = row
simplified_name = repo_name.split('/')[-1]
repo_url = 'https://github.com/' + '/'.join(urlparse(repo_api_url).path.split('/')[-2:])
table_list.append([simplified_name, repo_url, str(increased_star_count), str(stars_count), description])
html_str = '<h2>Most %s repos in the past %s (from %s to %s)</h2>' % (header_text, time_type, time_period, current)
table_html_str = '<table><tr><th>Repo name</th><th>:arrow_up:%s </th><th>:star:</th><th>Description</th></tr>' % (table_header_text)
for row in table_list:
table_html_str += '<tr>'
table_html_str += "<td><a href=%s target='_blank'>%s</a></td><td>%s</td><td>%s</td><td>%s</td>" % (row[1], row[0], row[2], row[3], row[4])
table_html_str += '</tr>'
table_html_str += '</table>'
html_str += table_html_str
return html_str
def generating_inactive_repos(query_repo, query_activity, header):
cur.execute(query_repo)
all_repos = []
for row in cur:
all_repos.append(row)
cur.execute(query_activity)
active_repos = []
for row in cur:
active_repos.append(row[0])
inactive_repos = []
for row in all_repos:
if row[0] not in active_repos:
inactive_repos.append(row)
header += ' (%d repos)' % (len(inactive_repos))
html_str = '<h2>%s</h2>' % (header)
table_html_str = '<table><tr><th>Repo name</th><th>:star:</th><th>Description</th></tr>'
for row in inactive_repos:
url, stars_count, description = row
repo_name = urlparse(url).path.split('/')[-1]
repo_url = "https://github.com/" + '/'.join(urlparse(url).path.split('/')[-2:])
table_html_str += "<tr><td><a href=%s target='_blank'>%s</a></td><td>%s</td><td>%s</td></tr>" % (repo_url, repo_name, str(stars_count), description)
table_html_str += '</table>'
html_str += table_html_str
return html_str
inactive_2016_query_repo = "select url, stargazers_count, description from " + repo_table_name + " where updated_at < '2016' order by stargazers_count asc"
inactive_2016_query_activity = 'select distinct repo_url from awesome_go_activities'
inactive_2016_header = 'Inactive repos in 2016 (no recorded events and last updated was before 2016)'
inactive_no_push_repo = "select url, stargazers_count, description from " + repo_table_name + " where pushed_at < '2016' order by stargazers_count asc"
inactive_no_push_activity = 'select distinct repo_url from awesome_go_activities where type="PushEvent"'
inactive_no_push_header = 'Inactive repos with no push events in 2016'
full_html_str = generating_stats('star', 'week', current_day, num_for_each_category) + generating_stats('active', 'week', current_day, num_for_each_category) + generating_stats('star', 'month', current_day, num_for_each_category) + generating_stats('active', 'month', current_day, num_for_each_category) + generating_inactive_repos(inactive_2016_query_repo, inactive_2016_query_activity, inactive_2016_header) + generating_inactive_repos(inactive_no_push_repo, inactive_no_push_activity, inactive_no_push_header)
with open('awesome_go_dashboard.html', 'w') as f:
    f.write(full_html_str)
# Generate a GitHub Flavored Markdown file with pandoc
subprocess.check_call(['pandoc', 'awesome_go_dashboard.html', '-f', 'html', '-t', 'markdown_github', '-s', '--toc', '-o', 'awesome_go_dashboard.md'])
|
406612
|
apps_details = [
{
"app": "Learning xc functional from experimental data",
"repo": "https://github.com/mfkasim1/xcnn", # leave blank if no repo available
# leave blank if no paper available, strongly suggested to link to open-access paper
"paper": "https://arxiv.org/abs/2102.04229",
},
{
"app": "Basis optimization",
"repo": "https://github.com/diffqc/dqc-apps/tree/main/01-basis-opt",
"paper": "",
},
{
"app": "Alchemical perturbation",
"repo": "https://github.com/diffqc/dqc-apps/tree/main/04-alchemical-perturbation",
"paper": "",
},
]
repo_icons = {
"github": "docs/data/readme_icons/github.svg",
}
paper_icon = "docs/data/readme_icons/paper.svg"
def get_repo_name(repo_link):
# get the repository name
for repo_name in repo_icons.keys():
if repo_name in repo_link:
return repo_name
raise RuntimeError("Unlisted repository, please contact admin to add the repository.")
def add_row(app_detail):
# get the string for repository column
if app_detail['repo'].strip() != "":
repo_name = get_repo_name(app_detail['repo'])
repo_detail = f"[]({app_detail['repo']})"
else:
repo_detail = ""
# get the string for the paper column
if app_detail['paper'].strip() != "":
paper_detail = f"[]({app_detail['paper']})"
else:
paper_detail = ""
s = f"| {app_detail['app']} | {repo_detail} | {paper_detail} |\n"
return s
def main():
# construct the strings
s = "| Applications | Repo | Paper |\n"
s += "|-----------------------------------|------|-------|\n"
for app_detail in apps_details:
s += add_row(app_detail)
# open the readme file
fname = "README.md"
with open(fname, "r") as f:
content = f.read()
# find the signature in README
sig_start = "<!-- start of readme_appgen.py -->"
sig_end = "<!-- end of readme_appgen.py -->"
note = "<!-- Please do not edit this part directly, instead add your " + \
"application in the readme_appgen.py file -->\n"
idx_start = content.find(sig_start)
idx_end = content.find(sig_end)
# write the string to the README
content = content[:idx_start] + sig_start + "\n" + note + s + content[idx_end:]
with open(fname, "w") as f:
f.write(content)
if __name__ == "__main__":
main()
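# Example of a generated row, given the icon-link format above (hypothetical rendering):
# | Basis optimization | [![github](docs/data/readme_icons/github.svg)](https://github.com/diffqc/dqc-apps/tree/main/01-basis-opt) |  |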
|
406623
|
import os
from google.cloud import storage as google_storage
from label_studio_sdk import Client
BUCKET_NAME = 'my-bucket' # specify your bucket name here
GOOGLE_APPLICATION_CREDENTIALS = 'my-service-account-credentials.json' # specify your GCS credentials
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = GOOGLE_APPLICATION_CREDENTIALS
google_client = google_storage.Client()
bucket = google_client.get_bucket(BUCKET_NAME)
tasks = []
for blob in bucket.list_blobs():  # list_blobs() yields Blob objects, not filename strings
    tasks.append({'image': f'gs://{BUCKET_NAME}/{blob.name}'})
LABEL_STUDIO_URL = 'http://localhost:8080'
API_KEY = '91b3b61589784ed069b138eae3d5a5fe1e909f57'
ls = Client(url=LABEL_STUDIO_URL, api_key=API_KEY)
ls.check_connection()
project = ls.start_project(
title='Image Annotation Project from SDK',
label_config='''
<View>
<Image name="image" value="$image"/>
<RectangleLabels name="objects" toName="image">
<Choice value="Airplane"/>
<Choice value="Car"/>
</RectangleLabels>
</View>
''',
)
project.connect_google_import_storage(
bucket=BUCKET_NAME, google_application_credentials=GOOGLE_APPLICATION_CREDENTIALS
)
project.import_tasks(tasks)
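# Note: the import-storage connection above is what lets Label Studio resolve
# (and presign) the gs:// URIs referenced by the imported tasks.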
|
406635
|
import pytest
from simple_zpl2 import ZPLDocument
def test_field_block():
zdoc = ZPLDocument()
zdoc.add_field_block()
assert(zdoc.zpl_bytes == b'^XA\n^FB0,1,0,L,0\n^XZ')
zdoc = ZPLDocument()
zdoc.add_field_block(1000)
    assert(zdoc.zpl_bytes == b'^XA\n^FB1000,1,0,L,0\n^XZ')
zdoc = ZPLDocument()
zdoc.add_field_block(100, 99)
assert(zdoc.zpl_bytes == b'^XA\n^FB100,99,0,L,0\n^XZ')
zdoc = ZPLDocument()
zdoc.add_field_block(99, 98, -78)
assert(zdoc.zpl_bytes == b'^XA\n^FB99,98,-78,L,0\n^XZ')
zdoc = ZPLDocument()
zdoc.add_field_block(111, 9999, -9999, 'C')
assert(zdoc.zpl_bytes == b'^XA\n^FB111,9999,-9999,C,0\n^XZ')
zdoc = ZPLDocument()
zdoc.add_field_block(222, 1, 9999, 'R', 9999)
assert(zdoc.zpl_bytes == b'^XA\n^FB222,1,9999,R,9999\n^XZ')
def test_field_block_error():
for width in (-1, 'A', '', None):
with pytest.raises(Exception):
zdoc = ZPLDocument()
zdoc.add_field_block(width=width)
for lines in (0, 10000, 'A', '', None):
with pytest.raises(Exception):
zdoc = ZPLDocument()
zdoc.add_field_block(max_lines=lines)
for space in (-10000, 10000, 'A', '', None):
with pytest.raises(Exception):
zdoc = ZPLDocument()
zdoc.add_field_block(dots_between_lines=space)
for justify in (0, 'Q', '', None):
with pytest.raises(Exception):
zdoc = ZPLDocument()
zdoc.add_field_block(text_justification=justify)
for indent in (-1, 10000, 'B', '', None):
with pytest.raises(Exception):
zdoc = ZPLDocument()
zdoc.add_field_block(hanging_indent=indent)
|
406666
|
from django.utils.functional import cached_property
from waldur_core.structure.tests import fixtures as structure_fixtures
from . import factories
class PayPalFixture(structure_fixtures.CustomerFixture):
@cached_property
def payment(self):
return factories.PaypalPaymentFactory(customer=self.customer)
@cached_property
def invoice(self):
return factories.InvoiceFactory(customer=self.customer)
|
406680
|
import os
import sys
import time
import json
import copy
import shlex
import shutil
import random
import signal
import hashlib
import platform
import subprocess
from pyeoskit import config
from pyeoskit import wallet
from pyeoskit import utils
from pyeoskit import eosapi
from pyeoskit import log
logger = log.get_logger(__name__)
class Testnet(object):
def __init__(self, host='127.0.0.1', single_node=True, show_log=False, log_config='', extra=''):
self.host=host
self.single_node = single_node
self.show_log = show_log
self.log_config = log_config
self.extra = extra
self.tmp_dir='.eos-testnet'
self.nodes = []
self.test_accounts = (
'hello',
'helloworld11',
'helloworld12',
'helloworld13',
'helloworld14',
'helloworld15',
'helloworld33',
)
self.producer_accounts = (
'genesisbp111',
'genesisbp112',
'genesisbp113',
'genesisbp114',
'genesisbp115'
)
self.cur_dir = os.path.dirname(__file__)
if not os.path.exists(self.tmp_dir):
os.mkdir(self.tmp_dir)
wallet.set_dir(self.tmp_dir)
test_wallet = os.path.join(self.tmp_dir, 'test.wallet')
if os.path.exists(test_wallet):
os.remove(test_wallet)
psw = wallet.create('test')
print(psw)
priv_keys = [
'<KEY>',#<KEY>
'<KEY>9DN124GUok9s',#EOS61MgZLN7Frbc2J7giU7JdYjy2TqnfWFjZuLXvpHJoKzWAj7Nst
'5JbDP55GXN7MLcNYKCnJtfKi9aD2HvHAdY7g8m67zFTAFkY1uBB',#EOS5JuNfuZPATy8oPz9KMZV2asKf9m8fb2bSzftvhW55FKQFakzFL
'5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p',#EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr
            '<KEY>', #<KEY>
            '<KEY>', #<KEY>
            '<KEY>', #<KEY>
'5Jbb4wuwz8MAzTB9FJNmrVYGXo4ABb7wqPVoWGcZ6x8V2FwNeDo',#EOS7sPDxfw5yx5SZgQcVb57zS1XeSWLNpQKhaGjjy2qe61BrAQ49o
'5JHRxntHapUryUetZgWdd3cg6BrpZLMJdqhhXnMaZiiT4qdJPhv',#<KEY>
'5Jbh1Dn57DKPUHQ6F6eExX55S2nSFNxZhpZUxNYFjJ1arKGK9Q3',#<KEY>
'5JJYrXzjt47UjHyo3ud5rVnNEPTCqWvf73yWHtVH<KEY>',#EOS8h8TmXCU7Pzo5XQKqyWwXAqLpPj4DPZCv5Wx9Y4yjRrB6R64ok
'<KEY>',#<KEY>
'<KEY>',#<KEY>
]
for priv_key in priv_keys:
wallet.import_key('test', priv_key, False)
def start_nodes(self, wait=False):
self.nodes = []
if self.log_config:
configs = f'--data-dir ./{self.tmp_dir}/dd --config-dir ./{self.tmp_dir}/cd -l {self.log_config} {self.extra}'
else:
configs = f'--data-dir ./{self.tmp_dir}/dd --config-dir ./{self.tmp_dir}/cd {self.extra}'
args = f'nodeos --verbose-http-errors --http-max-response-time-ms 100 --p2p-listen-endpoint {self.host}:9100 {configs} -p eosio --plugin eosio::producer_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_api_plugin --plugin eosio::history_api_plugin -e --resource-monitor-space-threshold 99 --http-server-address {self.host}:9000 --contracts-console --access-control-allow-origin="*"' # --backing-store rocksdb'
logger.info(args)
args = shlex.split(args)
# if self.show_log:
# f = sys.stdout
# else:
# f = open('log.txt', 'a')
# f = sys.stdout
if self.show_log:
p = subprocess.Popen(args)
else:
f = open('log.txt', 'a')
p = subprocess.Popen(args, stdout=f, stderr=f)
self.nodes.append(p)
eosapi.set_node(f'http://{self.host}:9000')
while True:
time.sleep(0.5)
try:
info = eosapi.get_info()
# logger.info(info)
break
except Exception as e:
logger.info(e)
if self.single_node:
return
self.producer_keys = [
{
"public": "<KEY>",
"private": "<KEY>"
},
{
"public": "<KEY>",
"private": "<KEY>"
},
{
"public": "<KEY>",
"private": "<KEY>"
},
{
"public": "<KEY>",
"private": "<KEY>"
},
{
"public": "<KEY>",
"private": "<KEY>"
}
]
start_port = 9001
http_ports = [port for port in range(start_port, start_port+5)]
p2p_ports = [9100,]
for http_port in http_ports:
p2p_listen_port = http_port+100
index = http_port-start_port
http_ports_copy = copy.copy(http_ports)
del http_ports_copy[index]
bp = f'genesisbp11{index+1}'
logfile = f'{self.tmp_dir}/{bp}.log'
pub = self.producer_keys[index]['public']
priv = self.producer_keys[index]['private']
signature_provider = f'--signature-provider={pub}=KEY:{priv}'
http_server_address = f'--http-server-address {self.host}:{http_port}'
p2p_listen_endpoint = f'--p2p-listen-endpoint {self.host}:{p2p_listen_port}'
p2p_peer_address = ''
for port in p2p_ports:
p2p_peer_address += f'--p2p-peer-address {self.host}:{port} '
dirs = f'--data-dir {self.tmp_dir}/dd-{bp} --config-dir {self.tmp_dir}/cd-{bp} -p {bp}'
if http_port == 9001:
args = f'nodeos -e {dirs} {signature_provider} {http_server_address} {p2p_listen_endpoint} {p2p_peer_address} --verbose-http-errors --http-max-response-time-ms 100 --plugin eosio::producer_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_api_plugin --plugin eosio::history_api_plugin --resource-monitor-space-threshold 99 --contracts-console --access-control-allow-origin="*"' # --backing-store rocksdb'
else:
args = f'nodeos {dirs} {signature_provider} {http_server_address} {p2p_listen_endpoint} {p2p_peer_address} --verbose-http-errors --http-max-response-time-ms 100 --plugin eosio::producer_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_api_plugin --plugin eosio::history_api_plugin --resource-monitor-space-threshold 99 --contracts-console --access-control-allow-origin="*"' # --backing-store rocksdb'
logger.info(args)
args = shlex.split(args)
f = open(logfile, 'a')
p = subprocess.Popen(args, stdout=f, stderr=f)
self.nodes.append(p)
p2p_ports.append(p2p_listen_port)
if wait:
p.wait()
return p
def start(self):
return self.run()
def run(self):
p = self.start_nodes()
# p.wait()
# return
try:
self.init_testnet()
except Exception as e:
logger.exception(e)
# p.wait()
# print('done!')
def stop(self):
for p in self.nodes:
p.send_signal(signal.SIGINT)
self.wait()
self.nodes = []
print('done!')
def cleanup(self):
self.stop()
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def wait(self):
for p in self.nodes:
p.wait()
def deploy_contract(self, account_name, contract_name, contracts_path=None):
logger.info('++++deploy_contract %s %s', account_name, contract_name)
if not contracts_path:
contracts_path = os.path.dirname(__file__)
# contracts_path = '../../../build/externals/eosio.contracts'
# contracts_path = '.'
contracts_path = os.path.join(contracts_path, f'contracts/{contract_name}')
code_path = os.path.join(contracts_path, f'{contract_name}.wasm')
abi_path = os.path.join(contracts_path, f'{contract_name}.abi')
logger.info(code_path)
code = open(code_path, 'rb').read()
abi = open(abi_path, 'rb').read()
m = hashlib.sha256()
m.update(code)
code_hash = m.hexdigest()
r = eosapi.get_raw_code(account_name)
logger.info((code_hash, r['code_hash']))
if code_hash == r['code_hash']:
logger.info('contract already running this version of code')
return True
logger.info(f"++++++++++set contract: {account_name}")
r = eosapi.deploy_contract(account_name, code, abi, vm_type=0, compress=True)
return True
def deploy_micropython_contract(self):
logger.info("++++++++deploy_micropython_contract")
code_path = os.path.join(self.cur_dir, './contracts/micropython/micropython_uuos.wasm')
code_path = os.path.join(self.cur_dir, './contracts/micropython/micropython.wasm')
abi_path = os.path.join(self.cur_dir, './contracts/micropython/micropython.abi')
code = open(code_path, 'rb').read()
abi = open(abi_path, 'rb').read()
try:
pass
#r = eosapi.deploy_contract('hello', code, abi, vm_type=0, compress=True)
#r = eosapi.deploy_contract('eosio.mpy', code, abi, vm_type=0, compress=True)
except Exception as e:
logger.exception(e)
return
code = '''
def apply(a, b, c):
pass
'''
account = 'hello'
code = eosapi.mp_compile(account, code)
account = 'hello'
args = eosapi.s2b(account) + code
eosapi.push_action(account, 'setcode', args, {account:'active'})
account = 'eosio.mpy'
args = eosapi.s2b(account) + code
eosapi.push_action(account, 'setcode', args, {account:'active'})
def create_account(self, account, key1, key2):
newaccount = {
'creator': 'eosio',
'name': '',
'owner':
{
'threshold': 1,
'keys': [
{
'key': key1,
'weight': 1
}
],
'accounts': [],
'waits': []
},
'active':
{
'threshold': 1,
'keys': [
{
'key': key2,
'weight': 1
}
],
'accounts': [],
'waits': []
}
}
actions = []
# logger.info(('+++++++++create account', account))
newaccount['name'] = account
act = ['eosio', 'newaccount', newaccount, {'eosio':'active'}]
actions.append(act)
r = eosapi.push_actions(actions)
def init_testnet(self):
self.init_accounts()
self.init_producer()
def init_accounts(self):
if eosapi.get_account('helloworld11'):
return
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s %(lineno)d %(message)s')
# handler = logging.StreamHandler()
# handler.setFormatter(formatter)
# config.setup_eos_network()
# if len(sys.argv) == 2:
# print(sys.argv)
# eosapi.set_nodes([sys.argv[1]])
key1 = '<KEY>'
key2 = '<KEY>'
system_accounts = [
'eosio.mpy',
'eosio.bpay',
'eosio.msig',
'eosio.names',
'eosio.ram',
'eosio.ramfee',
'eosio.saving',
'eosio.stake',
'eosio.token',
'eosio.vpay',
'eosio.rex',
'eosio.reserv'
]
for a in system_accounts:
self.create_account(a, key1, key2)
pub_keys = (
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
)
producer_pub_keys = (
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
)
i = 0
for a in self.test_accounts:
key = pub_keys[i]
self.create_account(a, key, key)
i += 1
i = 0
for a in self.producer_accounts:
key = producer_pub_keys[i]
self.create_account(a, key, key)
i += 1
try:
eosapi.schedule_protocol_feature_activations(['0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd']) #PREACTIVATE_FEATURE
time.sleep(1.0)
logger.info('set PREACTIVATE_FEATURE done!')
except Exception as e:
logger.exception(e)
# try:
# eosapi.update_runtime_options(max_transaction_time=230)
# time.sleep(2.0)
# except Exception as e:
# logger.exception(e)
contracts_path = os.path.dirname(__file__)
contracts_path = os.path.join(contracts_path, 'contracts')
if not eosapi.get_raw_code_and_abi('eosio')['wasm']:
self.deploy_contract('eosio', 'eosio.bios')
time.sleep(1.0)
feature_digests = [
'1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241', #'ONLY_LINK_TO_EXISTING_PERMISSION'
'2652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25', #'FORWARD_SETCODE'
'299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707', #'WTMSIG_BLOCK_SIGNATURES'
'ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99', #'REPLACE_DEFERRED'
'4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f', #'NO_DUPLICATE_DEFERRED_ID'
'4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d67', #'RAM_RESTRICTIONS'
'4fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2', #'WEBAUTHN_KEY'
'<KEY>', #'BLOCKCHAIN_PARAMETERS'
'68dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428', #'DISALLOW_EMPTY_PRODUCER_SCHEDULE'
'825ee6288fb1373eab1b5187ec2f04f6eacb39cb3a97f356a07c91622dd61d16', #'KV_DATABASE'
'8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405', #'ONLY_BILL_FIRST_AUTHORIZER'
'ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43', #'RESTRICT_ACTION_TO_SELF'
'bf61537fd21c61a60e542a5d66c3f6a78da0589336868307f94a82bccea84e88', #'CONFIGURABLE_WASM_LIMITS'
'c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071', #'ACTION_RETURN_VALUE'
'e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526', #'FIX_LINKAUTH_RESTRICTION'
'f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d', #'GET_SENDER'
]
for digest in feature_digests:
try:
args = {'feature_digest': digest}
# logger.info(f'activate {digest}')
eosapi.push_action('eosio', 'activate', args, {'eosio':'active'})
except Exception as e:
logger.error(e)
self.deploy_micropython_contract()
try:
self.deploy_contract('eosio.token', 'eosio.token')
except Exception as e:
logger.exception(e)
if not eosapi.get_balance('eosio'):
logger.info('issue system token...')
msg = {"issuer":"eosio","maximum_supply":f"11000000000.0000 {config.main_token}"}
r = eosapi.push_action('eosio.token', 'create', msg, {'eosio.token':'active'})
assert r
r = eosapi.push_action('eosio.token','issue',{"to":"eosio","quantity":f"1000000000.0000 {config.main_token}","memo":""},{'eosio':'active'})
assert r
try:
self.deploy_contract('eosio.msig', 'eosio.msig')
except Exception as e:
logger.exception(e)
#wait for protocol activation
time.sleep(1.0)
for i in range(3):
try:
if self.deploy_contract('eosio', 'eosio.system'):
logger.info('deploy eosio.system done!')
break
except Exception as e:
pass
# logger.info(e)
else:
assert False, 'deploy eosio.system failed!'
if True:
args = dict(version = 0,
core = '4,EOS',
# min_bp_staking_amount = 0,
# vote_producer_limit = 100,
# mini_voting_requirement = 21
)
# args['min_bp_staking_amount'] = 10000000000
try:
eosapi.push_action('eosio', 'init', args, {'eosio':'active'})
except Exception as e:
logger.exception(e)
        for account in ('helloworld11', 'helloworld12', 'helloworld13', 'helloworld14'):
            if eosapi.get_balance(account) <= 0:
                args = {"from": "eosio", "to": account, "quantity": f"10000000.0000 {config.main_token}", "memo": ""}
                r = eosapi.push_action('eosio.token', 'transfer', args, {'eosio': 'active'})
if eosapi.get_balance('hello') <=0:
args = {"from":"eosio", "to":"hello","quantity":f"600000000.0000 {config.main_token}","memo":""}
r = eosapi.push_action('eosio.token', 'transfer', args, {'eosio':'active'})
for account in self.test_accounts:
eosapi.transfer('eosio', account, 10000.0)
utils.buyrambytes('eosio', account, 5*1024*1024)
utils.dbw(account, account, 1.0, 1000)
utils.dbw('hello', 'hello', 1.0, 5*1e8)
if 0:
try:
args = {'vmtype': 1, 'vmversion':0} #activate vm python
eosapi.push_action('eosio', 'activatevm', args, {'eosio':'active'})
except Exception as e:
logger.info(e)
try:
args = {'vmtype': 2, 'vmversion':0} #activate vm python
eosapi.push_action('eosio', 'activatevm', args, {'eosio':'active'})
except Exception as e:
logger.info(e)
balance = eosapi.get_balance('hello')
logger.info(f'++++balance: {balance}')
while False:
n = random.randint(0,10000000)
elapsed = 0
for i in range(n, n+10):
try:
r = eosapi.transfer('hello', 'eosio', 0.0001, str(i))
logger.info(r['processed']['elapsed'])
elapsed += int(r['processed']['elapsed'])
except Exception as e:
logger.exception(e)
logger.info(f'AVG: {elapsed/10}')
logger.info(eosapi.get_balance('hello'))
time.sleep(2.0)
for account in self.test_accounts:
# print('buy ram', account)
utils.buyrambytes('hello', account, 10*1024*1024)
# print('buy cpu', account)
utils.dbw('hello', account, 1.0, 1.0)
if 0:
args = {
'owner': 'helloworld11',
'amount': '1.0000 EOS'
}
eosapi.push_action('eosio', 'deposit', args, {'helloworld11': 'active'})
def init_producer(self):
if self.single_node:
return
a = eosapi.get_producers(True, '0', 10)
logger.info(a)
if len(a['rows']) > 1:
return
logger.info('++++++register producers...')
index = 0
for p in self.producer_accounts:
args = {
"producer": p,
"producer_key": self.producer_keys[index]['public'],
"url": '',
"location": 0
}
eosapi.push_action('eosio', 'regproducer', args, {p:'active'})
index += 1
logger.info('+++++++vote producers...')
args = {
"voter": '',
"proxy": '',
"producers": self.producer_accounts
}
for account in self.test_accounts:
args['voter'] = account
eosapi.push_action('eosio', 'voteproducer', args, {account:'active'})
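# A minimal driver sketch (assumes nodeos and the pyeoskit toolchain are installed):
# if __name__ == '__main__':
#     testnet = Testnet(single_node=True, show_log=True)
#     testnet.run()    # boots nodeos, creates test accounts, deploys the system contracts
#     testnet.stop()   # sends SIGINT to every node and waits for shutdown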
|
406722
|
import picamera
camera = picamera.PiCamera()
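# Record ten consecutive 2-second H.264 clips named 1.h264 .. 10.h264;
# record_sequence yields each filename as the recording switches to it seamlessly.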
for filename in camera.record_sequence('%d.h264' % i for i in range(1, 11)):
camera.wait_recording(2)
|
406738
|
import glob
import bs4
import concurrent.futures
import os
import re
import json
import hashlib
import gzip
def _name(arr):
index, names = arr
for name in names:
try:
            soup = bs4.BeautifulSoup(gzip.decompress(open(name, 'rb').read()), 'html.parser')
reviews = soup.find_all('div', {'data-hook':'review'})
if reviews == []:
continue
for review in reviews:
star = review.find('i', {'data-hook':'review-star-rating'}).text
body = review.find('span', {'data-hook':'review-body'}).text.strip()
                if re.search('5つ星', star) is None:
                    # the encoding is garbled; delete the file and move on
                    os.remove(name)
                    break
star = float(re.search(r'5つ星のうち(.*?)$', star).group(1))
d = {'star':star, 'body':body}
obj = json.dumps(d, indent=2, ensure_ascii=False)
sha = hashlib.sha256(bytes(obj,'utf8')).hexdigest()
                with open('reviews/' + sha, 'w') as out:
                    out.write(obj)
print(obj)
except Exception as ex:
print(ex)
arr = {}
for index, name in enumerate(glob.glob('htmls/*')):
key = index%32
if arr.get(key) is None:
arr[key] = []
arr[key].append(name)
arr = [(index,names) for index,names in arr.items()]
#_name(arr[0])
with concurrent.futures.ProcessPoolExecutor(max_workers=32) as exe:
exe.map(_name, arr)
|
406793
|
from contextlib import contextmanager
from PIL import Image
from .quantize import mmcq
@contextmanager
def get_palette(filename, color_count=10):
with Image.open(filename) as image:
colors = []
rgb = image.convert('RGB')
for x in range(0, rgb.width, 5):
for y in range(0, rgb.height, 5):
rgb_color = rgb.getpixel((x, y))
colors.append(rgb_color)
c_map = mmcq(colors, color_count)
yield c_map.palette
def get_dominant_color(filename, color_count=10):
with get_palette(filename, color_count) as palette:
return palette[0]
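# Example usage (hypothetical image file):
#   dominant_rgb = get_dominant_color('photo.jpg')
#   with get_palette('photo.jpg', color_count=5) as palette:
#       print(palette)  # list of RGB tuples, most dominant first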
|
406794
|
import json
import decimal
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return {
"_type": "decimal",
"value": str(obj)
}
return super(DecimalEncoder, self).default(obj)
class DecimalDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
if '_type' not in obj:
return obj
        obj_type = obj['_type']
        if obj_type == 'decimal':
return decimal.Decimal(obj['value'])
return obj
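# A minimal round-trip sketch using the codec above:
if __name__ == '__main__':
    payload = {'price': decimal.Decimal('19.99')}
    encoded = json.dumps(payload, cls=DecimalEncoder)
    # encoded == '{"price": {"_type": "decimal", "value": "19.99"}}'
    decoded = json.loads(encoded, cls=DecimalDecoder)
    assert decoded['price'] == payload['price']  # the Decimal survives the round trip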
|
406830
|
from thinc.api import chain, Relu, reduce_max, Softmax, add, concatenate
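# Note: these models deliberately chain shape-incompatible layers, and `reveal_type`
# exists only under mypy, so this file is meant to exercise the static type-checker
# plugin rather than to run.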
bad_model = chain(Relu(10), reduce_max(), Softmax())
bad_model2 = add(Relu(10), reduce_max(), Softmax())
bad_model_only_plugin = chain(
Relu(10), Relu(10), Relu(10), Relu(10), reduce_max(), Softmax()
)
bad_model_only_plugin2 = add(
Relu(10), Relu(10), Relu(10), Relu(10), reduce_max(), Softmax()
)
reveal_type(bad_model_only_plugin2)
bad_model_only_plugin3 = concatenate(
Relu(10), Relu(10), Relu(10), Relu(10), reduce_max(), Softmax()
)
reveal_type(bad_model_only_plugin3)
|
406855
|
import numpy as np
from pandas import DataFrame
from typing import Optional, Tuple
from numbers import Number
from pycsou.core.solver import GenericIterativeAlgorithm
from pycsou.core.map import DifferentiableMap
from pycsou.core.linop import LinearOperator
from pycsou.core.functional import ProximableFunctional
from pycsou.func.base import NullDifferentiableFunctional, NullProximableFunctional
from pycsou.util.stats import P2Algorithm
class PMYULA(GenericIterativeAlgorithm):
def __init__(self, dim: int, F: Optional[DifferentiableMap] = None, G: Optional[ProximableFunctional] = None,
                 tau: Optional[float] = None, gamma: Optional[float] = None, x0: Optional[np.ndarray] = None,
max_iter: int = 1e5, min_iter: int = 200, beta: Optional[float] = None,
accuracy_threshold: float = 1e-5, nb_burnin_iterations: int = 1000, thinning_factor: int = 100,
verbose: Optional[int] = 1, seed: int = 0, linops: Optional[Tuple[LinearOperator, ...]] = None,
store_mcmc_samples: bool = False,
pvalues: Optional[Tuple[float, ...]] = (0.10, 0.9)):
self.dim = dim
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
self.linops = linops
self.nb_burnin_iterations = nb_burnin_iterations
self.thinning_factor = thinning_factor
self.store_mcmc_samples = store_mcmc_samples
self.mcmc_samples = []
self.pvalues = pvalues
self.count = 0
if isinstance(F, DifferentiableMap):
if F.shape[1] != dim:
raise ValueError(f'F does not have the proper dimension: {F.shape[1]}!={dim}.')
else:
self.F = F
if F.diff_lipschitz_cst < np.infty:
self.beta = self.F.diff_lipschitz_cst if beta is None else beta
elif (beta is not None) and isinstance(beta, Number):
self.beta = beta
else:
raise ValueError('F must be a differentiable functional with Lipschitz-continuous gradient.')
elif F is None:
self.F = NullDifferentiableFunctional(dim=dim)
self.beta = 0
else:
raise TypeError(f'F must be of type {DifferentiableMap}.')
if isinstance(G, ProximableFunctional):
if G.dim != dim:
raise ValueError(f'G does not have the proper dimension: {G.dim}!={dim}.')
else:
self.G = G
elif G is None:
self.G = NullProximableFunctional(dim=dim)
else:
raise TypeError(f'G must be of type {ProximableFunctional}.')
if (tau is not None) and (gamma is not None):
self.tau, self.gamma = tau, gamma
elif tau is not None:
self.tau = tau
self.gamma = tau / ((self.F.diff_lipschitz_cst * tau + 1))
else:
self.tau, self.gamma = self.set_hyperparameters()
if x0 is not None:
self.x0 = np.asarray(x0)
else:
self.x0 = self.initialize_mcmc()
init_iterand = {'init_mcmc_sample': self.x0, 'mmse_raw': self.x0}
super(PMYULA, self).__init__(objective_functional=self.F + self.G, init_iterand=init_iterand, max_iter=max_iter,
min_iter=min_iter,
accuracy_threshold=accuracy_threshold, verbose=verbose)
def set_hyperparameters(self) -> Tuple[float, float]:
if isinstance(self.G, NullProximableFunctional):
tau = None
gamma = 1 / self.beta
else:
tau = 2 / self.beta
gamma = tau / ((self.beta * tau + 1))
return tau, gamma
def initialize_mcmc(self) -> np.ndarray:
        return np.zeros(shape=(self.dim,), dtype=float)
def update_iterand(self) -> dict:
if self.iter == 0:
x = self.init_iterand['init_mcmc_sample']
mmse_raw = 0
second_moment_raw = 0
if self.pvalues is None:
p2_raw = None
else:
p2_raw = [P2Algorithm(pvalue=p) for p in self.pvalues]
if self.linops is not None:
mmse_linops = [0 for _ in self.linops]
second_moment_linops = [0 for _ in self.linops]
if self.pvalues is None:
p2_linops = [None for _ in self.linops]
else:
p2_linops = [[P2Algorithm(pvalue=p) for p in self.pvalues] for _ in self.linops]
else:
if self.linops is None:
x, mmse_raw, second_moment_raw, p2_raw = self.iterand.values()
else:
x, mmse_raw, second_moment_raw, p2_raw, mmse_linops, second_moment_linops, p2_linops = self.iterand.values()
        z = self.rng.standard_normal(size=self.dim)
if isinstance(self.G, NullProximableFunctional):
x = x - self.gamma * self.F.gradient(x) + np.sqrt(2 * self.gamma) * z
else:
x = (1 - self.gamma / self.tau) * x - self.gamma * self.F.gradient(x) \
+ (self.gamma / self.tau) * self.G.prox(x, tau=self.tau) \
+ np.sqrt(2 * self.gamma) * z
if self.store_mcmc_samples:
self.mcmc_samples.append(x)
if self.iter > np.fmax(self.nb_burnin_iterations, 4):
if (self.iter - self.nb_burnin_iterations) % self.thinning_factor == 0:
self.count += 1
mmse_raw += x
second_moment_raw += x ** 2
if self.pvalues is not None:
for pp in p2_raw: pp.add_sample(x)
if self.linops is not None:
for i, linop in enumerate(self.linops):
y = linop(x)
mmse_linops[i] += y
second_moment_linops[i] += y ** 2
if self.pvalues is not None:
for pp in p2_linops[i]: pp.add_sample(y)
if self.linops is None:
iterand = {'mcmc_sample': x, 'mmse_raw': mmse_raw, 'second_moment_raw': second_moment_raw, 'p2_raw': p2_raw}
else:
iterand = {'mcmc_sample': x, 'mmse_raw': mmse_raw, 'second_moment_raw': second_moment_raw, 'p2_raw': p2_raw,
'mmse_linops': mmse_linops, 'second_moment_linops': second_moment_linops, 'p2_linops': p2_linops}
return iterand
def postprocess_iterand(self) -> dict:
if self.linops is None:
mmse_raw = self.iterand['mmse_raw'] / self.count
second_moment_raw = self.iterand['second_moment_raw'] / self.count
std_raw = np.sqrt(second_moment_raw - mmse_raw ** 2)
if self.pvalues is not None:
quantiles_raw = [pp.q for pp in self.iterand['p2_raw']]
else:
quantiles_raw = None
iterand = {'mcmc_sample': self.iterand['mcmc_sample'], 'mmse': mmse_raw, 'std': std_raw,
'quantiles': quantiles_raw, 'pvalues': self.pvalues}
else:
mmse_raw = self.iterand['mmse_raw'] / self.count
second_moment_raw = self.iterand['second_moment_raw'] / self.count
std_raw = np.sqrt(second_moment_raw - mmse_raw ** 2)
if self.pvalues is not None:
quantiles_raw = [pp.q for pp in self.iterand['p2_raw']]
else:
quantiles_raw = None
mmse_linops = []
std_linops = []
quantiles_linops = []
for i, linop in enumerate(self.linops):
mmse_linop = self.iterand['mmse_linops'][i] / self.count
second_moment_linop = self.iterand['second_moment_linops'][i] / self.count
std_linop = np.sqrt(second_moment_linop - mmse_linop ** 2)
if self.pvalues is not None:
quantiles_linop = [pp.q for pp in self.iterand['p2_linops'][i]]
else:
quantiles_linop = None
mmse_linops.append(mmse_linop)
std_linops.append(std_linop)
quantiles_linops.append(quantiles_linop)
iterand = {'mcmc_sample': self.iterand['mcmc_sample'], 'mmse_raw': mmse_raw, 'std_raw': std_raw,
'quantiles_raw': quantiles_raw,
'mmse_linops': mmse_linops, 'std_linops': std_linops, 'quantiles_linops': quantiles_linops,
'pvalues': self.pvalues}
return iterand
def stopping_metric(self):
if self.iter == 0:
return np.infty
elif (self.iter - self.nb_burnin_iterations) % self.thinning_factor != 0:
return np.infty
else:
return np.infty # self.diagnostics.loc[self.iter - 1, 'Relative Improvement (MMSE)']
def print_diagnostics(self):
print(dict(self.diagnostics.loc[self.iter]))
def update_diagnostics(self):
if self.iter == 0:
self.diagnostics = DataFrame(
columns=['Iter', 'Relative Improvement (MMSE)', 'Nb of samples'])
self.diagnostics.loc[self.iter, 'Iter'] = self.iter
self.diagnostics.loc[self.iter, 'Nb of samples'] = self.count
if np.linalg.norm(self.old_iterand['mmse_raw']) == 0:
self.diagnostics.loc[self.iter, 'Relative Improvement (MMSE)'] = np.infty
else:
self.diagnostics.loc[self.iter, 'Relative Improvement (MMSE)'] = np.linalg.norm(
self.old_iterand['mmse_raw'] - self.iterand['mmse_raw']) / np.linalg.norm(
self.old_iterand['mmse_raw'])
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from pycsou.func.loss import SquaredL2Loss
from pycsou.func.penalty import NonNegativeOrthant, L1Norm, SquaredL2Norm, Segment, L2Ball, L1Ball
from pycsou.linop import Integration1D, MovingAverage1D, DownSampling
from pycsou.opt.proxalgs import APGD
rng = np.random.default_rng(0)
N = 32
x = np.repeat([0, 2, 1, 3, 0, 2, 0], N)
S = DownSampling(size=x.size, downsampling_factor=4) * MovingAverage1D(window_size=8, shape=(x.size,))
S.compute_lipschitz_cst()
I = Integration1D(size=x.size)
I.compute_lipschitz_cst()
Gop = S * I
y = S(x) + 0.05 * rng.standard_normal(size=S.shape[0])
F = (1 / 2) * SquaredL2Loss(dim=S.shape[0], data=y) * Gop
lam = 0.1 * np.max(np.abs(F.gradient(0 * x)))
G = L2Ball(dim=x.size, radius=2)
apgd = APGD(dim=x.size, F=F, G=G, min_iter=100, max_iter=1e4, accuracy_threshold=1e-4, verbose=1)
out1, _, _ = apgd.iterate()
pmyula = PMYULA(dim=x.size, F=F, G=G, max_iter=3e4, x0=out1['iterand'] + 0.05 * rng.standard_normal(x.size),
accuracy_threshold=1e-7, pvalues=(0.05, 0.95), tau=1e-4,
verbose=100, nb_burnin_iterations=200, thinning_factor=10, linops=(I,))
out2, _, _ = pmyula.iterate()
plt.figure()
plt.subplot(1, 3, 1)
plt.plot(x)
plt.title('Original Signal')
plt.subplot(1, 3, 2)
plt.plot(y)
plt.title('Data')
plt.subplot(1, 3, 3)
plt.plot(I * out1['iterand'])
plt.plot(out2['mmse_linops'][0])
plt.fill_between(np.arange(x.size), out2['quantiles_linops'][0][0], out2['quantiles_linops'][0][1], alpha=0.3)
plt.legend(['MAP', 'MMSE', '90% Credibility Intervals'])
|
406887
|
import json
from test.factories import AttendanceFactory, OrganizationFactory
from test.harness import IntegrationTest
from app import db, Attendance
class TestAttendance(IntegrationTest):
def test_attendance(self):
cfsf = OrganizationFactory(name=u"Code for San Francisco")
url = u"https://www.codeforamerica.org/api/organizations/Code-for-San-Francisco"
cfsf_att = AttendanceFactory(organization_name=u"Code for San Francisco", organization_url=url)
oakland = OrganizationFactory(name=u"Open Oakland")
url = u"https://www.codeforamerica.org/api/organizations/Open-Oakland"
oakland_att = AttendanceFactory(organization_name=u"Open Oakland", organization_url=url)
db.session.add(cfsf)
db.session.add(cfsf_att)
db.session.add(oakland)
db.session.add(oakland_att)
db.session.commit()
response = self.app.get('/api/attendance')
        self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertIsInstance(response, dict)
self.assertTrue("total" in response.keys())
self.assertTrue("weekly" in response.keys())
# Check amounts
attendance = Attendance.query.all()
total = 0
weekly = {}
for att in attendance:
total += att.total
for week in att.weekly.keys():
if week in weekly.keys():
weekly[week] += att.weekly[week]
else:
weekly[week] = att.weekly[week]
self.assertEqual(response["total"], total)
self.assertEqual(response["weekly"], weekly)
def test_orgs_attendance(self):
OrganizationFactory(name=u"Code for San Francisco")
url = u"https://www.codeforamerica.org/api/organizations/Code-for-San-Francisco"
AttendanceFactory(organization_name=u"Code for San Francisco", organization_url=url)
OrganizationFactory(name=u"Open Oakland")
url = u"https://www.codeforamerica.org/api/organizations/Open-Oakland"
AttendanceFactory(organization_name=u"Open Oakland", organization_url=url)
db.session.commit()
response = self.app.get('/api/organizations/attendance')
        self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertIsInstance(response, dict)
self.assertTrue("organization_name" in response['organizations'][0].keys())
self.assertTrue("cfapi_url" in response['organizations'][0].keys())
self.assertTrue("total" in response['organizations'][0].keys())
self.assertTrue("weekly" in response['organizations'][0].keys())
def test_org_attendance(self):
OrganizationFactory(name=u"Code for San Francisco")
url = u"https://www.codeforamerica.org/api/organizations/Code-for-San-Francisco"
AttendanceFactory(organization_name=u"Code for San Francisco", organization_url=url)
db.session.commit()
response = self.app.get('/api/organizations/Code-for-San-Francisco/attendance')
        self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertIsInstance(response, dict)
self.assertTrue("organization_name" in response.keys())
self.assertTrue("cfapi_url" in response.keys())
self.assertTrue("total" in response.keys())
self.assertTrue("weekly" in response.keys())
|
406960
|
from django.contrib import admin
from . import models
class VenueAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class VenueAPIConfigurationAdmin(admin.ModelAdmin):
list_display = ("id", "venue")
list_select_related = ("venue",)
class VenueTapManagerAdmin(admin.ModelAdmin):
list_display = ("id", "venue", "user")
list_select_related = ("venue", "user")
admin.site.register(models.Venue, VenueAdmin)
admin.site.register(models.VenueAPIConfiguration, VenueAPIConfigurationAdmin)
admin.site.register(models.VenueTapManager, VenueTapManagerAdmin)
|
406966
|
import binaryninja
from .XTENSAArch import XTENSA
XTENSA.register()
# register with the built-in ELF view; 94 is the ELF e_machine value (EM_XTENSA) for Tensilica Xtensa
EM_XTENSA = 94
binaryninja.BinaryViewType['ELF'].register_arch(EM_XTENSA, binaryninja.enums.Endianness.LittleEndian, binaryninja.Architecture['XTENSA'])
|
407004
|
import math
import operator
import random
from queue import PriorityQueue
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from log import timeit
SOS_token = 2
EOS_token = 3
MAX_LENGTH = 50
class Encoder(nn.Module):
def __init__(self, input_size, embed_size, hidden_size,
n_layers=1, dropout=0.5):
super(Encoder, self).__init__()
self.input_size = input_size # 8014
self.hidden_size = hidden_size # 512
self.embed_size = embed_size # 256
self.embed = nn.Embedding(input_size, embed_size)
self.gru = nn.GRU(embed_size, hidden_size, n_layers,
dropout=dropout, bidirectional=True)
def forward(self, src, hidden=None):
embedded = self.embed(src) # [max_len, batch_size]
outputs, hidden = self.gru(embedded, hidden) # ([27, 32, 256],None)=>([27, 32, 1024],[4, 32, 512])
# sum bidirectional outputs
outputs = (outputs[:, :, :self.hidden_size] +
outputs[:, :, self.hidden_size:]) # =>[27, 32, 512] + [27, 32, 512]
return outputs, hidden
class Attention(nn.Module):
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.uniform_(-stdv, stdv)
def forward(self, hidden, encoder_outputs):
timestep = encoder_outputs.size(0)
h = hidden.repeat(timestep, 1, 1).transpose(0, 1) # [32, 512]=>[32, 27, 512]
encoder_outputs = encoder_outputs.transpose(0, 1) # [B*T*H] # [27, 32, 512]=>[32,27,512]
attn_energies = self.score(h, encoder_outputs) # =>[B*T]
return F.softmax(attn_energies, dim=1).unsqueeze(1) # [B*T]=>[B*1*T]
def score(self, hidden, encoder_outputs):
# [B*T*2H]->[B*T*H]
energy = F.relu(self.attn(torch.cat([hidden, encoder_outputs], 2)))
energy = energy.transpose(1, 2) # [B*H*T]
v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B*1*H]
energy = torch.bmm(v, energy) # [B*1*H] bmm [B*H*T]=>[B*1*T]
return energy.squeeze(1) # [B*T]
class Decoder(nn.Module):
def __init__(self, embed_size, hidden_size, output_size,
n_layers=1, dropout=0.2):
super(Decoder, self).__init__()
self.embed_size = embed_size # 256
self.hidden_size = hidden_size # 512
self.output_size = output_size # 10004
self.n_layers = n_layers # 1
self.embed = nn.Embedding(output_size, embed_size)
self.dropout = nn.Dropout(dropout, inplace=True)
self.attention = Attention(hidden_size)
self.gru = nn.GRU(hidden_size + embed_size, hidden_size,
n_layers, dropout=dropout)
self.out = nn.Linear(hidden_size * 2, output_size)
    def forward(self, input, last_hidden, encoder_outputs):  # takes the previous step's output word and hidden state
# Get the embedding of the current input word (last output word)
embedded = self.embed(input).unsqueeze(0) # (1,B,N) # [32]=>[32, 256]=>[1, 32, 256]
embedded = self.dropout(embedded)
# Calculate attention weights and apply to encoder outputs
attn_weights = self.attention(last_hidden[-1], encoder_outputs) # [32, 512][27, 32, 512]=>[32, 1, 27]
context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # (B,1,N) # [32, 1, 27]bmm[32, 27, 512]=>[32,1,512]
context = context.transpose(0, 1) # (1,B,N) # [32, 1, 512]=>[1, 32, 512]
# Combine embedded input word and attended context, run through RNN
rnn_input = torch.cat([embedded, context], 2) # [1, 32, 256] cat [1, 32, 512]=> [1, 32, 768]
output, hidden = self.gru(rnn_input, last_hidden) # in:[1, 32, 768],[1, 32, 512]=>[1, 32, 512],[1, 32, 512]
output = output.squeeze(0) # (1,B,N) -> (B,N)
context = context.squeeze(0)
output = self.out(torch.cat([output, context], 1)) # [32, 512] cat [32, 512] => [32, 512*2]
output = F.log_softmax(output, dim=1)
return output, hidden, attn_weights # [32, 10004] [1, 32, 512] [32, 1, 27]
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, trg, teacher_forcing_ratio=0.5):
batch_size = src.size(1)
max_len = trg.size(0)
vocab_size = self.decoder.output_size
outputs = Variable(torch.zeros(max_len, batch_size, vocab_size)).cuda()
encoder_output, hidden = self.encoder(src) # [27, 32]=> =>[27, 32, 512],[4, 32, 512]
hidden = hidden[:self.decoder.n_layers] # [4, 32, 512][1, 32, 512]
output = Variable(trg.data[0, :]) # sos
for t in range(1, max_len):
output, hidden, attn_weights = self.decoder(
output, hidden, encoder_output) # output:[32, 10004] [1, 32, 512] [32, 1, 27]
outputs[t] = output
is_teacher = random.random() < teacher_forcing_ratio
            top1 = output.data.max(1)[1]  # max over dim=1 returns (values, indices); [1] selects the indices => top1 has shape [32]
output = Variable(trg.data[t] if is_teacher else top1).cuda()
return outputs
def decode(self, src, trg, method='beam-search'):
encoder_output, hidden = self.encoder(src) # [27, 32]=> =>[27, 32, 512],[4, 32, 512]
hidden = hidden[:self.decoder.n_layers] # [4, 32, 512][1, 32, 512]
if method == 'beam-search':
return self.beam_decode(trg, hidden, encoder_output)
else:
return self.greedy_decode(trg, hidden, encoder_output)
    def greedy_decode(self, trg, decoder_hidden, encoder_outputs):
'''
        :param trg: target index tensor of shape [T, B], where T is the maximum output length and B is the batch size
:param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
:param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
:return: decoded_batch
'''
seq_len, batch_size = trg.size()
decoded_batch = torch.zeros((batch_size, seq_len))
# decoder_input = torch.LongTensor([[EN.vocab.stoi['<sos>']] for _ in range(batch_size)]).cuda()
decoder_input = Variable(trg.data[0, :]).cuda() # sos
print(decoder_input.shape)
for t in range(seq_len):
decoder_output, decoder_hidden, _ = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.data.topk(1) # [32, 10004] get candidates
topi = topi.view(-1)
decoded_batch[:, t] = topi
decoder_input = topi.detach().view(-1)
return decoded_batch
@timeit
def beam_decode(self, target_tensor, decoder_hiddens, encoder_outputs=None):
'''
        :param target_tensor: target index tensor of shape [T, B]; permuted to [B, T] internally
:param decoder_hiddens: input tensor of shape [1, B, H] for start of the decoding
:param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
:return: decoded_batch
'''
target_tensor = target_tensor.permute(1, 0)
beam_width = 10
        topk = 1  # how many sentences to generate per input
decoded_batch = []
# decoding goes sentence by sentence
for idx in range(target_tensor.size(0)): # batch_size
if isinstance(decoder_hiddens, tuple): # LSTM case
decoder_hidden = (
decoder_hiddens[0][:, idx, :].unsqueeze(0), decoder_hiddens[1][:, idx, :].unsqueeze(0))
else:
decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0) # [1, B, H]=>[1,H]=>[1,1,H]
encoder_output = encoder_outputs[:, idx, :].unsqueeze(1) # [T,B,H]=>[T,H]=>[T,1,H]
# Start with the start of the sentence token
decoder_input = torch.LongTensor([SOS_token]).cuda()
# Number of sentence to generate
endnodes = []
number_required = min((topk + 1), topk - len(endnodes))
# starting node - hidden vector, previous node, word id, logp, length
node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)
nodes = PriorityQueue()
# start the queue
nodes.put((-node.eval(), node))
qsize = 1
# start beam search
while True:
# give up when decoding takes too long
if qsize > 2000: break
# fetch the best node
score, n = nodes.get()
# print('--best node seqs len {} '.format(n.leng))
decoder_input = n.wordid
decoder_hidden = n.h
if n.wordid.item() == EOS_token and n.prevNode != None:
endnodes.append((score, n))
# if we reached maximum # of sentences required
if len(endnodes) >= number_required:
break
else:
continue
# decode for one step using decoder
decoder_output, decoder_hidden, _ = self.decoder(decoder_input, decoder_hidden, encoder_output)
# PUT HERE REAL BEAM SEARCH OF TOP
log_prob, indexes = torch.topk(decoder_output, beam_width)
nextnodes = []
for new_k in range(beam_width):
decoded_t = indexes[0][new_k].view(-1)
log_p = log_prob[0][new_k].item()
node = BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1)
score = -node.eval()
nextnodes.append((score, node))
# put them into queue
for i in range(len(nextnodes)):
score, nn = nextnodes[i]
nodes.put((score, nn))
# increase qsize
qsize += len(nextnodes) - 1
# choose nbest paths, back trace them
if len(endnodes) == 0:
endnodes = [nodes.get() for _ in range(topk)]
utterances = []
for score, n in sorted(endnodes, key=operator.itemgetter(0)):
utterance = []
utterance.append(n.wordid)
# back trace
while n.prevNode != None:
n = n.prevNode
utterance.append(n.wordid)
utterance = utterance[::-1]
utterances.append(utterance)
decoded_batch.append(utterances)
return decoded_batch
class BeamSearchNode(object):
def __init__(self, hiddenstate, previousNode, wordId, logProb, length):
'''
:param hiddenstate:
:param previousNode:
:param wordId:
:param logProb:
:param length:
'''
self.h = hiddenstate
self.prevNode = previousNode
self.wordid = wordId
self.logp = logProb
self.leng = length
def eval(self, alpha=1.0):
reward = 0
# Add here a function for shaping a reward
        return self.logp / float(self.leng - 1 + 1e-6) + alpha * reward  # length-normalized log-prob; note the length penalty (cf. Andrew Ng's beam-search notes)
def __lt__(self, other):
        return self.leng < other.leng  # tie-breaker when scores in the priority queue are equal; pick a metric suited to your task
def __gt__(self, other):
return self.leng > other.leng
|
407025
|
from setuptools import setup, find_packages
from torch.utils import cpp_extension
with open("README.md") as f:
long_description = f.read()
setup(
name="dconv_native",
version="0.1.10",
author="<NAME>",
author_email="<EMAIL>",
description="Cuda implementation of (modulated) deformable convolutions",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=["torch"],
package_dir={"": "src"},
packages=["dconv_native"],
ext_modules=[
        # CUDAExtension (not CppExtension) is needed so the .cu sources get compiled by nvcc
        cpp_extension.CUDAExtension(
"dconv_native._ops",
[
"csrc/ops/dconv.cpp",
"csrc/ops/dconv3d_gpu.cu",
"csrc/ops/dconv1d_gpu.cu"
]
),
],
cmdclass={"build_ext": cpp_extension.BuildExtension}
)
|
407033
|
import paddlex as pdx
import time
import glob
image_names = glob.glob('steel/JPEGImages/*.jpg')
model = pdx.load_model('output/hrnet/best_model')
start_time = 0
for i, image_name in enumerate(image_names):
    if i == 100:  # skip the first 100 images as warm-up; start timing here
start_time = time.time()
if i > 299:
break
result = model.predict(image_name)
print(i)
elapsed = time.time() - start_time
fps = 200 / elapsed  # 200 timed predictions (images 100 through 299)
print(f"fps:{fps}")
|
407079
|
import torch
import torch.nn as nn
class Flatten(nn.Module):
"""Flattens input by reshaping it into a one-dimensional tensor."""
def forward(self, input):
return input.view(input.size(0), -1)
class UnFlatten(nn.Module):
"""Unflattens a tensor converting it to a desired shape."""
def forward(self, input):
return input.view(-1, 16, 6, 6)
class Net(nn.Module):
def __init__(self, h_dim=576, z_dim=10) -> None:
super(Net, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(
in_channels=3, out_channels=6, kernel_size=4, stride=2
), # [batch, 6, 15, 15]
nn.ReLU(),
nn.Conv2d(
in_channels=6, out_channels=16, kernel_size=5, stride=2
), # [batch, 16, 6, 6]
nn.ReLU(),
Flatten(),
)
self.fc1 = nn.Linear(h_dim, z_dim)
self.fc2 = nn.Linear(h_dim, z_dim)
self.fc3 = nn.Linear(z_dim, h_dim)
self.decoder = nn.Sequential(
UnFlatten(),
nn.ConvTranspose2d(in_channels=16, out_channels=6, kernel_size=5, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=6, out_channels=3, kernel_size=4, stride=2),
nn.Tanh(),
)
def reparametrize(self, h):
"""Reparametrization layer of VAE."""
mu, logvar = self.fc1(h), self.fc2(h)
std = torch.exp(logvar / 2)
eps = torch.randn_like(std)
z = mu + std * eps
return z, mu, logvar
def encode(self, x):
"""Encoder of the VAE."""
h = self.encoder(x)
z, mu, logvar = self.reparametrize(h)
return z, mu, logvar
def decode(self, z):
"""Decoder of the VAE."""
z = self.fc3(z)
z = self.decoder(z)
return z
def forward(self, x):
z, mu, logvar = self.encode(x)
z_decode = self.decode(z)
return z_decode, mu, logvar
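# A minimal shape check; assumes 32x32 RGB inputs, which match h_dim=576 above:
if __name__ == "__main__":
    x = torch.randn(4, 3, 32, 32)               # batch of four RGB images
    recon, mu, logvar = Net()(x)
    print(recon.shape, mu.shape, logvar.shape)  # [4, 3, 32, 32], [4, 10], [4, 10]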
|
407089
|
from __future__ import absolute_import
from app.logic import resultsets
from sympy import sympify, I, sqrt
def test_predicates():
assert not resultsets.is_approximatable_constant(sqrt(2))
assert not resultsets.is_approximatable_constant(sympify('2'))
assert resultsets.is_complex(2 * I + 3)
assert not resultsets.is_complex(3)
|
407103
|
import numpy as np
def vdiff(x,dim):
"""VDIFF Length-preserving first central difference.
DX=VDIFF(X,DIM) differentiates X along dimension DIM using the first
central difference; DX is the same size as X.
!!! Only works for 1-D and 2-D arrays
_____________________________________________________________________
It uses the first forwards / first backwards difference at the first and last point, respectively.
_____________________________________________________________________
Usage: x=vdiff(x,dim)
__________________________________________________________________
This is part of JLAB
(C) 2000--2011 <NAME>
Rewritten in python 2.X by <NAME>, October 2016"""
y=np.zeros(x.shape)
if x.ndim==1:
if dim==1:
if x.size<=1:
print "Error in vdiff.py: length too small to perform numerical derivatives"
return
else:
y[0]=x[1]-x[0]
for k in range(1,x.size-1):
y[k]=(x[k+1]-x[k-1])/2.
y[-1]=x[-1]-x[-2]
else:
print "Error in vdiff.py"
return
elif x.ndim==2:
if dim==1:
if x.shape[0]<=1:
print "Error in vdiff.py: length too small to perform numerical derivatives"
return
else:
y[0,:]=x[1,:]-x[0,:]
for k in range(1,x.shape[0]-1):
y[k,:]=(x[k+1,:]-x[k-1,:])/2.
y[-1,:]=x[-1,:]-x[-2,:]
elif dim==2:
if x.shape[1]<=1:
print "Error in vdiff.py: length too small to perform numerical derivatives"
return
else:
y[:,0]=x[:,1]-x[:,0]
for k in range(1,x.shape[1]-1):
y[:,k]=(x[:,k+1]-x[:,k-1])/2.
y[:,-1]=x[:,-1]-x[:,-2]
else:
print "Error in vdiff.py"
return
else:
print "Error in vdiff.py"
return
return y
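# Example, assuming the function above:
#   x = np.arange(5.0) ** 2   # [0, 1, 4, 9, 16]
#   vdiff(x, 1)               # -> [1., 2., 4., 6., 7.]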
|
407107
|
from django.urls import path
from . import views
app_name = 'app_author'
urlpatterns = [
path('<slug:slug>/edit', views.author_edit_view, name='author_edit'),
path('<slug:slug>', views.author_single_view, name='author_single'),
]
|
407123
|
import typing as t
from marshmallow import Schema as Schema
from marshmallow.fields import Integer
from marshmallow.fields import URL
# schema for the detail object of validation error response
validation_error_detail_schema: t.Dict[str, t.Any] = {
"type": "object",
"properties": {
"<location>": {
"type": "object",
"properties": {
"<field_name>": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
}
# schema for validation error response
validation_error_schema: t.Dict[str, t.Any] = {
"properties": {
"detail": validation_error_detail_schema,
"message": {
"type": "string"
},
},
"type": "object"
}
# schema for generic error response
http_error_schema: t.Dict[str, t.Any] = {
"properties": {
"detail": {
"type": "object"
},
"message": {
"type": "string"
},
},
"type": "object"
}
class EmptySchema(Schema):
"""An empty schema used to generate a 204 response.
Example:
```python
@app.delete('/foo')
@output(EmptySchema)
def delete_foo():
return ''
```
It equals to:
```python
@app.delete('/foo')
@output({}, 204)
def delete_foo():
return ''
```
"""
pass
class PaginationSchema(Schema):
page = Integer()
per_page = Integer()
pages = Integer()
total = Integer()
current = URL()
next = URL()
prev = URL()
first = URL()
last = URL()
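# Added for illustration: serializing made-up pagination metadata with the
# schema above (all values below are hypothetical).
if __name__ == "__main__":
    example = {
        "page": 2, "per_page": 20, "pages": 5, "total": 100,
        "current": "http://example.com/items?page=2",
        "next": "http://example.com/items?page=3",
        "prev": "http://example.com/items?page=1",
        "first": "http://example.com/items?page=1",
        "last": "http://example.com/items?page=5",
    }
    print(PaginationSchema().dump(example))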
|
407138
|
from __future__ import unicode_literals
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import permission_events_view
def get_kwargs_factory(variable_name):
def get_kwargs(context):
ContentType = apps.get_model(
app_label='contenttypes', model_name='ContentType'
)
content_type = ContentType.objects.get_for_model(
context[variable_name]
)
return {
'app_label': '"{}"'.format(content_type.app_label),
'model': '"{}"'.format(content_type.model),
'object_id': '{}.pk'.format(variable_name)
}
return get_kwargs
link_events_list = Link(
icon='fa fa-list-ol', permissions=(permission_events_view,),
text=_('Events'), view='events:events_list'
)
link_events_for_object = Link(
icon='fa fa-list-ol', permissions=(permission_events_view,),
text=_('Events'), view='events:events_for_object',
kwargs=get_kwargs_factory('resolved_object')
)
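# Illustration only (hypothetical context): for an object bound to
# 'resolved_object' whose content type is app 'documents', model 'document',
# get_kwargs_factory('resolved_object')(context) would return:
# {'app_label': '"documents"', 'model': '"document"', 'object_id': 'resolved_object.pk'}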
|
407142
|
import sys
# variables
username=sys.argv[1]
password=sys.argv[2]
adminUrl=sys.argv[3]
applicationName=sys.argv[4]
serverName=sys.argv[5]
# connect to the server
connect(username, password, adminUrl)
# move to the managed server
serverConfig()
cd('Servers/server1')
ls()
# Undeploy the application
undeploy(applicationName, targets='server1', timeout=60000)
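# Example invocation (all values below are placeholders; the script filename is hypothetical):
#   java weblogic.WLST undeploy_app.py weblogic welcome1 t3://localhost:7001 myApp server1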
|
407202
|
from ase.io import read
from flosic_os import calculate_flosic, flosic, xyz_to_nuclei_fod, ase2pyscf, get_multiplicity
from pyscf import dft,gto
# This example shows how FLO-SIC calculations can be done in the one-shot mode.
# The necessary input is a .xyz file with the molecular geometry and the FOD positions.
# The easiest way to do a FLO-SIC one-shot calculation is to call calculate_flosic.
# This is FULL FLO-SIC.
# Let's define some parameters for that.
b = '6-311++Gss' # Basis set.
verbose = 4 # Amount of output. 4: full output.
max_cycle = 300 # Number of SCF iterations.
conv_tol = 1e-7 # Accuracy of the SCF cycle.
grids_level = 3 # Level of the numerical grid. 3 is the standard value.
xc = 'LDA,PW' # Exchange-correlation functional in the form: (exchange,correlation)
# NOTE: As there is only one way to express the exchange for LDA, there is only one identifier.
# For LDA correlation, several identifiers exist.
# We need the systems name (= Filename) as input.
sysname = 'H2'
# Now we can call calculate_flosic.
# calculate_flosic operates fully automatically; it performs a DFT SCF cycle and then applies FLO-SIC on top of that.
# The return value is a Python dictionary.
flosic_values_1 = calculate_flosic(spin=0,fname=sysname,basis=b,verbose=verbose,max_cycle=max_cycle,conv_tol=conv_tol,grid=grids_level,xc=xc)
# ALTERNATIVELY: ASE Atoms object as input.
# We need an ASE Atoms object as input.
# We also need to specify the spin.
#molecule = read('H2.xyz')
#spin = 0
#flosic_values_1 = calculate_flosic(spin=0,ase_atoms=molecule,basis=b,verbose=verbose,max_cycle=max_cycle,conv_tol=conv_tol,grid=grids_level,xc=xc)
# Another way to use FLO-SIC is as a post-processing step.
# This is POST-PROCESSING one-shot.
# Here we start a regular DFT calculation and then apply FLO-SIC.
# First, set up a DFT calculation (see example 01).
# The mole object can be generated by Pyflosic routines as well.
# This routine properly parses the .xyz file.
molecule = read(sysname+'.xyz')
geo,nuclei,fod1,fod2,included = xyz_to_nuclei_fod(molecule)
# Set spin and charge.
charge = 0
spin = get_multiplicity(sysname)
# Build the mole object.
mol = gto.M(atom=ase2pyscf(nuclei), basis={'default':b},spin=spin,charge=charge)
# Set up the DFT calculation.
dft_object = dft.UKS(mol)
dft_object.verbose = verbose
dft_object.max_cycle = max_cycle
dft_object.conv_tol = conv_tol
dft_object.grids.level = grids_level
dft_object.xc = xc
# Perform the DFT calculation.
dft_energy = dft_object.kernel()
# Apply FLO-SIC to the DFT calculation.
flosic_values_2 = flosic(mol,dft_object,fod1,fod2)
# Output the results. The output for FLO-SIC is given in the form of Python dictionaries.
print("ESIC: {}".format(flosic_values_1['etot_sic']-dft_energy))
print('Total energy of H2 (DFT): %0.5f (should be %0.5f)' % (dft_energy,-1.13634167738585))
print('Total energy of H2 (FLO-SIC FULL): %0.5f (should be %0.5f) ' % (flosic_values_1['etot_sic'],-1.18032726019))
print('Total energy of H2 (FLO-SIC POST-PROCESSING): % 0.5f (should be %0.5f) ' % (flosic_values_2['etot_sic'],-1.18032726019))
|
407298
|
from time import sleep
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class FacebookLogin:
def __init__(self, driver):
self.driver = driver
self.login = "jpe230"
self.password = "<PASSWORD>"
self.__isLogged = False
def logIn(self):
print('Facebook Login')
driver = self.driver
driver.get('https://www.facebook.com/')
try:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '[data-testid="cookie-policy-banner-accept"]')))
element.click()
        except Exception:
            pass  # Cookie banner was not shown; nothing to accept.
driver.find_element_by_xpath('//*[@id="email"]').send_keys(self.login)
driver.find_element_by_xpath('//*[@id="pass"]').send_keys(self.password)
driver.find_element_by_css_selector('[data-testid="royal_login_button"]').submit()
sleep(6)
try:
element = driver.find_element_by_css_selector('input[type="search"]')
self.__isLogged = True
except NoSuchElementException:
self.__isLogged = False
def isLogged(self):
return self.__isLogged
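# Illustration only: driving the helper above (the webdriver setup is assumed,
# and the credentials in __init__ are placeholders).
# from selenium import webdriver
# driver = webdriver.Chrome()
# fb = FacebookLogin(driver)
# fb.logIn()
# print(fb.isLogged())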
|
407311
|
from mock import AsyncMock
import pytest
from webex_assistant_sdk.dialogue.manager import MissingHandler, SimpleDialogueManager
from webex_assistant_sdk.dialogue.rules import SimpleDialogueStateRule
from webex_assistant_sdk.models.mindmeld import DialogueState
pytestmark = pytest.mark.asyncio
async def test_rule_matching(dialogue_state, test_rule):
mock_handler = AsyncMock()
manager = SimpleDialogueManager(rules={test_rule: mock_handler})
state = DialogueState()
await manager.handle(state)
assert mock_handler.called
async def test_rule_ordering(dialogue_state: DialogueState, test_rule: SimpleDialogueStateRule):
mock_handler1 = AsyncMock()
mock_handler2 = AsyncMock()
test_rule2 = SimpleDialogueStateRule(test_rule.regex)
manager = SimpleDialogueManager(rules={test_rule: mock_handler1, test_rule2: mock_handler2})
await manager.handle(dialogue_state)
assert mock_handler1.called
assert not mock_handler2.called
# Reorder and make sure the first is still called
mock_handler1.reset_mock()
mock_handler2.reset_mock()
manager = SimpleDialogueManager(rules={test_rule: mock_handler2, test_rule2: mock_handler1})
await manager.handle(dialogue_state)
assert mock_handler2.called
assert not mock_handler1.called
def test_add_rule():
manager = SimpleDialogueManager()
@manager.add_rule(pattern=".*test.*")
def test_func(state):
pass
assert test_func in manager.rules.values()
def test_add_default():
manager = SimpleDialogueManager()
@manager.add_rule(default=True)
async def test_func(state):
pass
assert test_func not in manager.rules.values()
assert manager.default_handler == test_func
async def test_no_match_with_default(dialogue_state: DialogueState):
manager = SimpleDialogueManager()
default_mock = AsyncMock()
manager.add_rule(default=True)(default_mock)
@manager.add_rule(pattern=".*test.*")
async def pattern_test(state):
pass
dialogue_state.text = "something that won't match"
await manager.handle(dialogue_state)
assert default_mock.called
async def test_no_match_no_default(dialogue_state):
manager = SimpleDialogueManager()
with pytest.raises(MissingHandler):
await manager.handle(dialogue_state)
|
407331
|
import pytest
from django.db import transaction
from node.blockchain.models.account_state import AccountState
from node.core.database import ensure_in_transaction
from node.core.exceptions import DatabaseTransactionError
@pytest.mark.django_db
@pytest.mark.parametrize('iteration', map(str, range(3)))  # we need multiple iterations for proper testing
def test_create_object_in_database(iteration):
assert not AccountState.objects.exists()
account_state = AccountState.objects.create(account_lock='0' * 64)
assert AccountState.objects.exists()
assert AccountState.objects.count() == 1
db_account_state = AccountState.objects.first()
assert db_account_state
assert db_account_state._id == account_state._id
@pytest.mark.django_db(transaction=True)
@pytest.mark.usefixtures('cleanup_for_rollback_and_commit_tests')
def test_rollback():
class TestError(Exception):
pass
try:
with transaction.atomic():
assert not AccountState.objects.exists()
account_state = AccountState.objects.create(account_lock='0' * 64)
assert AccountState.objects.exists()
assert AccountState.objects.count() == 1
db_account_state = AccountState.objects.first()
assert db_account_state
assert db_account_state._id == account_state._id
raise TestError
except TestError:
assert not AccountState.objects.exists()
@pytest.mark.django_db(transaction=True)
@pytest.mark.usefixtures('cleanup_for_rollback_and_commit_tests')
def test_commit():
with transaction.atomic():
assert not AccountState.objects.exists()
account_state = AccountState.objects.create(account_lock='0' * 64)
assert AccountState.objects.exists()
assert AccountState.objects.count() == 1
db_account_state = AccountState.objects.first()
assert db_account_state
assert db_account_state._id == account_state._id
def test_ensure_in_transaction():
@ensure_in_transaction
def test_me():
pass
with pytest.raises(DatabaseTransactionError, match='Expected to have an active transaction'):
test_me()
|
407344
|
import unittest
import munch
import basecrm
from basecrm.test.testutils import BaseTestCase
class TagsServiceTests(BaseTestCase):
def test_service_property_exists(self):
self.assertTrue(hasattr(self.client, 'tags'))
def test_method_list_exists(self):
self.assertTrue(hasattr(self.client.tags, 'list') and callable(getattr(self.client.tags, 'list')))
def test_method_create_exists(self):
self.assertTrue(hasattr(self.client.tags, 'create') and callable(getattr(self.client.tags, 'create')))
def test_method_retrieve_exists(self):
self.assertTrue(hasattr(self.client.tags, 'retrieve') and callable(getattr(self.client.tags, 'retrieve')))
def test_method_update_exists(self):
self.assertTrue(hasattr(self.client.tags, 'update') and callable(getattr(self.client.tags, 'update')))
def test_method_destroy_exists(self):
self.assertTrue(hasattr(self.client.tags, 'destroy') and callable(getattr(self.client.tags, 'destroy')))
def test_list(self):
tags = self.client.tags.list(page=1)
self.assertIsInstance(tags, list)
for tag in tags:
self.assertIsInstance(tag, munch.Munch)
def test_create(self):
self.assertIsInstance(self.tag, munch.Munch)
self.assertGreaterEqual(len(self.tag), 1)
def test_retrieve(self):
        found_tag = self.client.tags.retrieve(self.tag.id)
        self.assertIsInstance(found_tag, munch.Munch)
        self.assertEqual(found_tag.id, self.tag.id)
def test_update(self):
updated_tag = self.client.tags.update(self.tag.id, self.tag)
self.assertIsInstance(updated_tag, munch.Munch)
self.assertGreaterEqual(len(updated_tag), 1)
def test_destroy(self):
new_tag = self.create_tag()
self.assertTrue(self.client.tags.destroy(new_tag.id))
|
407346
|
from typing import Any, Callable, List, Protocol, TypeVar, runtime_checkable
_F = TypeVar("_F", bound=Callable[..., Any])
def language_id(id: str) -> Callable[[_F], _F]:
def decorator(func: _F) -> _F:
setattr(func, "__language_id__", id)
return func
return decorator
@runtime_checkable
class HasLanguageId(Protocol):
__language_id__: str
def trigger_characters(characters: List[str]) -> Callable[[_F], _F]:
def decorator(func: _F) -> _F:
setattr(func, "__trigger_characters__", characters)
return func
return decorator
@runtime_checkable
class HasRetriggerCharacters(Protocol):
    __retrigger_characters__: List[str]
def retrigger_characters(characters: List[str]) -> Callable[[_F], _F]:
def decorator(func: _F) -> _F:
setattr(func, "__retrigger_characters__", characters)
return func
return decorator
@runtime_checkable
class HasTriggerCharacters(Protocol):
__trigger_characters__: List[str]
def all_commit_characters(characters: List[str]) -> Callable[[_F], _F]:
def decorator(func: _F) -> _F:
setattr(func, "__all_commit_characters__", characters)
return func
return decorator
@runtime_checkable
class HasAllCommitCharacters(Protocol):
__all_commit_characters__: List[str]
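# Illustration only: stacking the decorators above on a stub handler (names
# below are made up). Because the protocols are runtime_checkable, isinstance()
# reduces to attribute-presence checks.
# @language_id("python")
# @trigger_characters(["."])
# def complete(params): ...
# isinstance(complete, HasLanguageId)          # -> True
# isinstance(complete, HasTriggerCharacters)   # -> True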
|
407349
|
import unittest
import asyncio
import aiohttp
import tech
import json
class TestTechMethods(unittest.TestCase):
def setUp(self):
self._loop = asyncio.get_event_loop()
        self._session = aiohttp.ClientSession(loop=self._loop)
self._tech = tech.Tech(self._session)
self._loop.run_until_complete(self._tech.authenticate("email", "password"))
"""
def test_authenticate(self):
result = self._loop.run_until_complete(self._tech.authenticate("email", "password"))
#authentication = json.loads(json.dumps(result))
self.assertTrue(result)
"""
def test_list_modules(self):
result = self._loop.run_until_complete(self._tech.list_modules())
self.assertTrue(result[0])
def test_module_data(self):
result = self._loop.run_until_complete(self._tech.get_module_data("module_id"))
zones = json.loads(json.dumps(result))
self.assertTrue("zones" in zones)
def tearDown(self):
self._loop.run_until_complete(self._session.close())
if __name__ == '__main__':
unittest.main()
|
407412
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import difflib
import gc
import os
import re
import sys
import hashlib
import heapq
import lz4.frame
import math
import operator
import tempfile
import threading
import xxhash
import numpy as np
import uuid
from annoy import AnnoyIndex
from fasteners import InterProcessLock
from itertools import cycle, islice, chain, tee
from numbers import Number
from time import sleep
from pymagnitude.converter import convert as convert_vector_file
from pymagnitude.converter import DEFAULT_NGRAM_END
from pymagnitude.converter import BOW, EOW
from pymagnitude.converter import fast_md5_file
from pymagnitude.converter import char_ngrams
from pymagnitude.third_party.repoze.lru import lru_cache
try:
from itertools import imap
except ImportError:
imap = map
try:
from itertools import izip
except ImportError:
izip = zip
try:
unicode
except NameError:
unicode = str
try:
xrange
except NameError:
xrange = range
try:
sys.path.append(os.path.dirname(__file__) + '/third_party/')
from pymagnitude.third_party.internal.pysqlite2 import dbapi2 as sqlite3
db = sqlite3.connect(':memory:')
db.close()
_SQLITE_LIB = 'internal'
except Exception as e:
import sqlite3
_SQLITE_LIB = 'system'
DEFAULT_LRU_CACHE_SIZE = 1000
def _sqlite_try_max_variable_number(num):
""" Tests whether SQLite can handle num variables """
db = sqlite3.connect(':memory:')
try:
db.cursor().execute(
"SELECT 1 IN (" + ",".join(["?"] * num) + ")",
([0] * num)
).fetchall()
return num
except BaseException:
return -1
finally:
db.close()
class Magnitude(object):
SQLITE_LIB = _SQLITE_LIB
NGRAM_BEG = 1
NGRAM_END = DEFAULT_NGRAM_END
BOW = BOW
EOW = EOW
RARE_CHAR = u"\uF002".encode('utf-8')
FTS_SPECIAL = set('*^')
MMAP_THREAD_LOCK = {}
OOV_RNG_LOCK = threading.Lock()
SQLITE_MAX_VARIABLE_NUMBER = max(max((_sqlite_try_max_variable_number(n)
for n in [99, 999, 9999, 99999])), 1)
MAX_KEY_LENGTH_FOR_OOV_SIM = 1000
ENGLISH_PREFIXES = ['counter', 'electro', 'circum', 'contra', 'contro',
'crypto', 'deuter', 'franco', 'hetero', 'megalo',
'preter', 'pseudo', 'after', 'under', 'amphi',
'anglo', 'astro', 'extra', 'hydro', 'hyper', 'infra',
'inter', 'intra', 'micro', 'multi', 'ortho', 'paleo',
'photo', 'proto', 'quasi', 'retro', 'socio', 'super',
'supra', 'trans', 'ultra', 'anti', 'back', 'down',
'fore', 'hind', 'midi', 'mini', 'over', 'post',
'self', 'step', 'with', 'afro', 'ambi', 'ante',
'anti', 'arch', 'auto', 'cryo', 'demi', 'demo',
'euro', 'gyro', 'hemi', 'homo', 'hypo', 'ideo',
'idio', 'indo', 'macr', 'maxi', 'mega', 'meta',
'mono', 'mult', 'omni', 'para', 'peri', 'pleo',
'poly', 'post', 'pros', 'pyro', 'semi', 'tele',
'vice', 'dis', 'dis', 'mid', 'mis', 'off', 'out',
'pre', 'pro', 'twi', 'ana', 'apo', 'bio', 'cis',
'con', 'com', 'col', 'cor', 'dia', 'dis', 'dif',
'duo', 'eco', 'epi', 'geo', 'im ', 'iso', 'mal',
'mon', 'neo', 'non', 'pan', 'ped', 'per', 'pod',
'pre', 'pro', 'pro', 'sub', 'sup', 'sur', 'syn',
'syl', 'sym', 'tri', 'uni', 'be', 'by', 'co', 'de',
'en', 'em', 'ex', 'on', 're', 'un', 'un', 'up', 'an',
'an', 'ap', 'bi', 'co', 'de', 'di', 'di', 'du', 'en',
'el', 'em', 'ep', 'ex', 'in', 'in', 'il', 'ir', 'sy',
'a', 'a', 'a']
ENGLISH_PREFIXES = sorted(
chain.from_iterable([(p + '-', p) for p in ENGLISH_PREFIXES]),
key=lambda x: len(x), reverse=True)
ENGLISH_SUFFIXES = ['ification', 'ologist', 'ology', 'ology', 'able',
'ible', 'hood', 'ness', 'less', 'ment', 'tion',
'logy', 'like', 'ise', 'ize', 'ful', 'ess', 'ism',
'ist', 'ish', 'ity', 'ant', 'oid', 'ory', 'ing', 'fy',
'ly', 'al']
ENGLISH_SUFFIXES = sorted(
chain.from_iterable([('-' + s, s) for s in ENGLISH_SUFFIXES]),
key=lambda x: len(x), reverse=True)
def __new__(cls, *args, **kwargs):
""" Returns a concatenated magnitude object, if Magnitude parameters """
if len(args) > 0 and isinstance(args[0], Magnitude):
obj = object.__new__(ConcatenatedMagnitude, *args, **kwargs)
obj.__init__(*args, **kwargs)
else:
obj = object.__new__(cls)
return obj
"""A Magnitude class that interfaces with the underlying SQLite
data store to provide efficient access.
Attributes:
path: the file path to the magnitude file
lazy_loading: -1 = pre-load into memory, 0 = lazy loads with unbounded
in-memory cache, >0 lazy loads with an LRU cache of that
size
        blocking: Even when lazy_loading is -1, the constructor will not
                  block; it will instead pre-load into memory in a
                  background thread. If blocking is set to True, it will
                  block until everything is pre-loaded into memory.
use_numpy: Returns a NumPy array if True or a list if False
        case_insensitive: Matches keys case-insensitively when searching.
pad_to_length: Pads to a certain length if examples are shorter than
that length or truncates if longer than that length.
truncate_left: if something needs to be truncated to the padding,
truncate off the left side
pad_left: Pads to the left.
placeholders: Extra empty dimensions to add to the vectors.
ngram_oov: Use character n-grams for generating out-of-vocabulary
vectors.
        supress_warnings: Suppress the warnings generated
batch_size: Controls the maximum vector size used in memory directly
eager: Start loading non-critical resources in the background in
anticipation they will be used.
        language: An ISO 639-1 language code (default: English 'en')
dtype: The dtype to use when use_numpy is True.
_number_of_values: When the path is set to None and Magnitude is being
used to solely featurize keys directly into vectors,
_number_of_values should be set to the
approximate upper-bound of the number of keys
that will be looked up with query(). If you don't know
the exact number, be conservative and pick a large
number, while keeping in mind the bigger
_number_of_values is, the more memory it will consume.
_namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, path, lazy_loading=0, blocking=False,
use_numpy=True, case_insensitive=False,
pad_to_length=None, truncate_left=False,
pad_left=False, placeholders=0, ngram_oov=True,
supress_warnings=False, batch_size=3000000,
eager=True, language='en', dtype=np.float32,
_namespace=None, _number_of_values=1000000):
"""Initializes a new Magnitude object."""
self.sqlite_lib = Magnitude.SQLITE_LIB
self.closed = False
self.uid = str(uuid.uuid4()).replace("-", "")
self.fd = None
if path is None:
self.memory_db = True
self.path = ":memory:"
else:
self.memory_db = False
self.path = os.path.expanduser(path)
self._all_conns = []
self.lazy_loading = lazy_loading
self.use_numpy = use_numpy
self.case_insensitive = case_insensitive
self.pad_to_length = pad_to_length
self.truncate_left = truncate_left
self.pad_left = pad_left
self.placeholders = placeholders
self.ngram_oov = ngram_oov
self.supress_warnings = supress_warnings
self.batch_size = batch_size
self.eager = eager
self.language = language and language.lower()
self.dtype = dtype
self._namespace = _namespace
self._number_of_values = _number_of_values
# Define conns and cursors store
self._conns = {}
self._cursors = {}
self._threads = []
# Convert the input file if not .magnitude
if self.path.endswith('.bin') or \
self.path.endswith('.txt') or \
self.path.endswith('.vec'):
if not supress_warnings:
sys.stdout.write(
"""WARNING: You are attempting to directly use a `.bin`,
`.txt`, or `.vec` file with Magnitude. The file is being
converted to the `.magnitude` format (which is slow) so
that it can be used with this library. This will happen on
every run / re-boot of your computer. If you want to make
this faster pre-convert your vector model to the
`.magnitude` format with the built-in command utility:
`python -m pymagnitude.converter -i input_file -o output_file`
Refer to the README for more information.
You can pass `supress_warnings=True` to the constructor to
hide this message.""") # noqa
sys.stdout.flush()
self.path = convert_vector_file(self.path)
# Open a read-only file descriptor against the file
if not self.memory_db:
self.fd = os.open(self.path, os.O_RDONLY)
# Get metadata about the vectors
self.length = self._db().execute(
"SELECT COUNT(key) FROM magnitude") \
.fetchall()[0][0]
self.original_length = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='size'") \
.fetchall()[0][0]
self.emb_dim = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='dim'") \
.fetchall()[0][0]
self.precision = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='precision'") \
.fetchall()[0][0]
subword_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword'") \
.fetchall()
self.subword = len(subword_query) > 0 and subword_query[0][0]
if self.subword:
self.subword_start = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_start'")\
.fetchall()[0][0]
self.subword_end = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_end'") \
.fetchall()[0][0]
approx_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx'") \
.fetchall()
self.approx = len(approx_query) > 0 and approx_query[0][0]
if self.approx:
self.approx_trees = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx_trees'")\
.fetchall()[0][0]
self.dim = self.emb_dim + self.placeholders
self.highest_entropy_dimensions = [row[0] for row in self._db().execute(
"SELECT value FROM magnitude_format WHERE key='entropy'")
.fetchall()]
duplicate_keys_query = self._db().execute("""
SELECT MAX(key_count)
FROM (
SELECT COUNT(key)
AS key_count
FROM magnitude
GROUP BY key
);
""").fetchall()
self.max_duplicate_keys = (
duplicate_keys_query[0][0] if duplicate_keys_query[0][0] is not None else 1) # noqa
# Iterate to pre-load
def _preload_memory():
if not self.eager: # So that it doesn't loop over the vectors twice
for key, vector in self._iter(put_cache=True):
pass
# Start creating mmap in background
self.setup_for_mmap = False
self._all_vectors = None
self._approx_index = None
if self.eager:
mmap_thread = threading.Thread(target=self.get_vectors_mmap)
self._threads.append(mmap_thread)
mmap_thread.daemon = True
mmap_thread.start()
if self.approx:
approx_mmap_thread = threading.Thread(
target=self.get_approx_index)
self._threads.append(approx_mmap_thread)
approx_mmap_thread.daemon = True
approx_mmap_thread.start()
# Create cached methods
if self.lazy_loading <= 0:
@lru_cache(None)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(None)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(None)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.lazy_loading == -1:
if blocking:
_preload_memory()
else:
preload_thread = threading.Thread(target=_preload_memory)
self._threads.append(preload_thread)
preload_thread.daemon = True
preload_thread.start()
elif self.lazy_loading > 0:
@lru_cache(self.lazy_loading)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(self.lazy_loading)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(self.lazy_loading)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.eager and blocking:
self.get_vectors_mmap() # Wait for mmap to be available
if self.approx:
self.get_approx_index() # Wait for approx mmap to be available
def _setup_for_mmap(self):
# Setup variables for get_vectors_mmap()
self._all_vectors = None
self._approx_index = None
if not self.memory_db:
self.db_hash = fast_md5_file(self.path)
else:
self.db_hash = self.uid
self.md5 = hashlib.md5(",".join(
[self.path, self.db_hash, str(self.length),
str(self.dim), str(self.precision), str(self.case_insensitive)
]).encode('utf-8')).hexdigest()
self.path_to_mmap = os.path.join(tempfile.gettempdir(),
self.md5 + '.magmmap')
self.path_to_approx_mmap = os.path.join(tempfile.gettempdir(),
self.md5 + '.approx.magmmap')
if self.path_to_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap] = threading.Lock()
if self.path_to_approx_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap] = \
threading.Lock()
self.MMAP_THREAD_LOCK = Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap]
self.MMAP_PROCESS_LOCK = InterProcessLock(self.path_to_mmap + '.lock')
self.APPROX_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap]
self.APPROX_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_approx_mmap + '.lock')
self.setup_for_mmap = True
def _db(self, force_new=False):
"""Returns a cursor to the database. Each thread gets its
own cursor.
"""
identifier = threading.current_thread().ident
conn_exists = identifier in self._cursors
if not conn_exists or force_new:
if self.fd:
if os.name == 'nt':
conn = sqlite3.connect(self.path,
check_same_thread=False)
else:
conn = sqlite3.connect('/dev/fd/%d' % self.fd,
check_same_thread=False)
else:
conn = sqlite3.connect(self.path, check_same_thread=False)
self._create_empty_db(conn.cursor())
self._all_conns.append(conn)
if not conn_exists:
self._conns[identifier] = conn
self._cursors[identifier] = conn.cursor()
elif force_new:
return conn.cursor()
return self._cursors[identifier]
def _create_empty_db(self, db):
# Calculates the number of dimensions needed to prevent hashing from
# creating a collision error of a certain value for the number of
# expected feature values being hashed
collision_error_allowed = .001
number_of_dims = max(math.ceil(math.log(
((self._number_of_values ** 2) / (-2 * math.log(-collision_error_allowed + 1))), 100)), 2) # noqa
db.execute("DROP TABLE IF EXISTS `magnitude`;")
db.execute("""
CREATE TABLE `magnitude` (
key TEXT COLLATE NOCASE
);
""")
db.execute("""
CREATE TABLE `magnitude_format` (
key TEXT COLLATE NOCASE,
value INTEGER
);
""")
insert_format_query = """
INSERT INTO `magnitude_format`(
key,
value
)
VALUES (
?, ?
);
"""
db.execute(insert_format_query, ('size', 0))
db.execute(insert_format_query, ('dim', number_of_dims))
db.execute(insert_format_query, ('precision', 0))
def _padding_vector(self):
"""Generates a padding vector."""
if self.use_numpy:
return np.zeros((self.dim,), dtype=self.dtype)
else:
return [0.0] * self.dim
def _key_t(self, key):
"""Transforms a key to lower case depending on case
sensitivity.
"""
if self.case_insensitive and (isinstance(key, str) or
isinstance(key, unicode)):
return key.lower()
return key
def _string_dist(self, a, b):
length = max(len(a), len(b))
return length - difflib.SequenceMatcher(None, a, b).ratio() * length
def _key_shrunk_2(self, key):
"""Shrinks more than two characters to two characters
"""
return re.sub(r"([^<])\1{2,}", r"\1\1", key)
def _key_shrunk_1(self, key):
"""Shrinks more than one character to a single character
"""
return re.sub(r"([^<])\1+", r"\1", key)
def _oov_key_t(self, key):
"""Transforms a key for out-of-vocabulary lookup.
"""
is_str = isinstance(key, str) or isinstance(key, unicode)
if is_str:
key = Magnitude.BOW + self._key_t(key) + Magnitude.EOW
return is_str, self._key_shrunk_2(key)
return is_str, key
def _oov_english_stem_english_ixes(self, key):
"""Strips away common English prefixes and suffixes."""
key_lower = key.lower()
start_idx = 0
end_idx = 0
for p in Magnitude.ENGLISH_PREFIXES:
if key_lower[:len(p)] == p:
start_idx = len(p)
break
for s in Magnitude.ENGLISH_SUFFIXES:
if key_lower[-len(s):] == s:
end_idx = len(s)
break
start_idx = start_idx if max(start_idx, end_idx) == start_idx else 0
end_idx = end_idx if max(start_idx, end_idx) == end_idx else 0
stripped_key = key[start_idx:len(key) - end_idx]
if len(stripped_key) < 4:
return key
elif stripped_key != key:
return self._oov_english_stem_english_ixes(stripped_key)
else:
return stripped_key
def _oov_stem(self, key):
"""Strips away common prefixes and suffixes."""
if self.language == 'en':
return self._oov_english_stem_english_ixes(key)
return key
def _db_query_similar_keys_vector(self, key, orig_key, topn=3):
"""Finds similar keys in the database and gets the mean vector."""
def _sql_escape_single(s):
return s.replace("'", "''")
def _sql_escape_fts(s):
return ''.join("\\" + c if c in Magnitude.FTS_SPECIAL
else c for c in s).replace('"', '""')
exact_search_query = """
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE NOCASE DESC
LIMIT ?;
"""
if self.subword and len(key) < Magnitude.MAX_KEY_LENGTH_FOR_OOV_SIM:
current_subword_start = self.subword_end
BOW_length = len(Magnitude.BOW) # noqa: N806
EOW_length = len(Magnitude.EOW) # noqa: N806
BOWEOW_length = BOW_length + EOW_length # noqa: N806
true_key_len = len(key) - BOWEOW_length
key_shrunk_stemmed = self._oov_stem(self._key_shrunk_1(orig_key))
key_shrunk = self._key_shrunk_1(orig_key)
key_stemmed = self._oov_stem(orig_key)
beginning_and_end_clause = ""
exact_matches = []
if true_key_len <= 6:
beginning_and_end_clause = """
magnitude.key LIKE '{0}%'
AND LENGTH(magnitude.key) <= {2} DESC,
magnitude.key LIKE '%{1}'
AND LENGTH(magnitude.key) <= {2} DESC,"""
beginning_and_end_clause = beginning_and_end_clause.format(
_sql_escape_single(key[BOW_length:BOW_length + 1]),
_sql_escape_single(key[-EOW_length - 1:-EOW_length]),
str(true_key_len))
if key != orig_key:
exact_matches.append((key_shrunk, self._key_shrunk_2(orig_key)))
if key_stemmed != orig_key:
exact_matches.append((key_stemmed,))
if key_shrunk_stemmed != orig_key:
exact_matches.append((key_shrunk_stemmed,))
if len(exact_matches) > 0:
for exact_match in exact_matches:
results = []
split_results = []
limits = np.array_split(list(range(topn)), len(exact_match))
for i, e in enumerate(exact_match):
limit = len(limits[i])
split_results.extend(self._db().execute(
exact_search_query, (e, e, limit)).fetchall())
results.extend(self._db().execute(
exact_search_query, (e, e, topn)).fetchall())
if len(split_results) >= topn:
results = split_results
if len(results) > 0:
break
else:
results = []
if len(results) == 0:
search_query = """
SELECT magnitude.*
FROM magnitude_subword, magnitude
WHERE char_ngrams MATCH ?
AND magnitude.rowid = magnitude_subword.rowid
ORDER BY
(
(
LENGTH(offsets(magnitude_subword)) -
LENGTH(
REPLACE(offsets(magnitude_subword), ' ', '')
)
)
+
1
) DESC,
""" + beginning_and_end_clause + """
LENGTH(magnitude.key) ASC
LIMIT ?;
""" # noqa
while (len(results) < topn and
current_subword_start >= self.subword_start):
ngrams = list(char_ngrams(
key, current_subword_start, self.subword_end))
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
small_typo = len(results) > 0 and self._string_dist(
results[0][0].lower(), orig_key.lower()) <= 4
if key_shrunk_stemmed != orig_key and key_shrunk_stemmed != key_shrunk and not small_typo: # noqa
ngrams = list(
char_ngrams(
self._oov_key_t(key_shrunk_stemmed)[1],
current_subword_start,
self.subword_end))
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
current_subword_start -= 1
else:
# As a backup do a search with 'NOCASE'
results = self._db().execute(exact_search_query,
(orig_key, orig_key, topn)).fetchall()
final_results = []
for result in results:
result_key, vec = self._db_full_result_to_vec(result)
final_results.append(vec)
if len(final_results) > 0:
mean_vector = np.mean(final_results, axis=0)
return mean_vector / np.linalg.norm(mean_vector)
else:
return self._padding_vector()
def _seed(self, val):
"""Returns a unique seed for val and the (optional) namespace."""
if self._namespace:
return xxhash.xxh32(self._namespace + Magnitude.RARE_CHAR +
val.encode('utf-8')).intdigest()
else:
return xxhash.xxh32(val.encode('utf-8')).intdigest()
def _out_of_vocab_vector(self, key):
"""Generates a random vector based on the hash of the key."""
orig_key = key
is_str, key = self._oov_key_t(key)
if not is_str:
seed = self._seed(type(key).__name__)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
random_vector[-1] = self.dtype(key) / np.finfo(self.dtype).max
elif not self.ngram_oov or len(key) < Magnitude.NGRAM_BEG:
seed = self._seed(key)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
else:
ngrams = char_ngrams(key, Magnitude.NGRAM_BEG,
Magnitude.NGRAM_END)
random_vectors = []
for i, ngram in enumerate(ngrams):
seed = self._seed(ngram)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vectors.append(
np.random.uniform(-1, 1, (self.emb_dim,)))
Magnitude.OOV_RNG_LOCK.release()
random_vector = np.mean(random_vectors, axis=0)
np.random.seed()
if self.placeholders > 0:
random_vector = np.pad(random_vector, [(0, self.placeholders)],
mode='constant', constant_values=0.0)
if is_str:
random_vector = random_vector / np.linalg.norm(random_vector)
final_vector = (
random_vector *
0.3 +
self._db_query_similar_keys_vector(
key,
orig_key) *
0.7)
final_vector = final_vector / np.linalg.norm(final_vector)
else:
final_vector = random_vector
if self.use_numpy:
return final_vector
else:
return final_vector.tolist()
def _db_batch_generator(self, params):
""" Generates batches of paramaters that respect
SQLite's MAX_VARIABLE_NUMBER """
if len(params) <= Magnitude.SQLITE_MAX_VARIABLE_NUMBER:
yield params
else:
it = iter(params)
for batch in \
iter(lambda: tuple(
islice(it, Magnitude.SQLITE_MAX_VARIABLE_NUMBER)
), ()):
yield batch
def _db_result_to_vec(self, result):
"""Converts a database result to a vector."""
if self.use_numpy:
vec = np.zeros((self.dim,), dtype=self.dtype)
vec[0:self.emb_dim] = result
vec = vec / float(10**self.precision)
return vec
else:
return [v / float(10**self.precision) for v in result] + \
[0.0] * self.placeholders
def _db_full_result_to_vec(self, result, put_cache=True):
"""Converts a full database result to a vector."""
result_key = result[0]
if self._query_is_cached(result_key):
return (result_key, self.query(result_key))
else:
vec = self._db_result_to_vec(result[1:])
if put_cache:
self._vector_for_key_cached._cache.put((result_key,), vec)
return (result_key, vec)
def _vector_for_key(self, key):
"""Queries the database for a single key."""
result = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE BINARY DESC
LIMIT 1;""",
(key, key)).fetchone()
if result is None or self._key_t(result[0]) != self._key_t(key):
return None
else:
return self._db_result_to_vec(result[1:])
def _vectors_for_keys(self, keys):
"""Queries the database for multiple keys."""
unseen_keys = tuple(key for key in keys
if not self._query_is_cached(key))
unseen_keys_map = {}
if len(unseen_keys) > 0:
unseen_keys_map = {self._key_t(k): i for i, k in
enumerate(unseen_keys)}
unseen_vectors = [None] * len(unseen_keys)
seen_keys = set()
for unseen_keys_batch in self._db_batch_generator(unseen_keys):
results = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key
IN (""" + ' ,'.join(['?'] * len(unseen_keys_batch)) +
""");
""",
unseen_keys_batch)
for result in results:
result_key, vec = self._db_full_result_to_vec(result)
result_key_t = self._key_t(result_key)
if result_key_t in unseen_keys_map:
i = unseen_keys_map[result_key_t]
if (
(result_key_t not in seen_keys or
result_key == unseen_keys[i]) and
(
self.case_insensitive or
result_key == unseen_keys[i])
):
seen_keys.add(result_key_t)
unseen_vectors[i] = vec
for i in range(len(unseen_vectors)):
self._vector_for_key_cached._cache.put((unseen_keys[i],),
unseen_vectors[i])
if unseen_vectors[i] is None:
unseen_vectors[i] = \
self._out_of_vocab_vector_cached(unseen_keys[i])
vectors = [self.query(key) if key not in unseen_keys_map else
unseen_vectors[unseen_keys_map[self._key_t(key)]]
for key in keys]
return vectors
def _key_for_index(self, index, return_vector=True):
"""Queries the database the key at a single index."""
columns = "key"
if return_vector:
columns = "*"
result = self._db().execute(
"""
SELECT """ + columns + """
FROM `magnitude`
WHERE rowid = ?
LIMIT 1;
""",
(int(index + 1),)).fetchone()
if result is None:
raise IndexError("The index %d is out-of-range" % index)
else:
if return_vector:
return self._db_full_result_to_vec(result)
else:
return result[0]
def _keys_for_indices(self, indices, return_vector=True):
"""Queries the database for the keys of multiple indices."""
unseen_indices = tuple(int(index + 1) for index in indices
if self._key_for_index_cached._cache.get(((index,), # noqa
frozenset([('return_vector', return_vector)]))) is None) # noqa
unseen_indices_map = {}
if len(unseen_indices) > 0:
columns = "key"
if return_vector:
columns = "*"
unseen_indices_map = {(index - 1): i for i, index in
enumerate(unseen_indices)}
unseen_keys = [None] * len(unseen_indices)
for unseen_indices_batch in \
self._db_batch_generator(unseen_indices):
results = self._db().execute(
"""
SELECT rowid, """ + columns + """
FROM `magnitude`
WHERE rowid IN (""" +
' ,'.join(['?'] * len(unseen_indices_batch)) +
""");""",
unseen_indices_batch)
for result in results:
i = unseen_indices_map[result[0] - 1]
result_key = result[1]
if return_vector:
unseen_keys[i] = self._db_full_result_to_vec(
result[1:])
else:
unseen_keys[i] = result_key
self._key_for_index_cached._cache.put(
(
(unseen_indices[i] - 1,),
frozenset([('return_vector', return_vector)])
),
unseen_keys[i]
)
for i in range(len(unseen_keys)):
if unseen_keys[i] is None:
raise IndexError("The index %d is out-of-range" %
unseen_indices[i] - 1)
keys = [self.index(index, return_vector=return_vector)
if index not in unseen_indices_map else
unseen_keys[unseen_indices_map[index]] for index in indices]
return keys
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
pad_to_length = pad_to_length or self.pad_to_length
pad_left = pad_left or self.pad_left
truncate_left = truncate_left or self.truncate_left
if not isinstance(q, list): # Single key
vec = self._vector_for_key_cached(q)
if vec is None:
return self._out_of_vocab_vector_cached(q)
else:
return vec
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
pad_to_length = pad_to_length if pad_to_length else len(q)
padding_length = max(pad_to_length - len(q), 0)
keys_length = pad_to_length - padding_length
vectors = self._vectors_for_keys(q)
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if self.use_numpy:
tensor = np.zeros((pad_to_length, self.dim), dtype=self.dtype)
else:
tensor = [self._padding_vector() for i in range(pad_to_length)]
if pad_left:
tensor[-keys_length:] = vectors
else:
tensor[0:keys_length] = vectors
return tensor
elif isinstance(q, list): # 2D List
max_q = max([len(subquery) for subquery in q])
pad_to_length = pad_to_length if pad_to_length else max_q
if self.use_numpy:
tensor = np.zeros((len(q), pad_to_length, self.dim),
dtype=self.dtype)
else:
tensor = [[self._padding_vector() for i in range(pad_to_length)]
for j in range(len(q))]
for row, sq in enumerate(q):
padding_length = max(pad_to_length - len(sq), 0)
keys_length = pad_to_length - padding_length
vectors = self._vectors_for_keys(sq)
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if pad_left:
if self.use_numpy:
tensor[row, -keys_length:] = vectors
else:
tensor[row][-keys_length:] = vectors
else:
if self.use_numpy:
tensor[row, 0:keys_length] = vectors
else:
tensor[row][0:keys_length] = vectors
return tensor
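    # Illustration only (file path and keys below are hypothetical):
    #   vectors = Magnitude('vectors.magnitude')
    #   vectors.query('dog')                           # shape: (dim,)
    #   vectors.query(['dog', 'cat'])                  # shape: (2, dim)
    #   vectors.query([['I', 'saw'], ['a', 'dog']])    # shape: (2, 2, dim)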
def index(self, q, return_vector=True):
"""Gets a key for an index or multiple indices."""
if isinstance(q, list) or isinstance(q, tuple):
return self._keys_for_indices(q, return_vector=return_vector)
else:
return self._key_for_index_cached(q, return_vector=return_vector)
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def _query_numpy(self, key):
"""Returns the query for a key, forcibly converting the
resulting vector to a numpy array.
"""
key_is_ndarray = isinstance(key, np.ndarray)
key_is_list = isinstance(key, list)
key_len_ge_0 = key_is_list and len(key) > 0
key_0_is_number = key_len_ge_0 and isinstance(key[0], Number)
key_0_is_ndarray = key_len_ge_0 and isinstance(key[0], np.ndarray)
key_0_is_list = key_len_ge_0 and isinstance(key[0], list)
key_0_len_ge_0 = key_0_is_list and len(key[0]) > 0
key_0_0_is_number = (key_0_is_list and key_0_len_ge_0 and
isinstance(key[0][0], Number))
if (key_is_ndarray or key_0_is_number or key_0_is_ndarray or key_0_0_is_number): # noqa
return key
elif not self.use_numpy:
return np.asarray(self.query(key))
else:
return self.query(key)
def _query_is_cached(self, key):
"""Checks if the query been cached by Magnitude."""
return ((self._vector_for_key_cached._cache.get((key,)) is not None) or ( # noqa
self._out_of_vocab_vector_cached._cache.get((key,)) is not None))
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def distance(self, key, q):
"""Calculates the distance from key to the key(s) in q."""
a = self._query_numpy(key)
if not isinstance(q, list):
b = self._query_numpy(q)
return np.linalg.norm(a - b)
else:
return [np.linalg.norm(a - self._query_numpy(b)) for b in q]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def similarity(self, key, q):
"""Calculates the similarity from key to the key(s) in q."""
a = self._query_numpy(key)
if not isinstance(q, list):
b = self._query_numpy(q)
return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
else:
bs = [self._query_numpy(b) for b in q]
return [np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
for b in bs]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_to_given(self, key, q):
"""Calculates the most similar key in q to key."""
distances = self.distance(key, q)
min_index, _ = min(enumerate(distances), key=operator.itemgetter(1))
return q[min_index]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def doesnt_match(self, q):
"""Given a set of keys, figures out which key doesn't
match the rest.
"""
mean_vector = np.mean(self._query_numpy([[sq] for sq in q]), axis=0)
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
distances = [np.linalg.norm(mean_unit_vector - self._query_numpy(b))
for b in q]
max_index, _ = max(enumerate(distances), key=operator.itemgetter(1))
return q[max_index]
def _db_query_similarity(
self,
positive,
negative,
min_similarity=None,
topn=10,
exclude_keys=set(),
return_similarities=False,
method='distance',
effort=1.0):
"""Runs a database query to find vectors close to vector."""
COSMUL = method == '3cosmul' # noqa: N806
APPROX = method == 'approx' # noqa: N806
DISTANCE = not COSMUL and not APPROX # noqa: N806
exclude_keys = {self._key_t(exclude_key)
for exclude_key in exclude_keys}
if topn is None:
topn = self.length
filter_topn = self.max_duplicate_keys * (topn + len(exclude_keys))
if min_similarity is not None:
min_similarity = min_similarity * -1
# Find mean unit vector
if (DISTANCE or APPROX) and (len(negative) > 0 or len(positive) > 1):
positive_vecs = np.sum(self._query_numpy(positive), axis=0)
if len(negative) > 0:
negative_vecs = -1.0 * np.sum(self._query_numpy(negative),
axis=0)
else:
negative_vecs = np.zeros((self.dim,), dtype=self.dtype)
mean_vector = (positive_vecs + negative_vecs) / \
float(len(positive) + len(negative))
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
elif (DISTANCE or APPROX):
mean_unit_vector = self._query_numpy(positive[0])
elif COSMUL:
positive_vecs = self._query_numpy(positive)
if len(negative) > 0:
negative_vecs = self._query_numpy(negative)
else:
negative_vecs = np.zeros((0, self.dim))
# Calculate topn closest in batches over all vectors
if DISTANCE or COSMUL:
filtered_indices = []
for batch_start, _, batch in \
self.get_vectors_mmap_batch_generator():
if DISTANCE:
similiarities = -1 * np.dot(batch, mean_unit_vector)
elif COSMUL:
positive_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in positive_vecs
]
negative_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in negative_vecs
]
similiarities = -1 * (
np.prod(positive_similiarities, axis=0) /
(np.prod(negative_similiarities, axis=0) + 0.000001))
partition_results = np.argpartition(similiarities, min(
filter_topn, self.batch_size - 1))[:filter_topn]
for index in partition_results:
if (min_similarity is None or
similiarities[index] <= min_similarity):
if len(filtered_indices) < filter_topn:
heapq.heappush(filtered_indices, (
similiarities[index],
batch_start + index))
else:
heapq.heappushpop(filtered_indices, (
similiarities[index],
batch_start + index))
# Get the final topn from all batches
topn_indices = heapq.nsmallest(filter_topn, filtered_indices,
key=lambda x: x[0])
topn_indices = iter(topn_indices)
elif APPROX:
approx_index = self.get_approx_index()
search_k = int(effort * filter_topn * self.approx_trees)
nns = approx_index.get_nns_by_vector(
mean_unit_vector,
filter_topn,
search_k=search_k,
include_distances=True)
topn_indices = izip(nns[1], nns[0])
topn_indices = imap(lambda di: (di[0] ** 2 * .5 - 1, di[1]),
topn_indices)
# Tee topn_indices iterator
topn_indices_1, topn_indices_2 = tee(topn_indices)
# Retrieve the keys of the vectors
keys = self.index([i[1] for i in topn_indices_1],
return_vector=False)
# Build the result
results = []
for key, similarity in izip(keys, topn_indices_2):
key_t = self._key_t(key)
if len(results) >= topn:
break
if key_t in exclude_keys:
continue
exclude_keys.add(key_t)
if return_similarities:
results.append((key, -1 * similarity[0]))
else:
results.append(key)
return results
def _handle_pos_neg_args(self, positive, negative):
if not isinstance(
positive,
list) or (
len(positive) > 0 and isinstance(
positive[0],
Number)):
positive = [positive]
if not isinstance(
negative,
list) or (
len(negative) > 0 and isinstance(
negative[0],
Number)):
negative = [negative]
return positive, negative
def _exclude_set(self, positive, negative):
def _is_vec(elem):
return isinstance(elem, np.ndarray) or \
(isinstance(elem, list) and len(elem) > 0 and
isinstance(elem[0], Number))
return frozenset((elem for elem in chain.from_iterable(
[positive, negative]) if not _is_vec(elem)))
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar(self, positive, negative=[], topn=10, min_similarity=None,
return_similarities=True):
"""Finds the topn most similar vectors under or equal
to max distance.
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
return self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='distance')
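    # Illustration only (keys and similarity scores below are made up):
    #   vectors.most_similar('king', topn=2)
    #   -> [('queen', 0.72), ('prince', 0.68)]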
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_cosmul(self, positive, negative=[], topn=10,
min_similarity=None, return_similarities=True):
"""Finds the topn most similar vectors under or equal to max
distance using 3CosMul:
[Levy and Goldberg](http://www.aclweb.org/anthology/W14-1618)
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='3cosmul')
return results
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_approx(
self,
positive,
negative=[],
topn=10,
min_similarity=None,
return_similarities=True,
effort=1.0):
"""Approximates the topn most similar vectors under or equal to max
distance using Annoy:
https://github.com/spotify/annoy
"""
if not self.approx:
raise RuntimeError("The `.magnitude` file you are using does not \
support the `most_similar_approx` function. If you are using a pre-built \
`.magnitude` file, visit Magnitude's git repository page's README and download \
the 'Heavy' model instead. If you converted this `.magnitude` file yourself \
you will need to re-convert the file passing the `-a` flag to the converter to \
build the appropriate indexes into the `.magnitude` file.")
positive, negative = self._handle_pos_neg_args(positive, negative)
effort = min(max(0, effort), 1.0)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='approx',
effort=effort)
return results
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def closer_than(self, key, q, topn=None):
"""Finds all keys closer to key than q is to key."""
epsilon = (10.0 / 10**6)
min_similarity = self.similarity(key, q) + epsilon
return self.most_similar(key, topn=topn, min_similarity=min_similarity,
return_similarities=False)
def get_vectors_mmap(self):
"""Gets a numpy.memmap of all vectors, blocks if it is still
being built.
"""
if self._all_vectors is None:
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
if not self.memory_db:
all_vectors = np.memmap(
self.path_to_mmap, dtype=self.dtype, mode='r',
shape=(self.length, self.dim))
self._all_vectors = all_vectors
else:
all_vectors = np.zeros((0, self.dim))
self._all_vectors = all_vectors
break
except BaseException:
path_to_mmap_temp = self.path_to_mmap + '.tmp'
tlock = self.MMAP_THREAD_LOCK.acquire(False)
plock = self.MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
values = imap(
lambda kv: kv[1], self._iter(
put_cache=self.lazy_loading == -1))
try:
with open(path_to_mmap_temp, "w+b") as mmap_file:
all_vectors = np.memmap(
mmap_file, dtype=self.dtype, mode='w+',
shape=(self.length, self.dim))
for i, value in enumerate(values):
all_vectors[i] = value
all_vectors.flush()
del all_vectors
if not self.closed:
os.rename(path_to_mmap_temp, self.path_to_mmap)
else:
return
finally:
self.MMAP_THREAD_LOCK.release()
try:
self.MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._all_vectors
def get_vectors_mmap_batch_generator(self):
"""Gets batches of get_vectors_mmap()."""
all_vectors = self.get_vectors_mmap()
if self.length > self.batch_size:
for i in range(all_vectors.shape[0]):
batch_start = i * self.batch_size
batch_end = min(batch_start + self.batch_size,
all_vectors.shape[0])
if batch_start >= all_vectors.shape[0]:
break
yield (batch_start, batch_end,
all_vectors[batch_start:batch_end])
if batch_end == all_vectors.shape[0]:
break
else:
yield (0, self.length, all_vectors)
def get_approx_index_chunks(self):
"""Gets decompressed chunks of the AnnoyIndex of the vectors from
the database."""
try:
db = self._db(force_new=True)
with lz4.frame.LZ4FrameDecompressor() as decompressor:
chunks = db.execute(
"""
SELECT rowid,index_file
FROM `magnitude_approx`
WHERE trees = ?
""", (self.approx_trees,))
for chunk in chunks:
yield decompressor.decompress(chunk[1])
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def get_approx_index(self):
"""Gets an AnnoyIndex of the vectors from the database."""
chunks = self.get_approx_index_chunks()
if self._approx_index is None:
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
approx_index = AnnoyIndex(self.emb_dim, metric='angular')
approx_index.load(self.path_to_approx_mmap)
self._approx_index = approx_index
break
except BaseException:
path_to_approx_mmap_temp = self.path_to_approx_mmap \
+ '.tmp'
tlock = self.APPROX_MMAP_THREAD_LOCK.acquire(False)
plock = self.APPROX_MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
try:
with open(path_to_approx_mmap_temp, "w+b") \
as mmap_file:
for chunk in chunks:
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_approx_mmap_temp,
self.path_to_approx_mmap)
else:
return
finally:
self.APPROX_MMAP_THREAD_LOCK.release()
try:
self.APPROX_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._approx_index
def _iter(self, put_cache):
"""Yields keys and vectors for all vectors in the store."""
try:
db = self._db(force_new=True)
results = db.execute(
"""
SELECT *
FROM `magnitude`
""")
for result in results:
yield self._db_full_result_to_vec(result,
put_cache=put_cache)
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def __iter__(self):
"""Yields keys and vectors for all vectors in the store."""
return self._iter(put_cache=True)
def __len__(self):
"""Returns the number of vectors."""
return self.length
def __contains__(self, key):
"""Checks whether a key exists in the vectors"""
return self._vector_for_key_cached(key) is not None
def __getitem__(self, q):
"""Performs the index method when indexed."""
if isinstance(q, slice):
return self.index(list(range(*q.indices(self.length))),
return_vector=True)
else:
return self.index(q, return_vector=True)
def close(self):
"""Cleans up the object"""
self.closed = True
while any([t.is_alive() for t in self._threads]):
sleep(.5)
for conn in self._all_conns:
try:
conn.close()
except Exception:
pass
if hasattr(self, 'fd'):
try:
os.close(self.fd)
except BaseException:
pass
try:
self._all_vectors._mmap.close()
except BaseException:
pass
try:
del self._all_vectors
gc.collect()
except BaseException:
pass
try:
self._approx_index.unload()
except BaseException:
pass
if (hasattr(self, 'MMAP_PROCESS_LOCK') and
hasattr(self.MMAP_PROCESS_LOCK, 'lockfile') and
self.MMAP_PROCESS_LOCK.lockfile is not None):
self.MMAP_PROCESS_LOCK.lockfile.close()
if (hasattr(self, 'APPROX_MMAP_PROCESS_LOCK') and
hasattr(self.APPROX_MMAP_PROCESS_LOCK, 'lockfile') and
self.APPROX_MMAP_PROCESS_LOCK.lockfile is not None):
self.APPROX_MMAP_PROCESS_LOCK.lockfile.close()
def __del__(self):
""" Destructor for the class """
self.close()
class FeaturizerMagnitude(Magnitude):
"""A FeaturizerMagnitude class that subclasses Magnitude and acts as
    a way to featurize arbitrary Python objects into vectors.
Attributes:
number_of_values: number_of_values should be set to the
approximate upper-bound of the number of
feature values that will be looked up with query().
If you don't know the exact number, be conservative
and pick a large number, while keeping in mind the
bigger number_of_values is, the more memory it will
consume
namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, number_of_values=1000000, namespace=None, **kwargs):
self.namespace = namespace
super(
FeaturizerMagnitude,
self).__init__(
None,
_number_of_values=number_of_values,
_namespace=self.namespace,
**kwargs)
class ConcatenatedMagnitude(object):
"""A ConcatenatedMagnitude class that acts as a concatenated interface
to querying multiple magnitude objects.
Attributes:
*args: each arg should be a Magnitude object
"""
def __init__(self, *args, **kwargs):
if len(args) < 2:
raise RuntimeError(
"Must concatenate at least 2 Magnitude objects.")
self.magnitudes = args
self.dim = sum([m.dim for m in self.magnitudes])
all_use_numpy = [m.use_numpy for m in self.magnitudes]
if not all(use_numpy == all_use_numpy[0]
for use_numpy in all_use_numpy):
raise RuntimeError(
"All magnitude objects must have the same use_numpy value.")
self.use_numpy = all_use_numpy[0]
def _take(self, q, multikey, i):
"""Selects only the i'th element from the inner-most axis and
reduces the dimensions of the tensor q by 1.
"""
if multikey == -1:
return q
else:
cut = np.take(q, [i], axis=multikey)
result = np.reshape(cut, np.shape(cut)[0:-1]).tolist()
return result
def _hstack(self, l, use_numpy):
"""Horizontally stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(l, axis=-1)
else:
return list(chain.from_iterable(l))
def _dstack(self, l, use_numpy):
"""Depth stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(l, axis=-1)
else:
return [self._hstack((l3[example] for l3 in l),
use_numpy=use_numpy) for example in xrange(len(l[0]))] # noqa
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
# Check if keys are specified for each concatenated model
multikey = -1
if isinstance(q, tuple):
multikey = 0
if isinstance(q, list) and isinstance(q[0], tuple):
multikey = 1
if (isinstance(q, list) and isinstance(q[0], list) and
isinstance(q[0][0], tuple)):
multikey = 2
# Define args
pad_to_length = pad_to_length or self.magnitudes[0].pad_to_length
pad_left = pad_left or self.magnitudes[0].pad_left
truncate_left = truncate_left or self.magnitudes[0].truncate_left
# Query each model with the right set of keys
v = [m.query(self._take(q, multikey, i)) for i, m in enumerate(
self.magnitudes)]
if not isinstance(q, list): # Single key
return self._hstack(v, self.use_numpy)
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
return self._hstack(v, self.use_numpy)
elif isinstance(q, list): # 2D List
return self._dstack(v, self.use_numpy)
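# Hedged usage sketch (not part of the library; the file names are
# hypothetical): ConcatenatedMagnitude stitches each model's query results
# along the last axis, and a tuple of keys queries each model with its own key.
#   glove = Magnitude("glove.magnitude")
#   elmo = Magnitude("elmo.magnitude")
#   both = ConcatenatedMagnitude(glove, elmo)
#   vec = both.query("cat")               # same key sent to both models
#   vec2 = both.query(("cat", "feline"))  # per-model keys via a tuple
#   assert len(vec) == glove.dim + elmo.dim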
class MagnitudeUtils(object):
"""A MagnitudeUtils class that contains static helper utilities."""
@staticmethod
def batchify(X, y, batch_size): # noqa: N803
""" Creates an iterator that chunks `X` and `y` into batches
that each contain `batch_size` elements and loops forever"""
X_batch_generator = cycle([X[i: i + batch_size] # noqa: N806
for i in xrange(0, len(X), batch_size)])
y_batch_generator = cycle([y[i: i + batch_size]
for i in xrange(0, len(y), batch_size)])
return izip(X_batch_generator, y_batch_generator)
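    # Hedged usage sketch: batchify cycles forever, so cap iteration yourself.
    #   batches = MagnitudeUtils.batchify(list(range(10)), list(range(10)), 3)
    #   X_batch, y_batch = next(batches)  # -> first 3 examples of X and y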
@staticmethod
def class_encoding():
"""Creates a set of functions to add a new class, convert a
class into an integer, and the integer back to a class."""
        class_to_int_map = {}
        # One-element list as a mutable cell so the closures below can rebuild
        # the cached reverse map; the original ``global`` statements were a
        # bug, since the name lives in this function's scope, not the module's.
        int_to_class_map = [None]
        def add_class(c):
            int_to_class_map[0] = None  # invalidate the cached reverse map
            return class_to_int_map.setdefault(
                c, len(class_to_int_map))
        def class_to_int(c):
            return class_to_int_map[c]
        def int_to_class(i):
            if int_to_class_map[0] is None:
                # Lazily (re)build and cache the reverse mapping
                int_to_class_map[0] = {v: k
                                       for k, v in class_to_int_map.items()}
            return int_to_class_map[0][i]
        return add_class, class_to_int, int_to_class
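    # Hedged usage sketch for the closures returned above:
    #   add_class, class_to_int, int_to_class = MagnitudeUtils.class_encoding()
    #   add_class('cat')     # -> 0
    #   add_class('dog')     # -> 1
    #   class_to_int('dog')  # -> 1
    #   int_to_class(0)      # -> 'cat'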
@staticmethod
def to_categorical(y, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=np.float32)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@staticmethod
def from_categorical(categorical):
"""Converts a binary class matrix to a class vector (integers)"""
return np.argmax(categorical, axis=1)
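    # Hedged round-trip sketch for the two converters above (pure NumPy):
    #   one_hot = MagnitudeUtils.to_categorical([0, 2, 1, 2], num_classes=3)
    #   # one_hot.shape == (4, 3), with a single 1.0 per row at the class index
    #   MagnitudeUtils.from_categorical(one_hot)  # -> array([0, 2, 1, 2])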
|
407445
|
import asyncio
from distutils.version import LooseVersion
import uuid
import bluesky
from bluesky.run_engine import RunEngine, TransitionError
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
import event_model
import ophyd.sim
import pymongo
import pytest
# Make module-scoped versions of these fixtures to avoid paying for
# intake-server startup each time.
@pytest.fixture(scope='module')
def hw():
return ophyd.sim.hw() # a SimpleNamespace of simulated devices
@pytest.fixture(scope='module')
def RE(request):
loop = asyncio.new_event_loop()
loop.set_debug(True)
RE = RunEngine({}, loop=loop)
def clean_event_loop():
if RE.state not in ('idle', 'panicked'):
try:
RE.halt()
except TransitionError:
pass
loop.call_soon_threadsafe(loop.stop)
if LooseVersion(bluesky.__version__) >= LooseVersion('1.6.0'):
RE._th.join()
loop.close()
request.addfinalizer(clean_event_loop)
return RE
SIM_DETECTORS = {'scalar': 'det',
'image': 'direct_img',
'external_image': 'img'}
@pytest.fixture(params=['scalar', 'image', 'external_image'], scope='module')
def detector(request, hw):
return getattr(hw, SIM_DETECTORS[request.param])
@pytest.fixture(scope='module')
def example_data(hw, detector, RE): # noqa
sd = SupplementalData(baseline=[hw.motor])
RE.preprocessors.append(sd)
docs = []
def collect(name, doc):
docs.append((name, event_model.sanitize_doc(doc)))
uid, = RE(scan([detector], hw.motor, -1, 1, 20), collect)
return uid, docs
@pytest.fixture(scope='module')
def db_factory(request):
def inner():
        database_name = f'test-{uuid.uuid4()}'
        uri = 'mongodb://localhost:27017/'
client = pymongo.MongoClient(uri)
def drop():
client.drop_database(database_name)
request.addfinalizer(drop)
return client[database_name]
return inner
|
407453
|
from .architectures import build_model
from .losses import build_loss
__all__ = ['build_model', 'build_loss']
|
407468
|
from LSP.plugin.core.logging import debug
from LSP.plugin.core.protocol import Request
from LSP.plugin.core.registry import windows
from LSP.plugin.core.types import ClientStates
from LSP.plugin.core.typing import Any, Generator
from LSP.plugin.documents import DocumentSyncListener
from os.path import join
from setup import add_config
from setup import close_test_view
from setup import expand
from setup import make_stdio_test_config
from setup import remove_config
from setup import TIMEOUT_TIME
from setup import YieldPromise
from sublime_plugin import view_event_listeners
from unittesting import DeferrableTestCase
import sublime
class WindowDocumentHandlerTests(DeferrableTestCase):
def ensure_document_listener_created(self) -> bool:
assert self.view
        # Bug in ST3? Either that, or CI runs with the ST window out of focus, which makes ST3 skip some
        # events like on_load_async, on_activated, on_deactivated, so things don't initialize properly on
        # opening a file (manager missing in DocumentSyncListener).
        # Revisit this once we're on ST4.
for listener in view_event_listeners[self.view.id()]:
if isinstance(listener, DocumentSyncListener):
sublime.set_timeout_async(listener.on_activated_async)
return True
return False
def setUp(self) -> Generator:
init_options = {
"serverResponse": {
"capabilities": {
"textDocumentSync": {
"openClose": True,
"change": 1,
"save": True
},
}
}
}
self.window = sublime.active_window()
self.assertTrue(self.window)
self.session1 = None
self.session2 = None
self.config1 = make_stdio_test_config()
self.config1.init_options.assign(init_options)
self.config2 = make_stdio_test_config()
self.config2.init_options.assign(init_options)
self.config2.name = "TEST-2"
self.config2.status_key = "lsp_TEST-2"
self.wm = windows.lookup(self.window)
add_config(self.config1)
add_config(self.config2)
self.wm._configs.all[self.config1.name] = self.config1
self.wm._configs.all[self.config2.name] = self.config2
def test_sends_did_open_to_multiple_sessions(self) -> Generator:
filename = expand(join("$packages", "LSP", "tests", "testfile.txt"), self.window)
open_view = self.window.find_open_file(filename)
yield from close_test_view(open_view)
self.view = self.window.open_file(filename)
yield {"condition": lambda: not self.view.is_loading(), "timeout": TIMEOUT_TIME}
self.assertTrue(self.wm._configs.match_view(self.view))
# self.init_view_settings()
yield {"condition": self.ensure_document_listener_created, "timeout": TIMEOUT_TIME}
yield {
"condition": lambda: self.wm.get_session(self.config1.name, self.view.file_name()) is not None,
"timeout": TIMEOUT_TIME}
yield {
"condition": lambda: self.wm.get_session(self.config2.name, self.view.file_name()) is not None,
"timeout": TIMEOUT_TIME}
self.session1 = self.wm.get_session(self.config1.name, self.view.file_name())
self.session2 = self.wm.get_session(self.config2.name, self.view.file_name())
self.assertIsNotNone(self.session1)
self.assertIsNotNone(self.session2)
self.assertEqual(self.session1.config.name, self.config1.name)
self.assertEqual(self.session2.config.name, self.config2.name)
yield {"condition": lambda: self.session1.state == ClientStates.READY, "timeout": TIMEOUT_TIME}
yield {"condition": lambda: self.session2.state == ClientStates.READY, "timeout": TIMEOUT_TIME}
yield from self.await_message("initialize")
yield from self.await_message("initialized")
yield from self.await_message("textDocument/didOpen")
self.view.run_command("insert", {"characters": "a"})
yield from self.await_message("textDocument/didChange")
self.assertEqual(self.view.get_status("lsp_TEST"), "TEST")
self.assertEqual(self.view.get_status("lsp_TEST-2"), "TEST-2")
yield from close_test_view(self.view)
yield from self.await_message("textDocument/didClose")
def doCleanups(self) -> Generator:
try:
yield from close_test_view(self.view)
except Exception:
pass
if self.session1:
sublime.set_timeout_async(self.session1.end_async)
yield lambda: self.session1.state == ClientStates.STOPPING
if self.session2:
sublime.set_timeout_async(self.session2.end_async)
yield lambda: self.session2.state == ClientStates.STOPPING
try:
remove_config(self.config2)
except ValueError:
pass
try:
remove_config(self.config1)
except ValueError:
pass
self.wm._configs.all.pop(self.config2.name, None)
self.wm._configs.all.pop(self.config1.name, None)
yield from super().doCleanups()
def await_message(self, method: str) -> Generator:
promise1 = YieldPromise()
promise2 = YieldPromise()
def handler1(params: Any) -> None:
promise1.fulfill(params)
def handler2(params: Any) -> None:
promise2.fulfill(params)
def error_handler(params: 'Any') -> None:
debug("Got error:", params, "awaiting timeout :(")
self.session1.send_request(Request("$test/getReceived", {"method": method}), handler1, error_handler)
self.session2.send_request(Request("$test/getReceived", {"method": method}), handler2, error_handler)
yield {"condition": promise1, "timeout": TIMEOUT_TIME}
yield {"condition": promise2, "timeout": TIMEOUT_TIME}
|
407486
|
from __future__ import annotations
from .. import query_utils
from . import contract_creation_blocks_statements
async_query_contract_creation_block = query_utils.with_connection(
contract_creation_blocks_statements.async_select_contract_creation_block,
'contract_creation_blocks',
)
async_query_contract_creation_blocks = query_utils.with_connection(
contract_creation_blocks_statements.async_select_contract_creation_blocks,
'contract_creation_blocks',
)
|
407489
|
from datetime import datetime
def add_watermark(ax):
ax.text(
0.97, 0.9, f"Generated {datetime.now()}",
transform=ax.transAxes, fontsize=12, color="gray", alpha=0.5,
ha="right", va="center",
)
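if __name__ == "__main__":
    # Minimal demo (a hedged sketch, not part of the original module): stamp
    # the watermark onto a throwaway plot and save it to disk.
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    add_watermark(ax)
    fig.savefig("watermark_demo.png")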
|
407490
|
from django.core.exceptions import ValidationError
import jsonschema
from jsonschema.exceptions import SchemaError
def validate_json_schema(value):
"""Wrap jsonschema validation with serializers.ValidationError"""
try:
jsonschema.Draft4Validator.check_schema(value)
except SchemaError as e:
raise ValidationError('Invalid schema: {}'.format(e.message))
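# Hedged usage sketch (the JSONField wiring below is illustrative only):
#   validate_json_schema({"type": "object"})  # valid Draft 4 schema, passes
#   validate_json_schema({"type": 42})        # raises ValidationError
#   schema = JSONField(validators=[validate_json_schema])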
|
407496
|
import os
import setuptools
with open("version.txt") as f:
VERSION = f.read().strip()
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
atari = ["gym[atari,accept-rom-license]~=0.21", "opencv-python~=4.0"]
gym_minigrid = ["gym-minigrid~=1.0"]
petting_zoo = ["pettingzoo[sisl,atari,classic]~=1.11"]
test = ["pytest~=6.2", "pytest-lazy-fixture~=0.6"]
setuptools.setup(
name="rlhive",
version=VERSION,
description="A package to support RL research",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/chandar-lab/RLHive",
project_urls={
"Bug Tracker": "https://github.com/chandar-lab/RLHive/issues",
},
packages=setuptools.find_packages(),
include_package_data=True,
package_data={"hive": ["configs/**.yml"]},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.6",
install_requires=[
"gym~=0.21",
"numpy~=1.18",
"PyYAML~=5.4",
"torch~=1.6",
"wandb>=0.10.30",
"matplotlib~=3.0",
"pandas~=1.0",
],
extras_require={
"atari": atari,
"gym_minigrid": gym_minigrid,
"petting_zoo": petting_zoo,
"test": test,
"all": atari + gym_minigrid + petting_zoo + test,
},
entry_points={
"console_scripts": [
"hive_single_agent_loop = hive.runners.single_agent_loop:main",
"hive_multi_agent_loop = hive.runners.multi_agent_loop:main",
]
},
)
|
407504
|
from wagtail.images.formats import Format, register_image_format
register_image_format(
Format(
'bleed',
'Bleed into left/right margins',
'richtext-image image-bleed',
'width-1170'
)
)
|
407517
|
import csv
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
def csv_res2_dict_lst(res):
"""Convert CSV string with a header into list of dictionaries"""
return list(csv.DictReader(StringIO(res), delimiter=","))
def expected_repnames(repos_cfg):
"""Generate expected repository names '{account_name}/{repo_name}'"""
templ = "{account_name}/{repo_name}"
lst = []
for account_name, rep_cfg in repos_cfg.items():
for repo_name in rep_cfg.keys():
lst.append(
templ.format(account_name=account_name, repo_name=repo_name)
)
return sorted(lst)
def test_simple_call(binbb):
# Just try to run it and hope it will not fail
binbb.sysexec("repo", "list")
binbb.sysexec("repo", "list", "-f", "csv")
binbb.sysexec("repo", "list", "-f", "value")
binbb.sysexec("repo", "list", "-c", "Owner", "-c", "Repo Name")
binbb.sysexec("repo", "list", "-c", "Owner", "-c", "Repo Name", "-f", "csv")
def test_listed_names_csv(binbb, repos_cfg):
res = binbb.sysexec("repo", "list", "-f", "csv")
recs = csv_res2_dict_lst(res)
resnames = ["{rec[Owner]}/{rec[Repo Name]}".format(rec=rec) for rec in recs]
resnames = sorted(resnames)
expected = expected_repnames(repos_cfg)
assert resnames == expected
def test_listed_names_value(binbb, repos_cfg):
# Just try to run it and hope it will not fail
    bbcmd = ["repo", "list", "-f", "value", "-c", "Owner", "-c", "Repo Name"]
res = binbb.sysexec(*bbcmd)
recs = res.strip().splitlines()
recs = [line.split(" ", 1) for line in recs]
templ = "{owner}/{repo}"
resnames = [templ.format(owner=owner, repo=repo) for owner, repo in recs]
resnames = sorted(resnames)
expected = expected_repnames(repos_cfg)
assert resnames == expected
|
407550
|
import json
import unittest
from tests import setup_django_settings
from mengenali.views import download
from django.test.client import RequestFactory
class EndToEndTest(unittest.TestCase):
test_performance = False
def setUp(self):
self.factory = RequestFactory()
setup_django_settings()
def do_end_to_end(self, kelurahan, tps, file_name):
request = self.factory.get(
f'/download/{kelurahan}/{tps}/{file_name}.JPG?extractDigits=true&calculateNumbers=true&storeFiles=false')
response = download(request, kelurahan, tps, f"{file_name}.JPG")
return json.loads(response.content)
    def assert_result(self, result, success, min_duration, max_duration):
outcome = result['outcome']
self.assertEqual(result['success'], success)
if success:
self.assertEqual(outcome['jokowi'], 11)
self.assertEqual(outcome['prabowo'], 92)
self.assertEqual(outcome['jumlah'], 103)
self.assertEqual(outcome['tidakSah'], 8)
if self.test_performance:
            self.assertGreaterEqual(result['duration'], min_duration)
            self.assertLessEqual(result['duration'], max_duration)
def test_1_12_1(self):
res = self.do_end_to_end(1, 12, 1)
self.assert_result(res, True, 1.5, 6)
def test_1_13_3(self):
res = self.do_end_to_end(1, 13, 3)
self.assert_result(res, True, 2.5, 7)
def test_2_21_4(self):
res = self.do_end_to_end(2, 21, 4)
self.assert_result(res, True, 4, 8)
def test_2_22_5(self):
res = self.do_end_to_end(2, 22, 5)
self.assert_result(res, True, 4, 6)
def test_1_13_2(self):
res = self.do_end_to_end(1, 13, 2)
self.assert_result(res, False, 2.5, 7)
def test_3_10_6(self):
res = self.do_end_to_end(3, 10, 6)
self.assert_result(res, False, 2.5, 7)
def test_3_10_8(self):
res = self.do_end_to_end(3, 10, 8)
self.assert_result(res, True, 2.5, 7)
|
407561
|
def test():
from kale.utils import pod_utils as _kale_pod_utils
_kale_pod_utils.snapshot_pipeline_step(
"T",
"test",
"/path/to/nb")
|
407662
|
entries = [
{
"env-title": "atari-alien",
"env-variant": "No-op start",
"score": 1536.05,
},
{
"env-title": "atari-amidar",
"env-variant": "No-op start",
"score": 497.62,
},
{
"env-title": "atari-assault",
"env-variant": "No-op start",
"score": 12086.86,
},
{
"env-title": "atari-asterix",
"env-variant": "No-op start",
"score": 29692.50,
},
{
"env-title": "atari-asteroids",
"env-variant": "No-op start",
"score": 3508.10,
},
{
"env-title": "atari-atlantis",
"env-variant": "No-op start",
"score": 773355.50,
},
{
"env-title": "atari-bank-heist",
"env-variant": "No-op start",
"score": 1200.35,
},
{
"env-title": "atari-battle-zone",
"env-variant": "No-op start",
"score": 13015.00,
},
{
"env-title": "atari-beam-rider",
"env-variant": "No-op start",
"score": 8219.92,
},
{
"env-title": "atari-berzerk",
"env-variant": "No-op start",
"score": 888.30,
},
{
"env-title": "atari-bowling",
"env-variant": "No-op start",
"score": 35.73,
},
{
"env-title": "atari-boxing",
"env-variant": "No-op start",
"score": 96.30,
},
{
"env-title": "atari-breakout",
"env-variant": "No-op start",
"score": 640.43,
},
{
"env-title": "atari-centipede",
"env-variant": "No-op start",
"score": 5528.13,
},
{
"env-title": "atari-chopper-command",
"env-variant": "No-op start",
"score": 5012.00,
},
{
"env-title": "atari-crazy-climber",
"env-variant": "No-op start",
"score": 136211.50,
},
{
"env-title": "atari-defender",
"env-variant": "No-op start",
"score": 58718.25,
},
{
"env-title": "atari-demon-attack",
"env-variant": "No-op start",
"score": 107264.73,
},
{
"env-title": "atari-double-dunk",
"env-variant": "No-op start",
"score": -0.35,
},
{
"env-title": "atari-enduro",
"env-variant": "No-op start",
"score": 0.00,
},
{
"env-title": "atari-fishing-derby",
"env-variant": "No-op start",
"score": 32.08,
},
{
"env-title": "atari-freeway",
"env-variant": "No-op start",
"score": 0.00,
},
{
"env-title": "atari-frostbite",
"env-variant": "No-op start",
"score": 269.65,
},
{
"env-title": "atari-gopher",
"env-variant": "No-op start",
"score": 1002.40,
},
{
"env-title": "atari-gravitar",
"env-variant": "No-op start",
"score": 211.50,
},
{
"env-title": "atari-hero",
"env-variant": "No-op start",
"score": 33853.15,
},
{
"env-title": "atari-ice-hockey",
"env-variant": "No-op start",
"score": -5.25,
},
{
"env-title": "atari-jamesbond",
"env-variant": "No-op start",
"score": 440.00,
},
{
"env-title": "atari-kangaroo",
"env-variant": "No-op start",
"score": 47.00,
},
{
"env-title": "atari-krull",
"env-variant": "No-op start",
"score": 9247.60,
},
{
"env-title": "atari-kung-fu-master",
"env-variant": "No-op start",
"score": 42259.00,
},
{
"env-title": "atari-montezuma-revenge",
"env-variant": "No-op start",
"score": 0.00,
},
{
"env-title": "atari-ms-pacman",
"env-variant": "No-op start",
"score": 6501.71,
},
{
"env-title": "atari-name-this-game",
"env-variant": "No-op start",
"score": 6049.55,
},
{
"env-title": "atari-phoenix",
"env-variant": "No-op start",
"score": 33068.15,
},
{
"env-title": "atari-pitfall",
"env-variant": "No-op start",
"score": -11.14,
},
{
"env-title": "atari-pong",
"env-variant": "No-op start",
"score": 20.40,
},
{
"env-title": "atari-private-eye",
"env-variant": "No-op start",
"score": 92.42,
},
{
"env-title": "atari-qbert",
"env-variant": "No-op start",
"score": 18901.25,
},
{
"env-title": "atari-riverraid",
"env-variant": "No-op start",
"score": 17401.90,
},
{
"env-title": "atari-road-runner",
"env-variant": "No-op start",
"score": 37505.00,
},
{
"env-title": "atari-robotank",
"env-variant": "No-op start",
"score": 2.30,
},
{
"env-title": "atari-seaquest",
"env-variant": "No-op start",
"score": 1716.90,
},
{
"env-title": "atari-skiing",
"env-variant": "No-op start",
"score": -29975.00,
},
{
"env-title": "atari-solaris",
"env-variant": "No-op start",
"score": 2368.40,
},
{
"env-title": "atari-space-invaders",
"env-variant": "No-op start",
"score": 1726.28,
},
{
"env-title": "atari-star-gunner",
"env-variant": "No-op start",
"score": 69139.00,
},
{
"env-title": "atari-surround",
"env-variant": "No-op start",
"score": -8.13,
},
{
"env-title": "atari-tennis",
"env-variant": "No-op start",
"score": -1.89,
},
{
"env-title": "atari-time-pilot",
"env-variant": "No-op start",
"score": 6617.50,
},
{
"env-title": "atari-tutankham",
"env-variant": "No-op start",
"score": 267.82,
},
{
"env-title": "atari-up-n-down",
"env-variant": "No-op start",
"score": 273058.10,
},
{
"env-title": "atari-venture",
"env-variant": "No-op start",
"score": 0.00,
},
{
"env-title": "atari-video-pinball",
"env-variant": "No-op start",
"score": 228642.52,
},
{
"env-title": "atari-wizard-of-wor",
"env-variant": "No-op start",
"score": 4203.00,
},
{
"env-title": "atari-yars-revenge",
"env-variant": "No-op start",
"score": 80530.13,
},
{
"env-title": "atari-zaxxon",
"env-variant": "No-op start",
"score": 1148.50,
},
]
|
407665
|
import numpy as np
import cudarray as ca
from ..wrap import blas
from ..linalg import matmul_shape
class Dot(object):
def __init__(self, a, b, out=None):
self.batch_size = a.shape[0]
self.a = a
self.b = b
if a.dtype != b.dtype:
raise ValueError('dtype mismatch')
out_shape = (self.batch_size,) + matmul_shape(a.shape[1:], b.shape[1:])
if out is None:
            out = ca.empty(out_shape, dtype=a.dtype)
else:
if out_shape != out.shape:
raise ValueError('out.shape does not match result')
if a.dtype != out.dtype:
raise ValueError('dtype mismatch')
self.out = out
a_stride = np.prod(a.shape[1:])
b_stride = np.prod(b.shape[1:])
out_stride = np.prod(out.shape[1:])
self.blas_batch = blas.BLASBatch_f(
a._data, b._data, out._data, self.batch_size, a_stride, b_stride,
out_stride
)
if a.ndim == b.ndim == 3:
m, k = a.shape[1:3]
n = b.shape[2]
def fun():
self.blas_batch.gemm(blas.no_trans_op, blas.no_trans_op, m, n,
k, 1.0, 0.0)
return self.out
self.perform = fun
else:
raise ValueError('invalid array dimensionality')
def dot(a, b, out=None):
return Dot(a, b, out).perform()
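# Hedged usage sketch (assumes a working cudarray/CUDA setup; float32 only,
# matching BLASBatch_f above, and `ca.random.uniform` is assumed to mirror
# NumPy's API):
#   a = ca.random.uniform(size=(16, 4, 8))  # batch of 16 (4x8) matrices
#   b = ca.random.uniform(size=(16, 8, 5))  # batch of 16 (8x5) matrices
#   c = dot(a, b)                           # -> shape (16, 4, 5)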
|
407673
|
from __future__ import absolute_import, print_function, unicode_literals
import struct
from wolframclient.utils import six
from wolframclient.utils.datastructures import Settings
if six.JYTHON:
pass
if six.PY2:
def _bytes(value):
return chr(value)
else:
def _bytes(value):
return bytes((value,))
WXF_VERSION = b"8"
WXF_HEADER_SEPARATOR = b":"
WXF_HEADER_COMPRESS = b"C"
# The list of all the WXF tokens.
WXF_CONSTANTS = Settings(
Function=b"f",
Symbol=b"s",
String=b"S",
BinaryString=b"B",
Integer8=b"C",
Integer16=b"j",
Integer32=b"i",
Integer64=b"L",
Real64=b"r",
BigInteger=b"I",
BigReal=b"R",
PackedArray=_bytes(0xC1),
NumericArray=_bytes(0xC2),
Association=b"A",
Rule=b"-",
RuleDelayed=b":",
)
# The list of all array value type tokens.
ARRAY_TYPES = Settings(
Integer8=_bytes(0x00),
Integer16=_bytes(0x01),
Integer32=_bytes(0x02),
Integer64=_bytes(0x03),
UnsignedInteger8=_bytes(0x10),
UnsignedInteger16=_bytes(0x11),
UnsignedInteger32=_bytes(0x12),
UnsignedInteger64=_bytes(0x13),
Real32=_bytes(0x22),
Real64=_bytes(0x23),
ComplexReal32=_bytes(0x33),
ComplexReal64=_bytes(0x34),
)
ARRAY_TYPES_FROM_WXF_TYPES = {v: k for k, v in ARRAY_TYPES.items()}
ARRAY_TYPES_ELEM_SIZE = {
ARRAY_TYPES.Integer8: 1,
ARRAY_TYPES.Integer16: 2,
ARRAY_TYPES.Integer32: 4,
ARRAY_TYPES.Integer64: 8,
ARRAY_TYPES.UnsignedInteger8: 1,
ARRAY_TYPES.UnsignedInteger16: 2,
ARRAY_TYPES.UnsignedInteger32: 4,
ARRAY_TYPES.UnsignedInteger64: 8,
ARRAY_TYPES.Real32: 4,
ARRAY_TYPES.Real64: 8,
ARRAY_TYPES.ComplexReal32: 8,
ARRAY_TYPES.ComplexReal64: 16,
}
""" A set of all valid value type tokens for PackedArray.
There is no restriction for NumericArray value types. """
VALID_PACKED_ARRAY_TYPES = frozenset(
(
ARRAY_TYPES.Integer8,
ARRAY_TYPES.Integer16,
ARRAY_TYPES.Integer32,
ARRAY_TYPES.Integer64,
ARRAY_TYPES.Real32,
ARRAY_TYPES.Real64,
ARRAY_TYPES.ComplexReal32,
ARRAY_TYPES.ComplexReal64,
)
)
VALID_PACKED_ARRAY_LABEL_TYPES = frozenset(
(
"Integer8",
"Integer16",
"Integer32",
"Integer64",
"Real32",
"Real64",
"ComplexReal32",
"ComplexReal64",
)
)
STRUCT_MAPPING = Settings(
Integer8=struct.Struct(b"<b"),
UnsignedInteger8=struct.Struct(b"<B"),
Integer16=struct.Struct(b"<h"),
UnsignedInteger16=struct.Struct(b"<H"),
Integer32=struct.Struct(b"<i"),
UnsignedInteger32=struct.Struct(b"<I"),
Integer64=struct.Struct(b"<q"),
UnsignedInteger64=struct.Struct(b"<Q"),
Real32=struct.Struct(b"<f"),
Real64=struct.Struct(b"<d"),
ComplexReal32=struct.Struct(b"<f"),
ComplexReal64=struct.Struct(b"<d"),
)
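def _pack_integer32_example(value):
    """A hedged sketch (not part of the original module) of how these tables
    combine when serializing WXF: each token is its tag byte followed by its
    little-endian payload, e.g. _pack_integer32_example(7) returns
    b'i\\x07\\x00\\x00\\x00'."""
    return WXF_CONSTANTS.Integer32 + STRUCT_MAPPING.Integer32.pack(value)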
|
407676
|
import re
import argparse
import os.path
from argparse import RawTextHelpFormatter
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('--mapping_file', type=str, default="Resnet50_kcp_ws.m", help="<name of your mapping file with layer specs>")
parser.add_argument('--outfile', type=str, default="out.m", help='output file name')
opt = parser.parse_args()
print('Begin processing')
base_path = '../../data/'
check = 0
if os.path.exists(base_path + 'mapping/' + opt.mapping_file):
with open(base_path + 'model/' + opt.outfile, "w") as fo:
with open(base_path + 'mapping/' + opt.mapping_file, "r") as fm:
for line in fm:
                    if re.search("Dataflow", line) or re.search("SpatialMap", line) or re.search("TemporalMap", line) or re.search("Cluster", line):
check = 1
continue
elif check == 1:
check = 0
continue
else:
fo.write(line)
print("Model file created")
else:
print("Mapping file not found, please provide one")
|
407724
|
from pytest import fixture
@fixture(autouse=True)
def ensure_no_local_config(no_local_config):
pass
|
407727
|
def psd(data, fs, df_exp):
    """ psd of data """
    from math import ceil, log
    from numpy import fft, linspace
    a = ceil(log(data.shape[0]) / log(2))
    b = ceil(log(fs / df_exp + 2) / log(2))
    nfft = 2 ** int(max(a, b))
    Y0 = fft.fft(data, nfft, 0)
    # Integer division: nfft / 2 is a float on Python 3 and breaks slicing
    Y = Y0[:nfft // 2, :] / data.shape[0]
    Pxx = abs(Y)
    # Pangle = angle(Y[:nfft // 2, :])
    f = fs / 2 * linspace(0, 1, nfft // 2)
    return (Pxx, f)
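if __name__ == "__main__":
    # Hedged self-check (not part of the original module): the PSD of a pure
    # 50 Hz sine sampled at 1 kHz should peak near 50 Hz. Input is a column
    # vector to match the 2-D slicing above.
    import numpy as np
    fs = 1000.0
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 50 * t)[:, None]
    Pxx, f = psd(x, fs, df_exp=1.0)
    print("peak frequency:", f[np.argmax(Pxx[:, 0])])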
|
407730
|
import tensorflow as tf
from trainer.trainer import Trainer
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('enbl_multi_gpu', False, 'Enable training with multiple gpus')
tf.app.flags.DEFINE_string('data_path', './data/tfrecord', 'path to data tfrecords')
tf.app.flags.DEFINE_string('net_cfg', './cfgs/w30_s4.cfg', 'config file of network')
tf.app.flags.DEFINE_bool('eval_only', False, 'Eval mode')
tf.app.flags.DEFINE_bool('resume_training', False, 'resume training')
def main(unused_argv):
"""Main entry.
Args:
* unused_argv: unused arguments (after FLAGS is parsed)
"""
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.enbl_multi_gpu:
mgw.init()
trainer = Trainer(data_path=FLAGS.data_path, netcfg=FLAGS.net_cfg)
trainer.build_graph(is_train=True)
trainer.build_graph(is_train=False)
if FLAGS.eval_only:
trainer.eval()
else:
trainer.train()
if __name__ == '__main__':
tf.app.run()
|
407771
|
import aiohttp_jinja2
from aiohttp import web
from aiohttp_security import remember, forget, authorized_userid
from aiohttpdemo_blog import db
from aiohttpdemo_blog.forms import validate_login_form
def redirect(router, route_name):
location = router[route_name].url_for()
return web.HTTPFound(location)
@aiohttp_jinja2.template('index.html')
async def index(request):
username = await authorized_userid(request)
if not username:
raise redirect(request.app.router, 'login')
async with request.app['db_pool'].acquire() as conn:
current_user = await db.get_user_by_name(conn, username)
posts = await db.get_posts_with_joined_users(conn)
return {'user': current_user, 'posts': posts}
@aiohttp_jinja2.template('login.html')
async def login(request):
username = await authorized_userid(request)
if username:
raise redirect(request.app.router, 'index')
if request.method == 'POST':
form = await request.post()
async with request.app['db_pool'].acquire() as conn:
error = await validate_login_form(conn, form)
if error:
return {'error': error}
else:
response = redirect(request.app.router, 'index')
user = await db.get_user_by_name(conn, form['username'])
await remember(request, response, user['username'])
raise response
return {}
async def logout(request):
response = redirect(request.app.router, 'login')
await forget(request, response)
return response
@aiohttp_jinja2.template('create_post.html')
async def create_post(request):
username = await authorized_userid(request)
if not username:
raise redirect(request.app.router, 'login')
if request.method == 'POST':
form = await request.post()
async with request.app['db_pool'].acquire() as conn:
current_user = await db.get_user_by_name(conn, username)
await db.create_post(conn, form['body'], current_user['id'])
raise redirect(request.app.router, 'index')
return {}
|
407802
|
import cv2
MODEL_PATH = '/home/ubuntu/models/'
models = (
cv2.CascadeClassifier(MODEL_PATH + 'haarcascade_frontalface_default.xml'),
cv2.CascadeClassifier(MODEL_PATH + 'haarcascade_profileface.xml'),
)
|
407833
|
import pytest
from indy import did
from plenum.common.exceptions import RequestRejectedException
from plenum.common.constants import TRUSTEE_STRING
from plenum.test.pool_transactions.helper import sdk_add_new_nym
from plenum.test.helper import sdk_get_and_check_replies, sdk_sign_and_submit_op
def test_new_DID_cannot_update_another_DID(looper,
sdk_pool_handle,
sdk_wallet_trustee,
sdk_wallet_handle):
"""Create trustee"""
trustee_did, trustee_verkey = looper.loop.run_until_complete(
did.create_and_store_my_did(sdk_wallet_trustee[0], "{}"))
"""Add trustee to ledger"""
sdk_add_new_nym(looper, sdk_pool_handle,
sdk_wallet_trustee, 'newTrustee', TRUSTEE_STRING, verkey=trustee_verkey, dest=trustee_did)
"""new DID (no role)"""
new_no_role_did, new_no_role_verkey = looper.loop.run_until_complete(
did.create_and_store_my_did(sdk_wallet_trustee[0], "{}"))
"""Adding new DID (no role) to ledger"""
sdk_add_new_nym(looper, sdk_pool_handle,
sdk_wallet_trustee,
'noRole',
verkey=new_no_role_verkey, dest=new_no_role_did)
"""Nym transaction to update Trustee DID that makes no change to verkey or role"""
op = {'type': '1',
'dest': trustee_did
}
"""Submitting the transaction fails"""
with pytest.raises(RequestRejectedException):
req = sdk_sign_and_submit_op(looper, sdk_pool_handle, sdk_wallet_trustee, op)
sdk_get_and_check_replies(looper, [req])
|
407836
|
import os
import sys
import numpy as np
import matplotlib.image as mpimg
from ..core.data import Data
from ..util import tryremove
class SintelData(Data):
SINTEL_URL = 'http://files.is.tue.mpg.de/sintel/MPI-Sintel-complete.zip'
dirs = ['sintel']
def __init__(self, data_dir, stat_log_dir=None,
development=True, fast_dir=None):
super().__init__(data_dir, stat_log_dir,
development=development,
fast_dir=fast_dir)
def _fetch_if_missing(self):
local_path = os.path.join(self.data_dir, 'sintel')
if not os.path.isdir(local_path):
self._download_and_extract(self.SINTEL_URL, local_path)
def get_raw_dirs(self):
dirs = []
for folder in ['training/clean', 'training/final', 'test/clean', 'test/final']:
top_dir = os.path.join(self.current_dir, 'sintel/' + folder)
for sub_dir in os.listdir(top_dir):
dirs.append(os.path.join(top_dir, sub_dir))
return dirs
|
407848
|
import pandas as pd
import os, glob
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot/seaborn import it
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
import numpy as np
from configparser import ConfigParser, MissingSectionHeaderError, NoOptionError, NoSectionError
from simba.drop_bp_cords import *
from simba.rw_dfs import *
def pup_retrieval_1(config_ini, prob_pup, prob_mother, dist_start_crit, carry_frames_seconds, smooth_factor, corenest_name, nest_name, dam_name, pup_name, smooth_function, max_time, carry_classifier_name, approach_classifier_name, dig_classifier_name):
# cofigini = r"Z:\DeepLabCut\DLC_extract\Troubleshooting\Pup_retrieval_1\project_folder\project_config.ini"
# prob_pup = 0.025
# prob_mother = 0.5
# dist_start_crit = 80
# carry_frames_seconds = 3
# smooth_factor = 5
# corenest_name = 'corenest'
# nest_name = 'nest'
# dam_name = '1_mother'
# pup_name = '2_pup'
# smooth_function = 'gaussian'
# max_time = 90
# carry_classifier_name = 'carry'
# approach_classifier_name = 'approach'
# dig_classifier_name = 'doggy'
config = ConfigParser()
    configFile = str(config_ini)
try:
config.read(configFile)
except MissingSectionHeaderError:
print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
project_path = config.get('General settings', 'project_path')
no_classifiers = config.getint('SML settings', 'no_targets')
animals_no = config.getint('General settings', 'animal_no')
classifier_list = [carry_classifier_name, approach_classifier_name, dig_classifier_name]
for i in range(len(classifier_list)):
if classifier_list[i].lower() == 'none': classifier_list[i] = 'none'
for clf in range(no_classifiers):
classifier_list.append(config.get('SML settings', 'target_name_' + str(clf+1)))
dist_pup_cornest_col = corenest_name + '_ ' + pup_name + '_distance'
dist_mother_cornest_col = corenest_name + '_' + dam_name + '_distance'
pup_in_corenest_col = corenest_name + '_ ' + pup_name + '_in_zone'
pup_in_nest_col = nest_name + '_ ' + pup_name + '_in_zone'
features_extracted_path = os.path.join(project_path, 'csv', 'features_extracted')
machine_results_path = os.path.join(project_path, 'csv', 'machine_results')
logs_dir_path = os.path.join(project_path, 'logs')
features_files = glob.glob(features_extracted_path + '/*.csv')
video_info_df = pd.read_csv(os.path.join(logs_dir_path, 'video_info.csv'))
video_info_df["Video"] = video_info_df["Video"].astype(str)
date_time_string = datetime.now().strftime('%Y%m%d%H%M%S')
    Xcols, Ycols, Pcols = getBpNames(config_ini)
multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
if not multiAnimalIDList:
multiAnimalIDList = []
for animal in range(animals_no):
multiAnimalIDList.append('Animal_' + str(animal + 1))
multiAnimalStatus = False
print('Applying settings for classical tracking...')
else:
multiAnimalIDList = multiAnimalIDList.split(",")
multiAnimalStatus = True
print('Applying settings for multi-animal tracking...')
animalBpDict = create_body_part_dictionary(multiAnimalStatus, multiAnimalIDList, animals_no, Xcols, Ycols, [], [])
body_parts_mother_x = animalBpDict[dam_name]['X_bps']
body_parts_pup_x = animalBpDict[pup_name]['X_bps']
body_parts_mother = [sub.replace('_x', '_p') for sub in body_parts_mother_x]
body_parts_pup = [sub.replace('_x', '_p') for sub in body_parts_pup_x]
colnames = ["Video", "Pup in nest (Frame)", "Pup in nest (s)", "Min_distance (pup to corenest)", "Reason (pup in nest)", "Pup in core-nest (frame)", "Pup in core-nest (s)", "Reason (pup in core-nest)"]
for classifier in classifier_list:
col_name_1, col_name_2 = classifier + ' (total time)', classifier + ' (before retrieval)'
col_name_3, col_name_4 = classifier + ' (latency to first event)', classifier + ' (number of events)'
col_name_5, col_name_6 = classifier + ' (Mean duration)', classifier + ' (mean interval)'
colnames.extend((col_name_1, col_name_2, col_name_3, col_name_4, col_name_5, col_name_6))
if (approach_classifier_name != 'none') and (dig_classifier_name != 'none'):
colnames.extend(("Dig_time_after_approach_before_retrieval"," Dig_events_after_approach", "Mean_duration_dig_after_approach"))
out_df = pd.DataFrame(columns=colnames)
def generate_figure(dataframe, ycolname, xlabel, ylabel, title, date_time_string, hue, video_base_name):
current_figure = sns.scatterplot(x=dataframe.index, y=dataframe[ycolname], hue=dataframe[hue], legend=False)
current_figure.set(xlabel=xlabel, ylabel=ylabel, title=title)
save_plot_name = title + '_' + video_base_name + '_' + date_time_string + '.png'
save_plot_path = os.path.join(logs_dir_path, video_base_name)
if not os.path.exists(save_plot_path): os.makedirs(save_plot_path)
image_save_path = os.path.join(save_plot_path, save_plot_name)
current_figure.figure.savefig(image_save_path, bbox_inches="tight")
current_figure.clear()
for counter, video_file in enumerate(features_files):
video_base_name = os.path.basename(video_file)
video_base_name_no_file_ending = video_base_name.replace('.csv', '')
print('Processing video ' + video_base_name_no_file_ending + '. Video ' + str(counter+1) + ' / ' + str(len(features_files)))
curr_feature_df = pd.read_csv(video_file, index_col=0)
curr_machine_results_df = pd.read_csv(os.path.join(machine_results_path, video_base_name), usecols=classifier_list)
curr_df = pd.concat([curr_feature_df,curr_machine_results_df], axis=1)
del curr_feature_df; del curr_machine_results_df
curr_df = curr_df.fillna(method='ffill')
video_fps = video_info_df['fps'][video_info_df['Video'] == video_base_name.replace('.csv', '')].values[0]
if max_time != 'none':
max_frames = int(video_fps * max_time)
if max_frames < len(curr_df):
curr_df = curr_df.head(max_frames)
for col_name in [dist_pup_cornest_col, dist_mother_cornest_col, pup_in_corenest_col, pup_in_nest_col]:
if col_name not in curr_df:
print('SimBA could not find a column named ' + str(col_name) + '. Make sure you have entered the correct ROI/pup/dam names')
carry_frames = int(video_fps * carry_frames_seconds)
curr_df['mean_p_mother'] = curr_df[body_parts_mother].mean(axis=1)
curr_df['mean_p_pup'] = curr_df[body_parts_pup].mean(axis=1)
curr_df['cumsum_nest_pup'] = curr_df[pup_in_nest_col].cumsum()
generate_figure(curr_df, dist_mother_cornest_col, 'frame number', 'distance (mm)', 'distance between mother and corenest - before pre-processing', date_time_string, 'cumsum_nest_pup', video_base_name_no_file_ending)
        generate_figure(curr_df, dist_pup_cornest_col, 'frame number', 'distance (mm)', 'distance between pup and corenest - before pre-processing', date_time_string, 'cumsum_nest_pup', video_base_name_no_file_ending)
for classifier in classifier_list:
curr_df.loc[curr_df['mean_p_mother'] < prob_mother, classifier] = 0
first_row = curr_df[curr_df[dist_pup_cornest_col] > dist_start_crit].index[0]
curr_df.loc[0:first_row, dist_pup_cornest_col] = curr_df.loc[first_row, dist_pup_cornest_col]
curr_df.loc[0:first_row, pup_in_corenest_col] = 0
curr_df.loc[0:first_row, pup_in_nest_col] = 0
if 1 in curr_df[pup_in_nest_col].values:
for nest_frame in curr_df[curr_df[pup_in_nest_col] == 1].index.tolist():
sliced_df = curr_df[carry_classifier_name].loc[nest_frame - carry_frames+1:nest_frame].tolist()
if sum(sliced_df) == 0:
curr_df.at[nest_frame, pup_in_nest_col] = 0
else:
break
rows_with_low_mean_pup_prob = curr_df[curr_df['mean_p_pup'] < prob_pup].index.tolist()
curr_df.loc[rows_with_low_mean_pup_prob, pup_in_corenest_col] = 0
curr_df.loc[rows_with_low_mean_pup_prob, pup_in_nest_col] = 0
if smooth_function == 'gaussian':
curr_df[dist_pup_cornest_col] = curr_df[dist_pup_cornest_col].rolling(window=video_fps, win_type='gaussian', center=True).mean(std=smooth_factor).fillna(curr_df[dist_pup_cornest_col])
curr_df[dist_mother_cornest_col] = curr_df[dist_mother_cornest_col].rolling(window=video_fps, win_type='gaussian',center=True).mean(std=smooth_factor).fillna(curr_df[dist_mother_cornest_col])
            generate_figure(curr_df, dist_pup_cornest_col, 'frame number', 'distance (mm)', 'smoothened_distance_between_pup_and_core_nest', date_time_string, dist_pup_cornest_col, video_base_name_no_file_ending)
generate_figure(curr_df, dist_mother_cornest_col, 'frame number', 'distance (mm)', 'smoothened_distance_between_mother_and_core_nest', date_time_string, dist_mother_cornest_col, video_base_name_no_file_ending)
closest_dist_between_pup_and_zone = round(curr_df[dist_pup_cornest_col].min(), 3)
if 1 in curr_df[pup_in_nest_col].values:
frame_when_pup_is_in_zone = curr_df[curr_df[pup_in_nest_col] == 1].index.min()
time_seconds_until_zone = round(frame_when_pup_is_in_zone / video_fps, 3)
reason_zone = "Pup in nest"
else:
frame_when_pup_is_in_zone = len(curr_df)
time_seconds_until_zone = round(frame_when_pup_is_in_zone / video_fps, 3)
reason_zone = "Pup not retrieved"
if 1 in curr_df[pup_in_corenest_col].values:
frame_when_pup_is_in_core_nest = curr_df[curr_df[pup_in_corenest_col] == 1].index.min()
time_seconds_until_corenest = round(frame_when_pup_is_in_core_nest / video_fps, 3)
reason_corenest = "Pup in core-nest"
else:
frame_when_pup_is_in_core_nest = len(curr_df)
time_seconds_until_corenest = round(frame_when_pup_is_in_core_nest / video_fps, 3)
reason_corenest = "Pup not in core-nest"
latency_list, total_time_list, before_retrieval_list = [], [], []
for classifier in classifier_list:
total_time_list.append(round(curr_df[classifier].sum() / video_fps, 3))
before_retrieval_list.append(round(curr_df.loc[0: frame_when_pup_is_in_zone, classifier].sum() / video_fps, 3))
latency_list.append(round(curr_df[curr_df[classifier] == 1].index.min() / video_fps, 3))
event_time_between_list, event_counter_list = [], []
for classifier in classifier_list:
groupDf = pd.DataFrame()
v = (curr_df[classifier] != curr_df[classifier].shift()).cumsum()
u = curr_df.groupby(v)[classifier].agg(['all', 'count'])
m = u['all'] & u['count'].ge(1)
groupDf['groups'] = curr_df.groupby(v).apply(lambda x: (x.index[0], x.index[-1]))[m]
event_counter_list.append(len(groupDf))
if len(groupDf) > 1:
between_list = []
iterator = groupDf.iterrows()
for (i, row1), (j, row2) in zip(iterator, iterator):
between_list.append(round((row2[0][0] - row1[0][1]) / video_fps, 3))
else:
between_list = [np.nan]
event_time_between_list.append(round(sum(between_list) / len(between_list), 3))
before_enter_corenest_df = curr_df.loc[0: frame_when_pup_is_in_core_nest-1]
mean_length_list = []
for classifier in classifier_list:
groupDf = pd.DataFrame()
v = (before_enter_corenest_df[classifier] != before_enter_corenest_df[classifier].shift()).cumsum()
u = before_enter_corenest_df.groupby(v)[classifier].agg(['all', 'count'])
m = u['all'] & u['count'].ge(1)
groupDf['groups'] = before_enter_corenest_df.groupby(v).apply(lambda x: (x.index[0], x.index[-1]))[m]
bout_len_list = []
for index, row in groupDf.iterrows():
bout_len_list.append(round((row[0][1] - row[0][0]) / video_fps , 3))
try:
mean_length_list.append(round(sum(bout_len_list) / len(bout_len_list), 3))
except ZeroDivisionError:
mean_length_list.append(0)
curr_df['cumsum_nest_pup'] = curr_df[pup_in_nest_col].expanding(1).sum()
print(curr_df['cumsum_nest_pup'])
retr_time_frame = curr_df[curr_df[pup_in_nest_col] == 1].index.min()
        if pd.isna(retr_time_frame): retr_time_frame = len(curr_df)  # pup never entered the nest
retr_time_s = round(retr_time_frame / video_fps, 3)
# Dig time after first approach until retrieval
if (approach_classifier_name != 'none') and (dig_classifier_name != 'none'):
first_approach = curr_df[curr_df[approach_classifier_name] == 1].index.min()
approach_to_retrieval_df = curr_df.loc[first_approach:retr_time_frame].reset_index(drop=True)
dig_time_after_approach_to_ret = round(approach_to_retrieval_df[dig_classifier_name].sum() / video_fps, 3)
groupDf = pd.DataFrame()
v = (approach_to_retrieval_df[dig_classifier_name] != approach_to_retrieval_df[dig_classifier_name].shift()).cumsum()
u = approach_to_retrieval_df.groupby(v)[dig_classifier_name].agg(['all', 'count'])
m = u['all'] & u['count'].ge(1)
try:
groupDf['groups'] = approach_to_retrieval_df.groupby(v).apply(lambda x: (x.index[0], x.index[-1]))[m]
dig_events_after_approach = len(groupDf)
if dig_events_after_approach > 0:
duration_list = []
for index, row in groupDf.iterrows():
duration_list.append(round((row[0][1] - row[0][0]) / video_fps, 3))
mean_duration_dig_after_approach = (sum(duration_list) / len(duration_list))
else:
mean_duration_dig_after_approach = np.nan
except KeyError:
mean_duration_dig_after_approach = np.nan
generate_figure(curr_df, dist_pup_cornest_col, 'frame number', 'distance (mm)', 'cumulative time of pup in nest', date_time_string, 'cumsum_nest_pup', video_base_name_no_file_ending)
generate_figure(curr_df, dist_mother_cornest_col, 'frame number', 'distance (mm)','distance corenest to mother', date_time_string, 'cumsum_nest_pup', video_base_name_no_file_ending)
out_list = [video_base_name_no_file_ending, frame_when_pup_is_in_zone, time_seconds_until_zone, closest_dist_between_pup_and_zone, reason_zone, frame_when_pup_is_in_core_nest, time_seconds_until_corenest, reason_corenest]
for clf in range(len(classifier_list)):
total_time, before_ret, latency = total_time_list[clf], before_retrieval_list[clf], latency_list[clf]
events, mean_dur, mean_interval = event_counter_list[clf], mean_length_list[clf], event_time_between_list[clf]
out_list.extend((total_time, before_ret, latency, events, mean_dur, mean_interval))
if (approach_classifier_name != 'none') and (dig_classifier_name != 'none'):
out_list.extend((dig_time_after_approach_to_ret, dig_events_after_approach, mean_duration_dig_after_approach))
out_df.loc[len(out_df)] = out_list
pup_ret_file_path = os.path.join(logs_dir_path, 'Pup_retrieval_' + date_time_string + '.csv')
out_df.to_csv(pup_ret_file_path)
    log_file_index = ['Date_time', 'Videos_#', 'Pup probability', 'Dam probability', 'Start distance criterion', 'Carry frames seconds', 'Smooth factor', 'Core-nest name', \
                      'Nest name', 'Dam name', 'Pup name', 'Smooth function', 'Carry classifier name', 'Approach classifier name', 'Dig classifier name']
log_list = [date_time_string, str(len(features_files)), prob_pup, prob_mother, dist_start_crit, carry_frames_seconds, smooth_factor, corenest_name, nest_name, dam_name, pup_name, smooth_function, carry_classifier_name, approach_classifier_name, dig_classifier_name]
log_df = pd.DataFrame(log_list, index=log_file_index, columns=['Session values'])
log_name = 'Log_pup_retrieval_' + str(date_time_string) + '.csv'
log_save_path = os.path.join(logs_dir_path, log_name)
log_df.to_csv(log_save_path)
figure_df = out_df.copy()
figure_df['Experiment'] = 1
swarm_plot = sns.boxplot(x="Experiment", y="Pup in nest (s)", data=figure_df)
swarm_plot = sns.swarmplot(x="Experiment", y="Pup in nest (s)", data=figure_df, color="white")
swarm_plot.set(xlabel='', ylabel="Pup in nest (s)", title='Summary - pup retrieval time (s)')
swarm_plot_name = 'Summary_pup_retrieval_times' + '_' + date_time_string + '.png'
save_plot_path = os.path.join(logs_dir_path, swarm_plot_name)
swarm_plot.figure.savefig(save_plot_path, bbox_inches="tight")
swarm_plot.clear()
print('All videos analysed, summary file saved @ ' + str(pup_ret_file_path))
print('Summary plot saved @ ' + str(save_plot_path))
print('Summary log file saved @ ' + str(log_save_path))
|
407854
|
from unittest import TestCase
from unittest.mock import MagicMock
from signalflow_algorithms.algorithms.graph import Graph, Branch, Node
class TestGraph(TestCase):
def test_graph_copy(self):
# Create graph
graph = Graph()
node_1 = Node(graph)
node_2 = Node(graph)
node_3 = Node(graph)
node_4 = Node(graph)
Branch(node_1, node_2)
Branch(node_2, node_2)
Branch(node_2, node_3)
Branch(node_3, node_1)
Branch(node_1, node_4)
# Make copy
graph_copy = graph.copy()
# Assert
nodes_original = list(graph.nodes)
nodes_original.sort(key=lambda n: n.id)
nodes_copy = list(graph_copy.nodes)
nodes_copy.sort(key=lambda n: n.id)
branches_original = list(graph.branches)
branches_original.sort(key=lambda n: n.id)
branches_copy = list(graph_copy.branches)
branches_copy.sort(key=lambda n: n.id)
for node_original, node_copy in zip(nodes_original, nodes_copy):
self.assertTrue(self.__node_equals(node_original, node_copy))
for branch_original, branch_copy in zip(branches_original,
branches_copy):
self.assertTrue(self.__branch_equals(branch_original, branch_copy))
def test_subgraph(self):
graph = Graph()
node = Node(graph)
subgraph = graph.subgraph({node})
self.assertEqual(1, len(subgraph.nodes))
self.assertEqual(0, len(graph.nodes))
self.assertEqual(node, list(subgraph.nodes)[0])
self.assertEqual(node.graph, subgraph)
graph = Graph()
node_1 = Node(graph)
node_2 = Node(graph)
node_3 = Node(graph)
branch_1 = Branch(node_1, node_2, "1")
branch_2 = Branch(node_2, node_1, "2")
Branch(node_2, node_3, "3")
Branch(node_3, node_1, "4")
branch_5 = Branch(node_3, node_3, "5")
branch_6 = Branch(node_2, node_2, "6")
subgraph = graph.subgraph([node_1, node_2])
self.assertCountEqual([node_1, node_2], subgraph.nodes)
self.assertCountEqual([node_3], graph.nodes)
self.assertCountEqual([branch_1, branch_2, branch_6],
subgraph.branches)
self.assertCountEqual([branch_5], graph.branches)
self.assertEqual(node_1.graph, subgraph)
self.assertEqual(node_2.graph, subgraph)
self.assertEqual(node_3.graph, graph)
self.assertEqual(branch_1.graph, subgraph)
self.assertEqual(branch_2.graph, subgraph)
self.assertEqual(branch_5.graph, graph)
self.assertEqual(branch_6.graph, subgraph)
def test_add_remove_nodes(self):
graph = Graph()
node1 = MagicMock(Node)
node2 = MagicMock(Node)
node3 = MagicMock(Node)
node1.graph = graph
node2.graph = graph
node3.graph = graph
graph.add_node(node1)
self.assertSetEqual({node1}, graph.nodes)
graph.add_node(node2)
graph.add_node(node3)
def add_node_again():
graph.add_node(node1)
self.assertRaises(ValueError, add_node_again)
self.assertSetEqual({node1, node2, node3}, graph.nodes)
node4 = MagicMock(Node)
node4.graph = MagicMock(Graph)
def add_node4():
graph.add_node(node4)
self.assertRaises(ValueError, add_node4)
self.assertSetEqual({node1, node2, node3}, graph.nodes)
node4.graph = graph
node4.ingoing = [1, 2, 3]
self.assertRaises(ValueError, add_node4)
self.assertSetEqual({node1, node2, node3}, graph.nodes)
graph.remove_node(node1)
self.assertSetEqual({node2, node3}, graph.nodes)
graph.remove_node(node2)
graph.remove_node(node3)
self.assertSetEqual(set(), graph.nodes)
def test_add_remove_branches(self):
graph = Graph()
branch1 = MagicMock(Branch)
branch2 = MagicMock(Branch)
branch3 = MagicMock(Branch)
branch1.start = MagicMock(Node)
branch1.end = MagicMock(Node)
branch1.graph = graph
branch2.start = MagicMock(Node)
branch2.end = MagicMock(Node)
branch2.graph = graph
branch3.start = MagicMock(Node)
branch3.end = MagicMock(Node)
branch3.graph = graph
graph.add_branch(branch1)
self.assertSetEqual({branch1}, graph.branches)
graph.add_branch(branch2)
graph.add_branch(branch3)
self.assertSetEqual({branch1, branch2, branch3}, graph.branches)
def add_branch_again():
graph.add_branch(branch1)
self.assertRaises(ValueError, add_branch_again)
branch4 = MagicMock(Branch)
branch4.start = MagicMock(Node)
branch4.end = MagicMock(Node)
branch4.graph = None
def add_branch4():
graph.add_branch(branch4)
branch4.end = None
self.assertRaises(ValueError, add_branch4)
self.assertSetEqual({branch1, branch2, branch3}, graph.branches)
branch1.start = None
branch1.end = None
branch2.start = None
branch2.end = None
branch3.start = None
branch3.end = None
graph.remove_branch(branch1)
self.assertSetEqual({branch2, branch3}, graph.branches)
graph.remove_branch(branch2)
graph.remove_branch(branch3)
self.assertSetEqual(set(), graph.branches)
def test_to_dict(self):
return
def __node_equals(self, node_1: Node, node_2: Node):
# Check attributes
if node_1.id != node_2.id:
return False
# Check ingoing branches
not_matched = node_2.ingoing
for branch_1 in node_1.ingoing:
for branch_2 in not_matched.copy():
if self.__branch_equals(branch_1, branch_2):
not_matched.remove(branch_2)
if len(not_matched) != 0:
return False
# Check outgoing branches
not_matched = node_2.outgoing
for branch_1 in node_1.outgoing:
for branch_2 in not_matched.copy():
if self.__branch_equals(branch_1, branch_2):
not_matched.remove(branch_2)
if len(not_matched) != 0:
return False
return True
def __branch_equals(self, branch_1: Branch, branch_2: Branch):
        # Check ids and weights of branches, and ids of start and end nodes
if branch_1.id != branch_2.id or \
branch_1.weight != branch_2.weight or \
branch_1.start.id != branch_2.start.id or \
branch_1.end.id != branch_2.end.id:
return False
return True
class TestBranch(TestCase):
def test_properties(self):
node1 = MagicMock(Node)
node2 = MagicMock(Node)
graph = MagicMock(Graph)
node1.graph = graph
node2.graph = graph
branch = Branch(node1, node2, "1234")
self.assertEqual(node1, branch.start)
self.assertEqual(node2, branch.end)
self.assertEqual("1234", branch.weight)
self.assertEqual(graph, branch.graph)
branch.weight = "1111"
self.assertEqual("1111", branch.weight)
def set_graph(graph: Graph):
branch.graph = graph
graph2 = MagicMock(Graph)
self.assertRaises(ValueError, set_graph, graph2)
branch.graph = None
self.assertEqual(None, branch.graph)
branch.graph = graph2
self.assertEqual(graph2, branch.graph)
def test_add_remove(self):
node1 = MagicMock(Node)
node2 = MagicMock(Node)
graph = MagicMock(Graph)
node1.graph = graph
node2.graph = graph
branch = Branch(node1, node2, "1234")
graph.add_branch.assert_called_once_with(branch)
node1.add_outgoing_branch.assert_called_once_with(branch)
node2.add_ingoing_branch.assert_called_once_with(branch)
self.assertEqual(graph, branch.graph)
branch.remove()
self.assertIsNone(branch.graph)
node1.remove_outgoing_branch.assert_called_once_with(branch)
node2.remove_ingoing_branch.assert_called_once_with(branch)
def test_connect(self):
node1 = MagicMock(Node)
node2 = MagicMock(Node)
graph = MagicMock(Graph)
node1.graph = graph
node2.graph = graph
branch = Branch(node1, node2, "1234")
branch.remove()
node3 = MagicMock(Node)
node4 = MagicMock(Node)
node3.graph = graph
node4.graph = graph
branch.reconnect(node3, node4)
self.assertEqual(graph, branch.graph)
node3.add_outgoing_branch.assert_called_once_with(branch)
node4.add_ingoing_branch.assert_called_once_with(branch)
def test_nodes_from_different_graphs(self):
node1 = MagicMock(Node)
node2 = MagicMock(Node)
graph = MagicMock(Graph)
graph2 = MagicMock(Graph)
node1.graph = graph
node2.graph = graph2
def create_branch():
Branch(node1, node2, "1234")
self.assertRaises(ValueError, create_branch)
def test_to_dict(self):
node1 = MagicMock(Node)
node2 = MagicMock(Node)
graph = MagicMock(Graph)
node1.graph = graph
node2.graph = graph
hex1 = MagicMock()
hex1.hex = '1234'
node1.id = hex1
hex2 = MagicMock()
hex2.hex = '5678'
node2.id = hex2
branch = Branch(node1, node2, "1234")
dict = {'id': branch.id.hex,
'weight': '1234',
'start': '1234',
'end': '5678',
}
self.assertEqual(dict, branch.to_dict())
class TestNode(TestCase):
def test_properties(self):
graph = MagicMock(Graph)
node = Node(graph)
self.assertEqual(graph, node.graph)
b1 = MagicMock(Branch)
b1.graph = graph
b1.end = node
node.add_ingoing_branch(b1)
self.assertIn(b1, node.ingoing)
b2 = MagicMock(Branch)
b2.graph = graph
b2.start = node
node.add_outgoing_branch(b2)
self.assertIn(b2, node.outgoing)
node.remove_ingoing_branch(b1)
self.assertEqual(set(), node.ingoing)
node.remove_outgoing_branch(b2)
self.assertEqual(set(), node.outgoing)
def test_to_dict(self):
graph = MagicMock(Graph)
node = Node(graph)
dict = {
'id': node.id.hex
}
self.assertEqual(dict, node.to_dict())
def test_set_graph(self):
graph = MagicMock(Graph)
node = Node(graph)
self.assertEqual(graph, node.graph)
graph2 = MagicMock(Graph)
def set_graph():
node.graph = graph2
self.assertRaises(ValueError, set_graph)
node.graph = None
self.assertIsNone(node.graph)
node.graph = graph2
self.assertEqual(graph2, node.graph)
def test_add_invalid_branches(self):
graph = MagicMock(Graph)
graph2 = MagicMock(Graph)
node = Node(graph)
self.assertEqual(graph, node.graph)
b1 = MagicMock(Branch)
b1.graph = graph2
b1.end = node
def add_ingoing():
node.add_ingoing_branch(b1)
self.assertRaises(ValueError, add_ingoing)
b2 = MagicMock(Branch)
b2.graph = graph2
b2.start = node
def add_outgoing():
node.add_outgoing_branch(b2)
self.assertRaises(ValueError, add_outgoing)
b1.graph = graph
b1.start = MagicMock(Node)
b1.end = MagicMock(Node)
b2.graph = graph
b2.start = MagicMock(Node)
b2.end = MagicMock(Node)
self.assertRaises(ValueError, add_ingoing)
self.assertRaises(ValueError, add_outgoing)
node.graph = None
b1.start = node
b1.end = node
b2.start = node
b2.end = node
self.assertRaises(ValueError, add_ingoing)
self.assertRaises(ValueError, add_outgoing)
node.graph = graph
add_ingoing()
add_outgoing()
self.assertRaises(ValueError, add_ingoing)
self.assertRaises(ValueError, add_outgoing)
|
407881
|
import numpy as np
import pytest
from scipy.sparse.linalg import lsqr
import krylov
from .helpers import assert_consistent
from .linear_problems import (
complex_shape,
complex_unsymmetric,
hermitian_indefinite,
hpd,
real_shape,
real_unsymmetric,
spd_dense,
spd_sparse,
symmetric_indefinite,
)
@pytest.mark.parametrize(
"A_b",
[
spd_dense((5,)),
spd_sparse((5,)),
spd_sparse((5, 1)),
# spd_sparse((5, 3)),
# # spd_rhs_0((5,)),
# # spd_rhs_0sol0(),
hpd(),
symmetric_indefinite(),
hermitian_indefinite(),
real_unsymmetric(),
complex_unsymmetric(),
real_shape(3, 2),
complex_shape(3, 2),
],
)
def test_cgls(A_b):
A, b = A_b
# compute reference solution
ref, *_ = lsqr(A, b)
callback_counter = 0
def callback(*_):
nonlocal callback_counter
callback_counter += 1
sol, info = krylov.cgls(A, b, tol=1.0e-7, callback=callback)
assert sol is not None
assert callback_counter == info.numsteps + 1
assert info.success
assert_consistent(A, b, info, sol, 1.0e-7)
    assert np.all(np.abs(sol - ref) < 1.0e-7 * np.abs(ref))
def test_nonzero_x0():
A, b = complex_unsymmetric()
x0 = np.arange(1, A.shape[1] + 1).astype(complex)
x0c = x0.copy()
sol, info = krylov.cgls(A, b, x0=x0, tol=1.0e-7, maxiter=10)
assert np.all(x0c == x0)
print("info:")
print(info)
assert info.success
assert_consistent(A, b, info, sol, 1.0e-7, x0=x0)
|
407935
|
from spark.config.domain import DomainType
class ConfigNormalizer:
def __init__(self, config_set):
self._config_set = config_set
self._param_list = config_set.get_params()
self._normalized_config = self.__init_normalized_config()
def __init_normalized_config(self):
normalized_config = []
for param in self._param_list:
domain = param.get_domain()
norm_values = ConfigNormalizer.normalize_domain(domain)
normalized_config.append(norm_values)
return normalized_config
def get_all_possible_normalized_configs(self):
return self._normalized_config
# TODO make this API clean
# Currently it has assumption that normalized_2D_array will have values
# from param in same order of self._param_list.
    def denormalize_config_set(self, normalized_2D_array):
        assert len(normalized_2D_array) == len(self._param_list)
        res = list()
        for param, values in zip(self._param_list, normalized_2D_array):
            res.append(ConfigNormalizer.denormalize_array(param, values))
        return res
def denormalize_config(self, normalized_config_array):
"""
:param normalized_config_array: Contains array of normalized values of each parameter specified in the same
order of param_list of the normalizer. For e.g., [0.1, 0.9] will be input for
param_list = ['spark.executor.cores', 'spark.executor.memory']
:return: Denormalized config array
"""
        assert len(normalized_config_array) == len(self._param_list)
        res = list()
        for param, value in zip(self._param_list, normalized_config_array):
            res.append(ConfigNormalizer.denormalize_value(param, value))
        return res
def normalize_config(self, denormalized_config_array):
"""
:param denormalized_config_array: Contains array of values of each parameter specified in the same
order of param_list of the normalizer. For e.g., [10, 23899] will be input for
param_list = ['spark.executor.cores', 'spark.executor.memory']
:return: Normalized config.
"""
        assert len(denormalized_config_array) == len(self._param_list)
        res = list()
        for param, value in zip(self._param_list, denormalized_config_array):
            res.append(ConfigNormalizer.normalize_value(param, value))
        return res
def get_params(self):
return self._param_list
@staticmethod
    def norm_function(a, min_norm, max_norm):
        if max_norm == min_norm:
            return a - min_norm
        elif max_norm <= a:
            return 1
        else:
            return (1 / float(max_norm - min_norm)) * (a - min_norm)
    @staticmethod
    def normalize_domain(domain):
        norm_values = map(lambda a: ConfigNormalizer.norm_function(a, domain.get_min(), domain.get_max()),
                          domain.get_possible_values())
        # Materialize the map so the values can be iterated more than once.
        return list(norm_values)
@staticmethod
def normalize_value(param, value):
domain = param.get_domain()
return ConfigNormalizer.norm_function(value, domain.get_min(), domain.get_max())
@staticmethod
def normalize(param, value_list):
domain = param.get_domain()
return list(map(lambda a: ConfigNormalizer.norm_function(a, domain.get_min(), domain.get_max()), value_list))
@staticmethod
def denorm_func(min_norm, max_norm, domain_type):
if domain_type == DomainType.INT:
return lambda a: round(a * (max_norm - min_norm)) + min_norm
else:
return lambda a: float(a * (max_norm - min_norm)) + min_norm
@staticmethod
    def denormalize_array(param, value):
        domain = param.get_domain()
        denormalizer_func = \
            ConfigNormalizer.denorm_func(domain.get_min(), domain.get_max(), domain.get_type())
        # Materialize the map so the values can be iterated more than once.
        return list(map(denormalizer_func, value))
@staticmethod
    def denormalize_value(param, value):
        domain = param.get_domain()
        denormalizer_func = ConfigNormalizer.denorm_func(domain.get_min(), domain.get_max(), domain.get_type())
        return denormalizer_func(value)
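# Hedged usage sketch (added, not part of the original module), exercising only
# the static helpers above with illustrative numbers: for a domain [1, 11],
# norm_function maps 6 to 0.5 and denorm_func inverts that mapping.
if __name__ == "__main__":
    normalized = ConfigNormalizer.norm_function(6, 1, 11)
    assert normalized == 0.5
    denormalizer = ConfigNormalizer.denorm_func(1, 11, DomainType.INT)
    assert denormalizer(normalized) == 6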
|
407953
|
from __future__ import absolute_import, division, print_function
import json
from base64 import b64decode
from flask import Blueprint, current_app, jsonify, request
import appr.api.impl.registry
from appr.api.app import getvalues, repo_name
from appr.exception import (
ApprException, ChannelNotFound, InvalidParams, InvalidRelease, InvalidUsage,
PackageAlreadyExists, PackageNotFound, PackageReleaseNotFound, UnableToLockResource,
UnauthorizedAccess, Unsupported)
from appr.models import DEFAULT_MEDIA_TYPE, Blob, Channel, Package
registry_app = Blueprint('registry', __name__)
@registry_app.errorhandler(Unsupported)
@registry_app.errorhandler(PackageAlreadyExists)
@registry_app.errorhandler(InvalidRelease)
@registry_app.errorhandler(UnableToLockResource)
@registry_app.errorhandler(UnauthorizedAccess)
@registry_app.errorhandler(PackageNotFound)
@registry_app.errorhandler(PackageReleaseNotFound)
@registry_app.errorhandler(ApprException)
@registry_app.errorhandler(InvalidUsage)
@registry_app.errorhandler(InvalidParams)
@registry_app.errorhandler(ChannelNotFound)
def render_error(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@registry_app.before_request
def pre_request_logging():
jsonbody = request.get_json(force=True, silent=True)
values = request.values.to_dict()
if jsonbody:
values.update(jsonbody)
current_app.logger.info("request", extra={
"remote_addr": request.remote_addr,
"http_method": request.method,
"original_url": request.url,
"path": request.path,
"data": values,
"headers": dict(request.headers.to_list())})
@registry_app.route("/test_error")
def test_error():
raise InvalidUsage("error message", {"path": request.path})
def _pull(data, json_format=True):
if json_format:
resp = jsonify(data)
else:
resp = current_app.make_response(b64decode(data['blob']))
resp.headers['Content-Disposition'] = data['filename']
resp.mimetype = 'application/x-gzip'
return resp
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/blobs/sha256/<string:digest>",
methods=['GET'], strict_slashes=False)
def blobs(namespace, package_name, digest):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull_blob(reponame, digest, blob_class=Blob)
json_format = request.args.get('format', None) == 'json'
return _pull(data, json_format=json_format)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/blobs/sha256/<string:digest>/json",
methods=['GET'], strict_slashes=False)
def blobs_json(namespace, package_name, digest):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull_blob(reponame, digest, blob_class=Blob)
return _pull(data, json_format=True)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>/pull",
methods=['GET'], strict_slashes=False)
def pull(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull(reponame, release, media_type, Package, blob_class=Blob)
json_format = request.args.get('format', None) == 'json'
return _pull(data, json_format=json_format)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>/pull/json",
methods=['GET'], strict_slashes=False)
def pull_json(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull(reponame, release, media_type, Package, blob_class=Blob)
return _pull(data, json_format=True)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['POST'],
strict_slashes=False)
def push(namespace, package_name):
reponame = repo_name(namespace, package_name)
values = getvalues()
release = values['release']
media_type = values.get('media_type', DEFAULT_MEDIA_TYPE)
force = (values.get('force', 'false') == 'true')
metadata = values.get('metadata', None)
blob = Blob(reponame, values['blob'])
result = appr.api.impl.registry.push(reponame, release, media_type, blob, force, Package,
metadata=metadata)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
methods=['DELETE'], strict_slashes=False)
def delete_package(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_package(reponame, release, media_type,
package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages", methods=['GET'], strict_slashes=False)
def list_packages():
values = getvalues()
namespace = values.get('namespace', None)
result = appr.api.impl.registry.list_packages(namespace, Package, search=values.get(
'query', None), media_type=values.get('media_type', None))
resp = current_app.make_response(json.dumps(result))
resp.mimetype = 'application/json'
return resp
@registry_app.route("/api/v1/packages/search", methods=['GET'], strict_slashes=False)
def search_packages():
values = getvalues()
query = values.get("q")
result = appr.api.impl.registry.search(query, Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
methods=['GET'], strict_slashes=False)
def show_package(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_package(reponame, release, media_type,
channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['GET'],
strict_slashes=False)
def show_package_releases(namespace, package_name):
reponame = repo_name(namespace, package_name)
media_type = getvalues().get('media_type', None)
result = appr.api.impl.registry.show_package_releases(reponame, media_type=media_type,
package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>",
methods=['GET'], strict_slashes=False)
def show_package_release_manifests(namespace, package_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_package_manifests(reponame, release,
package_class=Package)
return jsonify(result)
# CHANNELS
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>/channels", methods=[
'GET'], strict_slashes=False)
def list_channels(namespace, package_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.list_channels(reponame, Channel)
resp = current_app.make_response(json.dumps(result))
resp.mimetype = 'application/json'
return resp
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>",
methods=['GET'], strict_slashes=False)
def show_channel(namespace, package_name, channel_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_channel(reponame, channel_name, Channel)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>/<string:release>",
methods=['POST'], strict_slashes=False)
def add_channel_release(namespace, package_name, channel_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.add_channel_release(
reponame, channel_name, release, channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>/<string:release>",
methods=['DELETE'], strict_slashes=False)
def delete_channel_release(namespace, package_name, channel_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_channel_release(
reponame, channel_name, release, channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>",
methods=['DELETE'], strict_slashes=False)
def delete_channel(namespace, package_name, channel_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_channel(reponame, channel_name, channel_class=Channel)
return jsonify(result)
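# Hedged wiring sketch (added, not part of the module): mounting the blueprint
# on a standalone Flask application for local experimentation.
if __name__ == '__main__':
    from flask import Flask
    standalone_app = Flask(__name__)
    standalone_app.register_blueprint(registry_app)
    standalone_app.run()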
|
407983
|
from datetime import datetime, timedelta
from typing import List, Optional
import numpy as np
def datetime_range(
granularity: timedelta, # np.timedelta64(1, 'D') or python timedelta
start_time: datetime,
end_time: Optional[datetime] = None,
num_points: Optional[int] = None,
) -> List[datetime]:
"""Generates a range of datetimes with a given granularity.
You can either use `end_time` or `num_points` to set the end of the range.
Args:
granularity: Frequency of the data points. Eg: 1 day, 2 hours.
start_time: Start of the range.
end_time: End of the range.
num_points: Number of data points to generate.
Returns:
A list of datetimes.
"""
if end_time is None and num_points is None:
raise ValueError(
"Both `end_time` and `num_points` are not set. One in the two arguments is required."
)
if end_time is not None and num_points is not None:
raise ValueError(
"Both `end_time` or `num_points` are set. Please use only one."
)
if end_time is not None:
num_points = int((end_time - start_time) / granularity)
return [start_time + i * granularity for i in range(num_points)] # type: ignore
def delta_from_start(time_points: np.ndarray) -> np.ndarray:
"""Returns the timedeltas to the first element of the array."""
# todo move this
return time_points - np.full(len(time_points), time_points[0])
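# Hedged usage sketch (added, not part of the module): both calls below produce
# the same six hourly points, once via `num_points` and once via `end_time`.
if __name__ == "__main__":
    start = datetime(2021, 1, 1)
    by_count = datetime_range(timedelta(hours=1), start, num_points=6)
    by_end = datetime_range(timedelta(hours=1), start, end_time=start + timedelta(hours=6))
    assert by_count == by_end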
|
407985
|
from __future__ import division
import torch
from torch import nn
import numpy as np
def compute_errors_test(gt, pred):
gt = gt.numpy()
pred = pred.numpy()
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_diff = np.mean(np.abs(gt - pred))
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
return abs_rel, abs_diff, sq_rel, rmse, rmse_log, a1, a2, a3
def compute_errors_train(gt, pred, valid):
abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0,0,0,0,0,0
batch_size = gt.size(0)
for current_gt, current_pred, current_valid in zip(gt, pred, valid):
valid_gt = current_gt[current_valid]
valid_pred = current_pred[current_valid]
if len(valid_gt) == 0:
continue
else:
thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))
a1 += (thresh < 1.25).float().mean()
a2 += (thresh < 1.25 ** 2).float().mean()
a3 += (thresh < 1.25 ** 3).float().mean()
abs_diff += torch.mean(torch.abs(valid_gt - valid_pred))
abs_rel += torch.mean(torch.abs(valid_gt - valid_pred) / valid_gt)
sq_rel += torch.mean(((valid_gt - valid_pred)**2) / valid_gt)
return [metric / batch_size for metric in [abs_rel, abs_diff, sq_rel, a1, a2, a3]]
|
407986
|
import argparse
import os
EMPTY_RESPONSE = 'EMPTY_RESPONSE'
def print_dialogs(dialogs, history_size, sparse=True, print_next_post=False, delimiter='\\n'):
    if sparse:
        step = history_size + 1
    else:
        step = 1
    if print_next_post:
        window_sz = history_size + 1
    else:
        window_sz = history_size
    for i in range(0, len(dialogs), step):
        history = ' {} '.format(delimiter).join(dialogs[i:i + history_size])
        if i + window_sz < len(dialogs):
            response = dialogs[i + history_size]
            if print_next_post:
                next_post = dialogs[i + history_size + 1]
                print("1 {}\t{}\t{}".format(history, response, next_post))
            else:
                print("1 {}\t{}".format(history, response))
        else:
            remaining_turns = history.split(' {} '.format(delimiter))
            if not print_next_post:
                if len(remaining_turns) > 1:
                    history = ' {} '.format(delimiter).join(remaining_turns[0:-1])
                    response = remaining_turns[-1]
                    print("1 {}\t{}".format(history, response))
            else:
                if len(remaining_turns) > 2:
                    history = ' {} '.format(delimiter).join(remaining_turns[0:-2])
                    response = remaining_turns[-2]
                    next_post = remaining_turns[-1]
                    print("1 {}\t{}\t{}".format(history, response, next_post))
def handle_session(session, history_size, sparse=True, print_next_post=False):
if not session:
return
dialogs = []
for idx, post, response in session:
dialogs.append(post)
dialogs.append(response)
dialogs = list(filter(lambda item: item != EMPTY_RESPONSE, dialogs))
print_dialogs(dialogs, history_size, sparse, print_next_post)
def build_single_turn_fb_dialog(file_name, history_size, sparse=True, print_next_post=False):
with open(file_name) as f:
line = f.readline()
session = []
prev_id = -1
while line:
line = line.strip()
item_arr = line.split('\t')
id_ = int(item_arr[0].split()[0])
post = ' '.join(item_arr[0].split()[1:]).lower()
if len(item_arr) > 1:
response = item_arr[1].lower()
else:
                response = EMPTY_RESPONSE
if id_ <= prev_id:
handle_session(session, history_size, sparse, print_next_post)
session = [(id_, post, response)]
else:
session.append((id_, post, response))
line = f.readline()
prev_id = id_
handle_session(session, history_size, sparse, print_next_post)
def main(fb_dialog_file, history_size, sparse=True, print_next_post=False):
assert os.path.isfile(fb_dialog_file)
build_single_turn_fb_dialog(fb_dialog_file, history_size, sparse, print_next_post)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fb_dialog_file', type=str, required=True)
parser.add_argument('--history_size', type=int, required=True)
parser.add_argument('--sparse', type=str2bool, default=True, required=True)
parser.add_argument('--print_next_post', type=str2bool, default=False, required=True)
opt = parser.parse_args()
fb_dialog_file = opt.fb_dialog_file
history_size = opt.history_size
sparse = opt.sparse
print_next_post = opt.print_next_post
main(fb_dialog_file, history_size, sparse, print_next_post)
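# Hedged invocation sketch: the flags mirror the argparse definitions above;
# "dialogs.txt" is a hypothetical input file in the fb-dialog format parsed by
# build_single_turn_fb_dialog.
#   python <this_script>.py --fb_dialog_file dialogs.txt --history_size 2 \
#       --sparse true --print_next_post false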
|
408017
|
import sys, os
import socket
from distutils import sysconfig
from Package import Package
petsc_text = r'''
#include <stdlib.h>
#include <stdio.h>
#include <mpi.h>
#include <petsc.h>
int main(int argc, char* argv[]) {
PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
printf("MPI version %d.%d\n", MPI_VERSION, MPI_SUBVERSION);
printf("Petsc version %d.%d.%d\n", PETSC_VERSION_MAJOR,PETSC_VERSION_MINOR,PETSC_VERSION_SUBMINOR);
PetscFinalize();
return EXIT_SUCCESS;
}
'''
def parse_conf(ctx, conf_path, lib_dirs, libs):
vars = {}
sysconfig.parse_makefile(conf_path, vars)
flag_dict = ctx.env.ParseFlags(vars['PACKAGES_LIBS'])
lib_dirs.extend(flag_dict['LIBPATH'])
for ii in range(len(libs)):
libs[ii].extend(flag_dict['LIBS'])
def find_conf(ctx, base, inc_dirs, lib_dirs, libs, extra_libs):
# PETSc 3.1
conf_path = os.path.join(base, 'conf', 'petscvariables')
if os.path.exists(conf_path):
parse_conf(ctx, conf_path, lib_dirs, libs)
# PETSC 2.3.3
conf_path = os.path.join(base, 'bmake', 'petscconf')
if os.path.exists(conf_path):
vars = {}
sysconfig.parse_makefile(conf_path, vars)
if 'PETSC_ARCH' in vars:
arch = vars['PETSC_ARCH']
inc_dirs.extend([os.path.join(base, 'bmake', arch)])
lib_dirs.extend([os.path.join(base, 'lib', arch)])
conf_path = os.path.join(base, 'bmake', arch, 'petscconf')
parse_conf(ctx, conf_path, lib_dirs, libs)
class PETSc(Package):
def __init__(self, **kwargs):
defaults = {
#'download_url': 'http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-3.7.6.tar.gz',
'download_url': 'http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-3.12.3.tar.gz',
}
defaults.update(kwargs)
super(PETSc, self).__init__(**defaults)
self.sub_dirs = [('include','lib')]
self.libs = [['cmumps', 'HYPRE', 'sundials_cvode', 'petsc', 'scalapack', 'parmetis', 'dmumps', 'smumps', 'zmumps', 'mumps_common', 'cmumps', 'scalapack', 'petsc', 'pord', 'parmetis', 'petsc', 'dmumps', 'sundials_nvecparallel', 'sundials_nvecserial', 'petsc', 'sundials_cvode', 'smumps']]
# ['petsc', 'cmumps', 'dmumps', 'HYPRE', 'mumps_common', 'pord', 'scalapack', 'smumps', 'sundials_cvode', 'sundials_nvecparallel', 'sundials_nvecserial', 'zmumps', 'parmetis']]
self.headers = ['petsc.h']
self.check_text = petsc_text
self.static = False
if os.environ.get("PE_ENV") is not None: # if on hazelhen
print("Same for Petsc.")
# on hazel hen login node do not run MPI test program because this is not possible (only compile)
self.run = False
self.number_output_lines = 4121
def check(self, ctx):
if os.environ.get("PE_ENV") is not None: # if on hazelhen
ctx.Message('Not checking for PETSc ... ')
ctx.Result(True)
return True
env = ctx.env
# --with-cc='+env["CC"]+'\
# debugging build handler
if self.have_option(env, "PETSC_DEBUG"):
# debug build with MUMPS
print("PETSc debugging build is on!")
self.set_build_handler([
'mkdir -p ${PREFIX}',
'./configure --prefix=${PREFIX} --with-debugging=yes --with-shared-libraries=1 \
--with-blas-lapack-lib=${LAPACK_DIR}/lib/libopenblas.so\
                --with-cc='+env["mpicc"]+'\
--download-mumps --download-scalapack --download-parmetis --download-metis --download-ptscotch --download-sundials --download-hypre \
| tee out.txt',
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
'ln -fs ${PREFIX}/lib/libparmetis.so ${PREFIX}/lib/parmetis.so' # create parmetis.so link for chaste
            ])  # --with-batch needed
else:
# standard release build with MUMPS
# This needs bison installed
# for metis to work, we need --download-mumps --download-scalapack --download-parmetis --download-metis --download-ptscotch
if not socket.gethostname() == "cmcs05":
self.set_build_handler([
'mkdir -p ${PREFIX}',
#'PATH=${PATH}:${DEPENDENCIES_DIR}/bison/install/bin \
'./configure --prefix=${PREFIX} --with-debugging=no --with-shared-libraries=1 \
--with-blas-lapack-lib=${LAPACK_DIR}/lib/libopenblas.so\
                --with-cc='+env["mpicc"]+'\
--download-mumps --download-scalapack --download-parmetis --download-metis --download-ptscotch --download-sundials --download-hypre \
COPTFLAGS=-O3\
CXXOPTFLAGS=-O3\
FOPTFLAGS=-O3 | tee out.txt',
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt', # do it twice, the first time fails with PGI
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
'ln -fs ${PREFIX}/lib/libparmetis.so ${PREFIX}/lib/parmetis.so' # create parmetis.so link for chaste
])
            else:  # only needed for developing the GPU version (usually on machine cmcs05):
self.set_build_handler([
'mkdir -p ${PREFIX}',
#'PATH=${PATH}:${DEPENDENCIES_DIR}/bison/install/bin \
'./configure --prefix=${PREFIX} --with-debugging=no --with-shared-libraries=1 \
--with-blas-lapack-lib=${LAPACK_DIR}/lib/libopenblas.so\
                --with-cc='+env["mpicc"]+'\
--download-mumps --download-scalapack --download-parmetis --download-metis --download-ptscotch --download-sundials --download-hypre \
COPTFLAGS=-O3\
CXXOPTFLAGS=-O3\
--with-mpi-dir=${MPI_DIR} --with-batch\
FOPTFLAGS=-O3 | tee out.txt',
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt', # do it twice, the first time fails with PGI
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
'ln -fs ${PREFIX}/lib/libparmetis.so ${PREFIX}/lib/parmetis.so' # create parmetis.so link for chaste
])
#ctx.Message('----------------------------------------------------\nNote that PETSc has been updated to version 3.12.3. \nTo update, run \'scons PETSC_REDOWNLOAD=True\'.\n(This message is independent of the currently installed version.)\n----------------------------------------------------\n')
ctx.Message('Checking for PETSc ... ')
self.check_options(env)
res = super(PETSc, self).check(ctx, loc_callback=find_conf)
#self.check_required(res[0], ctx)
# if installation of petsc fails, retry without mumps and extra packages like parmetis, hdf5 or hypre
        if not res[0] and socket.gethostname() != 'cmcs09':
ctx.Log('Retry without MUMPS\n')
ctx.Message('Retry to install a fall-back PETSc without MUMPS, Hypre, SUNDIALS and ParMETIS ...')
if "PETSC_REDOWNLOAD" in Package.one_shot_options:
Package.one_shot_options.remove('PETSC_REDOWNLOAD')
if "PETSC_REBUILD" in Package.one_shot_options:
Package.one_shot_options.remove('PETSC_REBUILD')
if self.have_option(env, "PETSC_DEBUG"):
# debug build, without MUMPS
self.set_build_handler([
'mkdir -p ${PREFIX}',
'./configure --prefix=${PREFIX} --with-shared-libraries=1 --with-debugging=yes \
--with-blas-lapack-lib=${LAPACK_DIR}/lib/libopenblas.so\
--with-mpi-dir=${MPI_DIR} --with-batch\
--with-cc='+env["mpicc"]+' | tee out.txt',
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt)',
])
                # Do not specify FC, CC and CXX; configure picks them up anyway, it only claims it would ignore them, but has already received them through another channel....
# 'make all', # do not add -j option, because it is not supported by Makefile of PETSc
# 'echo "sleep 3 s" && sleep 3',
# 'make install',
# 'make test',
else:
# release build without MUMPS
self.set_build_handler([
'mkdir -p ${PREFIX}',
'./configure --prefix=${PREFIX} --with-shared-libraries=1 --with-debugging=no \
--with-blas-lapack-lib=${LAPACK_DIR}/lib/libopenblas.so\
--with-cc='+env["mpicc"]+'\
COPTFLAGS=-O3\
CXXOPTFLAGS=-O3\
FOPTFLAGS=-O3 | tee out.txt',
'$$(sed -n \'/Configure stage complete./{n;p;}\' out.txt) | tee out2.txt || make',
'$$(sed -n \'/Now to install the libraries do:/{n;p;}\' out2.txt) || make install',
])
self.libs = ['petsc']
self.number_output_lines = 3990
res = super(PETSc, self).check(ctx, loc_callback=find_conf)
        self.check_required(res[0], ctx)
ctx.Result(res[0])
return res[0]
|
408033
|
import importlib
import pkgutil
from contextlib import (
suppress,
)
from functools import (
lru_cache,
)
from types import (
ModuleType,
)
from typing import (
Callable,
Union,
)
from .exceptions import (
MinosImportException,
)
@lru_cache()
def import_module(module_name: str) -> Union[type, Callable, ModuleType]:
"""Import the given module from a package"""
parts = module_name.rsplit(".", 1)
try:
kallable = _import_module(parts[0])
except ImportError:
        raise MinosImportException(f"Error importing {module_name!r}: the module does not exist.")
if len(parts) > 1:
try:
kallable = getattr(kallable, parts[1])
except AttributeError:
raise MinosImportException(f"Error importing {module_name!r}: the qualname does not exist.")
return kallable
def _import_module(module_name: str) -> Union[type, Callable, ModuleType]:
try:
return importlib.import_module(module_name)
except ImportError as exc:
if "." in module_name:
with suppress(MinosImportException):
return import_module(module_name)
raise exc
def classname(cls: Union[type, Callable]) -> str:
"""Compute the given class full name.
:param cls: Target class.
:return: An string object.
"""
if isinstance(cls, ModuleType):
return cls.__name__
# noinspection PyUnresolvedReferences
return f"{cls.__module__}.{cls.__qualname__}"
def get_internal_modules() -> list[ModuleType]:
"""Get the list of internal ``minos`` modules.
:return: A list of modules.
"""
return _import_submodules("minos") + _import_submodules("minos.plugins")
def _import_submodules(prefix: str) -> list[ModuleType]:
try:
base = importlib.import_module(prefix)
except ModuleNotFoundError:
return list()
modules = list()
for loader, module_name, _ in pkgutil.iter_modules(base.__path__):
module = importlib.import_module(f"{prefix}.{module_name}")
modules.append(module)
return modules
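# Hedged usage sketch (added, not part of the module): round-tripping a stdlib
# class through classname() and import_module(); "collections.OrderedDict" is
# only an illustrative target.
if __name__ == "__main__":
    from collections import OrderedDict
    assert classname(OrderedDict) == "collections.OrderedDict"
    assert import_module("collections.OrderedDict") is OrderedDict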
|
408048
|
from . import views
from rest_framework.routers import SimpleRouter
router = SimpleRouter()
router.register("posts", views.PostViewSet, "posts")
urlpatterns = router.urls
|
408120
|
from copy import deepcopy
from dataclasses import dataclass, field
from random import shuffle
from typing import Optional
import logging
from paiargparse import pai_dataclass, pai_meta
from tfaip import TrainerPipelineParams, TrainerPipelineParamsBase, PipelineMode
from calamari_ocr.ocr.dataset.datareader.base import CalamariDataGeneratorParams
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.ocr.dataset.datareader.pagexml.reader import PageXML
from calamari_ocr.ocr.dataset.params import DATA_GENERATOR_CHOICES
logger = logging.getLogger(__name__)
@pai_dataclass(alt="TrainVal")
@dataclass
class CalamariDefaultTrainerPipelineParams(
TrainerPipelineParams[CalamariDataGeneratorParams, CalamariDataGeneratorParams]
):
train: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(choices=DATA_GENERATOR_CHOICES, mode="flat"),
)
val: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(choices=DATA_GENERATOR_CHOICES, mode="flat"),
)
@pai_dataclass(alt="TrainOnly")
@dataclass
class CalamariTrainOnlyPipelineParams(
TrainerPipelineParamsBase[CalamariDataGeneratorParams, CalamariDataGeneratorParams]
):
def train_gen(self) -> CalamariDataGeneratorParams:
return self.train
def val_gen(self) -> Optional[CalamariDataGeneratorParams]:
return None
train: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(choices=DATA_GENERATOR_CHOICES, mode="flat"),
)
@pai_dataclass(alt="SplitTrain")
@dataclass
class CalamariSplitTrainerPipelineParams(
TrainerPipelineParams[CalamariDataGeneratorParams, CalamariDataGeneratorParams]
):
train: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(
choices=[FileDataParams, PageXML],
enforce_choices=True,
mode="flat",
),
)
validation_split_ratio: float = field(
default=0.2,
metadata=pai_meta(help="Use factor of n of the training dataset for validation."),
)
val: Optional[CalamariDataGeneratorParams] = field(default=None, metadata=pai_meta(mode="ignore"))
def __post_init__(self):
if self.val is not None:
# Already initialized
return
if not 0 < self.validation_split_ratio < 1:
raise ValueError("validation_split_ratio must be in (0, 1)")
# resolve all files so we can split them
self.train.prepare_for_mode(PipelineMode.TRAINING)
self.val = deepcopy(self.train)
samples = len(self.train)
n = int(self.validation_split_ratio * samples)
if n == 0:
raise ValueError(
f"Ratio is to small since {self.validation_split_ratio} * {samples} = {n}. "
f"Increase the amount of data or the split ratio."
)
logger.info(
f"Splitting training and validation files with ratio {self.validation_split_ratio}: "
f"{n}/{samples - n} for validation/training."
)
indices = list(range(samples))
shuffle(indices)
# split train and val img/gt files. Use train settings
self.train.select(indices[n:])
self.val.select(indices[:n])
|
408130
|
import openmdao.api as om
from openaerostruct.common.reynolds_comp import ReynoldsComp
from openaerostruct.common.atmos_comp import AtmosComp
class AtmosGroup(om.Group):
def setup(self):
self.add_subsystem(
"atmos",
AtmosComp(),
promotes_inputs=["altitude", "Mach_number"],
promotes_outputs=["T", "P", "rho", "speed_of_sound", "mu", "v"],
)
self.add_subsystem("reynolds", ReynoldsComp(), promotes_inputs=["rho", "mu", "v"], promotes_outputs=["re"])
|
408143
|
import numpy as np
from extra_keras_metrics import F1Score
from sklearn.metrics import f1_score as baseline
from .utils import compare_metrics
def f1score_score(y_true, y_pred):
return baseline(y_true, np.round(y_pred).astype(int))
def test_f1score():
    compare_metrics(F1Score(), f1score_score)
|
408162
|
import bisect
import numpy as np
class ReadCoverage:
def __init__(self, in_paf):
self.paf = in_paf
self.glob_mean = None
self.glob_std = None
self.coverage_map = dict()
self.ctg_lens = dict()
self._make_coverage_map()
self._get_glob_mean()
@staticmethod
def _tabulate_coverage(cov_list):
current_coverage = 0
coverage_list = []
seen = set()
for header, pos in cov_list:
if header in seen:
current_coverage -= 1
coverage_list.append((pos, current_coverage))
seen.remove(header)
else:
current_coverage += 1
coverage_list.append((pos, current_coverage))
seen.add(header)
return coverage_list
@staticmethod
def _smooth_supported_breaks(sup_breaks, break_types):
"""
If there are multiple low coverage breaks in close proximity,
merge it into one break at the lowest coverage point.
"""
i = 0
j = 1
while j < len(sup_breaks):
if break_types[i] == 'l' and break_types[j] == 'l':
if abs(sup_breaks[i][0] - sup_breaks[j][0]) < 100000:
# Merge these two break points
sup_breaks[i] = min([sup_breaks[i], sup_breaks[j]], key=lambda x: x[1])
sup_breaks.pop(j)
else:
i += 1
j += 1
else:
i += 1
j += 1
return [z[0] for z in sup_breaks]
def _trim_ends(self, dist=25000):
""" Remove the ends of the contigs from the coverage map. """
for i in self.coverage_map:
# Start with the beginning of the contig
start_idx = 0
end_idx = 0
for j in range(len(self.coverage_map[i])):
if self.coverage_map[i][j][0] < dist:
start_idx = j
if self.coverage_map[i][j][0] > self.ctg_lens[i] - dist:
end_idx = j-1
break
self.coverage_map[i] = self.coverage_map[i][start_idx:end_idx]
# Remove contigs which don't have coverage info.
header_keys = list(self.coverage_map.keys())
for i in header_keys:
if not self.coverage_map[i]:
self.coverage_map.pop(i)
def _make_coverage_map(self):
"""
Populate self.coverage_map. This is a dictionary that associates each contig header with a list of alignment
positions and their coverage levels.
"""
# Associate with each contig header, a list of (query header, start), (query header, end)
alns_pos = dict()
with open(self.paf, 'r') as f:
for line in f:
L1 = line.rstrip().split('\t')
# Only consider an alignment if at least 75% of the read aligned.
if abs((int(L1[3]) - int(L1[2])))/int(L1[1]) >= 0.75:
if L1[5] in alns_pos:
alns_pos[L1[5]].append((L1[0], int(L1[7])))
alns_pos[L1[5]].append((L1[0], int(L1[8])))
else:
alns_pos[L1[5]] = [(L1[0], int(L1[7])), (L1[0], int(L1[8]))]
self.ctg_lens[L1[5]] = int(L1[6])
# Sort these coverage positions and get the coverage map
for i in alns_pos:
self.coverage_map[i] = self._tabulate_coverage(sorted(alns_pos[i], key=lambda x: x[1]))
self._trim_ends()
def _get_glob_mean(self):
L1 = []
for i in self.coverage_map:
# In the case where we have multiple coverage values for one position, take the last one.
last_pos = 0
curr_val = 0
for j in self.coverage_map[i]:
if j[0] == last_pos:
curr_val = j[1]
else:
last_pos = j[0]
L1.append(curr_val)
                    curr_val = j[1]
L1 = np.asarray(L1, dtype=np.int32)
self.glob_mean = np.median(L1)
self.glob_std = np.sqrt(self.glob_mean)
def _get_index_range(self, header, start_ind, distance):
"""
Get the list of indices that are contained within a distance around the start index
"""
all_inds = []
low_counter = 1
# Check if the start point is at the end of the contig
if start_ind == len(self.coverage_map[header]):
start_ind -= 1
start_pos = self.coverage_map[header][start_ind][0]
is_low = False
# Get all coverage map indices representing regions 50kbp upstream of the start
while not is_low:
next_ind = start_ind-low_counter
if next_ind < 0:
is_low = True
else:
next_pos = self.coverage_map[header][next_ind][0]
if start_pos - next_pos > distance:
is_low = True
else:
all_inds.append(next_ind)
low_counter += 1
# Repeat for 50kbp downstream
high_counter = 1
is_high = False
while not is_high:
next_ind = start_ind + high_counter
if next_ind >= len(self.coverage_map[header]):
is_high = True
else:
next_pos = self.coverage_map[header][next_ind][0]
if next_pos - start_pos > distance:
is_high = True
else:
all_inds.append(next_ind)
high_counter += 1
return sorted(all_inds)
def check_break_cov(self, header, in_breaks, min_cov=None, max_cov=None):
"""
Given a list of potential break points, verify if those break points occur around low or high coverage
areas. If so, replace the candidate break point with the low/high coverage break point.
:param header: contig header for these breaks
:param in_breaks: list of candidate break points
:param min_cov: break at coverage levels below this value
:param max_cov: break at coverage levels above this value
:return: list of real break points, or empty list if not breaking is recommended
"""
# Check that we have coverage info for this contig.
if header not in self.coverage_map:
return []
if min_cov is None or max_cov is None:
# Automatically calculate min and max coverage
min_cov = max(self.glob_mean - (self.glob_std*3), 0)
max_cov = self.glob_mean + (self.glob_std*3)
supported_breaks = []
break_types = []
for i in in_breaks:
# Get the coverage for the position closest to this potential break point
ins_ind = bisect.bisect_left(self.coverage_map[header], (i, 0))
# Get the coverage for positions within 50 kbp of the candidate coverage position
# Exclude positions near the ends of the sequence.
ind_range = self._get_index_range(header, ins_ind, 50000)
if len(set(ind_range)) > 1:
# Check for low coverage
lowest_cov = min(self.coverage_map[header][ind_range[0]:ind_range[-1]], key=lambda x: x[1])
if lowest_cov[1] < min_cov:
supported_breaks.append(lowest_cov)
break_types.append('l')
continue
# Check for high coverage
highest_cov = max(self.coverage_map[header][ind_range[0]:ind_range[-1]], key=lambda x: x[1])
if highest_cov[1] > max_cov:
supported_breaks.append(highest_cov)
break_types.append('h')
return self._smooth_supported_breaks(supported_breaks, break_types)
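# Hedged usage sketch (added, not part of the module): "alignments.paf" is a
# hypothetical PAF alignment file and the candidate break positions are
# illustrative.
if __name__ == "__main__":
    rc = ReadCoverage("alignments.paf")
    print("global median coverage:", rc.glob_mean)
    # Validate candidate breakpoints on a contig against coverage evidence.
    print(rc.check_break_cov("ctg1", [150000, 600000]))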
|
408195
|
from binascii import hexlify, unhexlify
from dataclasses import dataclass
import random
import uuid
from flask import (
Flask,
Blueprint,
request,
render_template,
)
from Crypto.Random import (
get_random_bytes
)
from crypto.crypto import (
get_rsa_key,
get_public_key,
sign_message,
validate_signature,
blind_message,
unblind_message,
hash_message,
)
app = Flask(__name__)
if app.config['DEBUG']:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
source_domain = Blueprint('source_domain', __name__)
destination_domain = Blueprint('destination_domain', __name__)
# normally everything handled by 'blinding_service' would be a browser internal
blinding_service = Blueprint('blinding_service', __name__)
# list of outstanding unredeemed csrf tokens
csrf_tokens = set()
# basic data structure for linked items
@dataclass
class LinkedItem:
name: str
_id: int
value: int
click_data_src: str
report_data_dest: str
all_linked_items = [
LinkedItem('Cool LinkedItem 1', 1, 10, uuid.uuid4(), uuid.uuid4()),
LinkedItem('Cool LinkedItem 2', 2, 25, uuid.uuid4(), uuid.uuid4()),
LinkedItem('Cool LinkedItem 3', 3, 55, uuid.uuid4(), uuid.uuid4()),
LinkedItem('Cool LinkedItem 4', 4, 125, uuid.uuid4(), uuid.uuid4()),
LinkedItem('Cool LinkedItem 5', 5, 1150, uuid.uuid4(), uuid.uuid4()),
]
def build_new_csrf_token():
csrf_token = get_random_bytes(16).hex()
csrf_tokens.add(csrf_token)
return csrf_token
def validate_csrf_token(csrf_token):
if csrf_token in csrf_tokens:
csrf_tokens.remove(csrf_token)
return True
else:
return False
@app.context_processor
def utility_processor():
return {
'build_new_csrf_token': build_new_csrf_token
}
@app.route('/')
def hello_world():
return 'Hello, World!'
@source_domain.route('/')
def source_domain_index():
ads = random.sample(all_linked_items, 2)
return render_template('source.html', ads=ads)
@source_domain.route('/.well-known/public-key/<string:destination_domain>/<string:click_data_src>')
def source_domain_public_key(
destination_domain: str,
click_data_src: str,
):
return hexlify(get_public_key(
('source-domain.com', destination_domain, click_data_src)
))
@source_domain.route('/.well-known/blind-signing', methods=['POST'])
def source_domain_blind_signing():
destination_domain = request.form['destination_domain']
click_data_src = request.form['click_data_src']
blinded_nonce_src = unhexlify(request.form['blinded_nonce_src'])
csrf_token = request.form['csrf_token']
if not validate_csrf_token(csrf_token):
raise Exception('invalid csrf token')
return hexlify(sign_message(
get_rsa_key(('source-domain.com', destination_domain, click_data_src)),
blinded_nonce_src
))
@source_domain.route('/.well-known/report', methods=['POST'])
def source_domain_report():
destination_domain = request.form['destination_domain']
click_data_src = request.form['click_data_src']
report_data_dest = request.form['report_data_dest']
nonce = unhexlify(request.form['nonce'])
hashed_nonce = hash_message(nonce, 256)
signature_src = unhexlify(request.form['signature_src'])
signature_dest = unhexlify(request.form['signature_dest'])
source_domain_public_key = get_public_key(
('source-domain.com', destination_domain, click_data_src)
)
# this would normally make an http request out to a service like key transparency
destination_domain_public_key = get_public_key(('destination-domain.com', report_data_dest))
signature_src_valid = validate_signature(
source_domain_public_key,
hashed_nonce,
signature_src
)
signature_dest_valid = validate_signature(
destination_domain_public_key,
hashed_nonce,
signature_dest
)
valid = str(signature_src_valid and signature_dest_valid)
app.logger.info(
        f'report received: valid: {valid} \n'
f'click_data_src: {click_data_src}, \n'
f'report_data_dest: {report_data_dest}\n'
f'signature_src_valid: {signature_src_valid} \n'
f'signature_dest_valid: {signature_dest_valid}'
)
return valid
@destination_domain.route('/', defaults={'linked_item_id': None})
@destination_domain.route('/linked_items/<int:linked_item_id>')
def destination_domain_linked_items(
linked_item_id: int
):
if linked_item_id is None:
linked_items = all_linked_items
else:
linked_items = [
linked_item for linked_item in all_linked_items
if linked_item._id == linked_item_id
]
return render_template('destination.html', linked_items=linked_items)
@destination_domain.route('/.well-known/public-key/<string:report_data_dest>')
def destination_domain_public_key(
report_data_dest: str,
):
return hexlify(get_public_key(('destination-domain.com', report_data_dest)))
@destination_domain.route('/.well-known/blind-signing', methods=['POST'])
def destination_domain_blind_signing():
report_data_dest = request.form['report_data_dest']
blinded_nonce_dest = unhexlify(request.form['blinded_nonce_dest'])
csrf_token = request.form['csrf_token']
if not validate_csrf_token(csrf_token):
raise Exception('invalid csrf token')
return hexlify(sign_message(
get_rsa_key(('destination-domain.com', report_data_dest)),
blinded_nonce_dest
))
# normally everything handled by 'blinding_service' would be a browser internal
@blinding_service.route('/')
def hello_blinding_service():
return 'Hello from blinding-service.com!'
@blinding_service.route('/.well-known/blind', methods=['POST'])
def blinding_service_blind_message():
public_key = unhexlify(request.form['public_key'])
message = unhexlify(request.form['message'])
hashed = hash_message(message, 256)
blinding_factor = unhexlify(request.form['blinding_factor'])
return hexlify(blind_message(public_key, hashed, blinding_factor))
@blinding_service.route('/.well-known/unblind', methods=['POST'])
def blinding_service_unblind_message():
public_key = unhexlify(request.form['public_key'])
message = unhexlify(request.form['message'])
hash_msg = hash_message(message, 256)
blinded_message = unhexlify(request.form['blind_message'])
blinding_factor = unhexlify(request.form['blinding_factor'])
unblinded = unblind_message(public_key, blinded_message, blinding_factor)
valid = validate_signature(public_key, hash_msg, unblinded)
if not valid:
raise Exception('unblinded signature is invalid')
return hexlify(unblinded)
@blinding_service.route('/.well-known/random-bytes/<int:bytecount>')
def random_bytes(bytecount):
return hexlify(get_random_bytes(bytecount))
app.register_blueprint(source_domain, url_prefix='/source-domain.com')
app.register_blueprint(destination_domain, url_prefix='/destination-domain.com')
app.register_blueprint(blinding_service, url_prefix='/blinding-service.com')
if __name__ == '__main__':
app.run(
extra_files=['.client/*']
)
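# Flow summary (added commentary derived from the routes above): the blinding
# service blinds a hashed nonce; source-domain.com and destination-domain.com
# each blind-sign it behind CSRF-protected endpoints; the service unblinds and
# verifies each signature; and /source-domain.com/.well-known/report accepts a
# report only when both unblinded signatures validate against the published keys.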
|
408213
|
import warnings
from django.shortcuts import render, redirect
from django.views.generic import View
from django.contrib.auth import get_user_model
from dashboard.views.utils import Util, page_manage
class Register(View):
@page_manage
def get(self, request):
context = Util.get_context(request)
return render(request, 'dashboard/register.html', context=context)
@page_manage
def post(self, request):
email = request.POST['email']
password = request.POST['password']
vendor = request.POST['vendor']
aws_access_key = request.POST['aws_access_key']
aws_secret_key = request.POST['aws_secret_key']
aws_region = request.POST['aws_region']
normalized_email = get_user_model().objects.normalize_email(email)
users = get_user_model().objects.all().filter(email=normalized_email)
if len(users) > 0:
if len(users) > 1:
warnings.warn('there are {} users with email {}'.format(len(users), email))
Util.add_alert(request, '이미 계정이 존재합니다.')
return redirect('register')
elif len(password) < 7:
Util.add_alert(request, '비밀번호는 7자 이상입니다.')
return redirect('register')
elif not Util.is_valid_access_key(aws_access_key, aws_secret_key):
Util.add_alert(request, '유효한 AccessKey 를 입력해주세요.')
return redirect('register')
else:
credentials = {}
if vendor == 'aws':
credentials['aws'] = {
'access_key': aws_access_key,
'secret_key': aws_secret_key,
'region': aws_region,
}
get_user_model().objects.create_user(
email, password,
credentials=credentials,
)
Util.add_alert(request, '회원가입에 성공하였습니다.')
return redirect('index')
|
408252
|
text = input("Enter string:")
str_list = list(text)
rev_list = []
rev_str = ""
length = len(str_list)
for i in range(-1, -(length + 1), -1):
    rev_list.append(str_list[i])
rev = rev_str.join(rev_list)
print(rev)
print("Character in even index:")
for i in range(0, len(text), 2):
    print(text[i])
|
408254
|
import sys
from enum import Enum
class flag(Enum):
UNVISITED = -1
VISITED = -2
AL = []
dfs_num = []
ts = []
def toposort(u):
global AL
global dfs_num
global ts
dfs_num[u] = flag.VISITED.value
for v, w in AL[u]:
if dfs_num[v] == flag.UNVISITED.value:
toposort(v)
ts.append(u)
def main():
global AL
global dfs_num
global ts
fp = open('toposort_in.txt', 'r')
V = int(fp.readline().strip())
AL = [[] for _ in range(V)]
for u in range(V):
tkn = list(map(int, fp.readline().strip().split()))
k = tkn[0]
for i in range(k):
v, w = tkn[2*i+1], tkn[2*i+2]
AL[u].append((v, w))
print('Topological Sort (the input graph must be DAG)')
dfs_num = [flag.UNVISITED.value] * V
for u in range(V):
if dfs_num[u] == flag.UNVISITED.value:
toposort(u)
ts = ts[::-1]
print(' '.join(map(str, ts)))
main()
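# Hedged sketch of the expected toposort_in.txt format (inferred from the
# parsing above): the first line is the vertex count V, followed by one line
# per vertex u of the form "k v1 w1 ... vk wk" listing its k weighted
# out-edges. For the DAG 0->1, 0->2, 1->2 (all weights 1):
#   3
#   2 1 1 2 1
#   1 2 1
#   0
# Expected output: 0 1 2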
|
408284
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
def f(x):
"""Runge's function."""
return old_div(1,(1 + x**2))
# Plot f
import matplotlib.pyplot as plt
import numpy as np
xcoor = np.linspace(-3, 3, 101)
ycoor = f(xcoor)
plt.plot(xcoor, ycoor)
plt.savefig('f_plot.png')
# Compute f'(x) symbolically and make a Python function out of it
import sympy as sm
x = sm.Symbol('x')
f_expr = f(x)
print(f_expr)
df_expr = sm.diff(f_expr, x)
print(df_expr)
df = sm.lambdify(x, df_expr) # turn expression into Python function
# Plot f'(x)
plt.figure()
plt.plot(xcoor, df(xcoor))
plt.savefig('df_plot.png')
plt.show()
|
408296
|
from all_models.models.A0001_user import *
from all_models.models.A0002_config import *
from all_models.models.A0003_attribute import *
from all_models.models.A0004_globals import *
from all_models.models.A0005_interface import *
from all_models.models.A0006_testcase import *
from all_models.models.A0007_task import *
from all_models.models.A0008_standard_interface import *
from all_models.models.A0009_python_manage import *
from all_models.models.A0010_webprotal import *
from all_models.models.A0011_version_manage import *
from all_models.models.A0012_admin import *
from all_models.models.A0013_ui_test import *
from all_models.models.A0014_ui_task import *
from all_models.models.A0014_ui_testcase import *
from all_models.models.A0015_ui_globals import *
from all_models.models.A0016_ui_version_manage import *
from all_models.models.A0017_ui_package_manage import *
from all_models.models.A0018_ui_mobile_server import *
from all_models.models.A0020_deployment_tool import *
from all_models.models.A0021_task_suite import *
|
408341
|
from django.db import models
# Create your models here.
from sqlorders.models import DbConfig
from users.models import UserAccounts
class DbQuerySchemas(models.Model):
"""
    Stores the schema names of the remote databases available for query
"""
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
cid = models.ForeignKey(DbConfig, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='数据库')
schema = models.CharField(null=False, max_length=64, default='', verbose_name=u'库名')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.cid.comment, self.schema])
def display_comment(self):
return self.cid.comment
display_comment.short_description = '主机'
class Meta:
verbose_name = u'DB查询库'
verbose_name_plural = verbose_name
unique_together = (('cid', 'schema'),)
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_schemas'
class DbQueryTables(models.Model):
"""
    Stores the table names of the remote databases available for query
"""
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
schema = models.ForeignKey(DbQuerySchemas, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='库名')
table = models.CharField(null=False, max_length=128, default='', verbose_name=u'表名')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.schema.cid.comment, self.schema.schema, self.table])
def display_comment(self):
return self.schema.cid.comment
def display_schema(self):
return self.schema.schema
display_comment.short_description = '主机'
display_schema.short_description = '库名'
class Meta:
verbose_name = u'DB查询表'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_tables'
class DbQueryUserPrivs(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
user = models.ForeignKey(UserAccounts, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='用户')
schemas = models.ManyToManyField(DbQuerySchemas, verbose_name='允许访问的库')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return self.user.username
class Meta:
verbose_name = u'DB查询用户权限'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_user_privileges'
class DbQueryGroupPrivs(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
group = models.CharField(null=False, max_length=128, default='', verbose_name=u'组名')
user = models.ManyToManyField(UserAccounts, verbose_name='用户')
schemas = models.ManyToManyField(DbQuerySchemas, verbose_name='允许访问的库')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return self.group
class Meta:
verbose_name = u'DB查询组权限'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_group_privileges'
class DbQueryUserAllowedTables(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
tables = models.ForeignKey(DbQueryTables, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='表')
user_privs = models.ForeignKey(DbQueryUserPrivs, blank=True, null=True, on_delete=models.SET_NULL,
verbose_name='权限')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.tables.schema.cid.comment, self.tables.schema.schema, self.tables.table])
class Meta:
verbose_name = u'允许用户访问的表'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_user_allowed_tables'
class DbQueryUserDenyTables(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
tables = models.ForeignKey(DbQueryTables, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='表')
user_privs = models.ForeignKey(DbQueryUserPrivs, blank=True, null=True, on_delete=models.SET_NULL,
verbose_name='权限')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.tables.schema.cid.comment, self.tables.schema.schema, self.tables.table])
class Meta:
verbose_name = u'禁止用户访问的表'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_user_deny_tables'
class DbQueryGroupAllowedTables(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
tables = models.ForeignKey(DbQueryTables, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='表')
group_privs = models.ForeignKey(DbQueryGroupPrivs, blank=True, null=True, on_delete=models.SET_NULL,
verbose_name='权限')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.tables.schema.cid.comment, self.tables.schema.schema, self.tables.table])
class Meta:
verbose_name = u'允许组访问的表'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_group_allowed_tables'
class DbQueryGroupDenyTables(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键ID')
tables = models.ForeignKey(DbQueryTables, blank=True, null=True, on_delete=models.SET_NULL, verbose_name='表')
group_privs = models.ForeignKey(DbQueryGroupPrivs, blank=True, null=True, on_delete=models.SET_NULL,
verbose_name='权限')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
def __str__(self):
return '.'.join([self.tables.schema.cid.comment, self.tables.schema.schema, self.tables.table])
class Meta:
verbose_name = u'禁止组访问的表'
verbose_name_plural = verbose_name
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_group_deny_tables'
class DbQueryLog(models.Model):
id = models.AutoField(primary_key=True, verbose_name=u'主键id')
username = models.CharField(max_length=64, null=False, verbose_name=u'用户名')
host = models.CharField(max_length=256, null=False, verbose_name=u'目标数据库地址')
schema = models.CharField(null=False, max_length=128, default='', verbose_name=u'目标数据库')
tables = models.CharField(null=False, max_length=200, default='', verbose_name=u'目标表名')
query_sql = models.TextField(null=False, default='', verbose_name=u'查询SQL')
query_consume_time = models.FloatField(null=False, default=0.000, verbose_name=u'查询耗时,单位s')
query_status = models.CharField(max_length=2048, default='', verbose_name=u'查询是否成功或失败的原因')
    affected_rows = models.IntegerField(default=0, null=False, verbose_name=u'影响行数')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=u'查询时间')
class Meta:
verbose_name = u'DB查询日志'
verbose_name_plural = verbose_name
index_together = (('username',), ('schema',), ('tables',),)
default_permissions = ()
app_label = 'sqlquery'
db_table = 'yasql_sqlquery_log'
|
408385
|
from slot_attention.slot_attention import SlotAttention
from slot_attention.slot_attention_experimental import SlotAttentionExperimental
|
408392
|
from venv import EnvBuilder
from .deps_handler import DepsHandle
import shutil
import os
from subprocess import call
import logging
from simple_term_menu import TerminalMenu
extra_options = ["manually specify", "ignore dependency"]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def create_env(path, options):
DIR = os.path.join(path)
logging.info('creating virtual environment')
builder = EnvBuilder(**options, with_pip=True)
builder.create(DIR)
logging.info('Created virtual environment')
def migrate(virt_dir, path):
logging.info(
bcolors.OKGREEN + 'Migrating the application...' +
bcolors.ENDC
)
if not os.path.exists(os.path.join(virt_dir, 'app')):
shutil.copytree(path, os.path.join(virt_dir, 'app'))
print(bcolors.OKGREEN + 'Migration done.' + bcolors.ENDC)
def ask_package_input(dep):
pkg_name = input(
bcolors.BOLD +
"Specify the package for module {} in the ".format(dep) +
"format <package_name>==<version> or just <package_name>, " +
"for example : flask==0.0.1 :\n>> " +
bcolors.ENDC
)
if not pkg_name:
return ""
pkg_name = str(pkg_name).strip().replace("\n", "")
return pkg_name
def select_dependency(dep, packages):
print()
if len(packages) == 0:
print(
bcolors.WARNING +
"No package was identified installed on your system for module " +
"{}, what you want to do??".format(dep) +
bcolors.ENDC
)
menu = TerminalMenu(extra_options)
selected_idx = menu.show()
if selected_idx == 0:
pkg_name = ask_package_input(dep)
if pkg_name == "":
print(
bcolors.WARNING +
"Module {} ignored.".format(dep) +
bcolors.ENDC
)
return ""
else:
print(
bcolors.OKGREEN +
"Using {} as package for module {}".format(pkg_name, dep) +
"." + bcolors.ENDC
)
return pkg_name
else:
print(
bcolors.WARNING +
"Module {} ignored.".format(dep) +
bcolors.ENDC
)
return ""
else:
print(
bcolors.BOLD +
"Identifed following packages {} ".format(packages) +
"for module {}, select any one of them or choose ".format(dep) +
"to ingore or provide your own package name: " +
bcolors.ENDC
)
options = packages
options.extend(extra_options)
menu = TerminalMenu(options)
selected_idx = menu.show()
        if selected_idx is None or selected_idx == len(options) - 1:
print(
bcolors.WARNING + "Module {} ignored.".format(dep) +
bcolors.ENDC
)
return ""
elif selected_idx == len(options) - 2:
pkg_name = ask_package_input(dep)
if pkg_name == "":
print(
bcolors.WARNING +
'Module {} ignored.'.format(dep) +
bcolors.ENDC
)
return ""
else:
print(
bcolors.OKGREEN +
"Using {} as package for module {}".format(pkg_name, dep) +
"." + bcolors.ENDC
)
return pkg_name
else:
print(
bcolors.OKGREEN +
"Using {} as a package for module {}.".format(
options[selected_idx],
dep
) +
bcolors.ENDC
)
return options[selected_idx]
def install_deps(virtual_env, app):
logging.info('Scanning for dependencies..')
deps = []
# if requirements.txt exists, no need to identify dependencies
req_path = os.path.join(app, "requirements.txt")
if os.path.exists(req_path):
        with open(req_path, 'r') as req_file:
            deps = req_file.read().split("\n")
logging.info("Found requirements.txt in the local project")
logging.info(
"Found following packages in requirements.txt - {}"
.format(deps)
)
else:
deps = DepsHandle(root=app).run_task()
logging.info('Found dependencies : {}'.format(deps))
logging.info('Installing dependencies .... ')
virt_app_req_path = os.path.join(virtual_env, 'app', 'requirements.txt')
if (not os.path.exists(req_path)) and os.path.exists(virt_app_req_path):
os.remove(virt_app_req_path)
all_packages = []
print('\n---------REQUIRES YOUR INPUT, PAY ATTENTION----------------')
    for dep in deps:
        if dep == "":
            continue
        if os.path.exists(req_path):
            # requirements.txt entries are already package specs, so use them directly
            all_packages.append(dep)
            continue
        package_name = select_dependency(dep, deps[dep])
if package_name == "":
continue
all_packages.append(package_name)
logging.info(
"Starting to install packages - {}".format(all_packages)
)
for package_name in all_packages:
exec_suffix = ['install', package_name, '--prefix='+virtual_env]
call([os.path.join(virtual_env, 'bin', 'pip3'), *exec_suffix])
logging.info('Installed {}'.format(package_name))
if not os.path.exists(req_path):
with open(virt_app_req_path, 'a') as writer:
writer.write("{}\n".format(package_name))
print(
bcolors.OKGREEN +
"Installed all dependencies a selected." +
bcolors.ENDC
)
def get_deps(app):
deps = DepsHandle(root=app).run_task()
return deps
def gen_requirements(app, dest):
deps = get_deps(app)
dest_path = os.path.join(dest, 'requirements.txt')
with open(dest_path, 'w') as writer:
print('\n---------REQUIRES YOUR INPUT, PAY ATTENTION----------------')
for dep in deps:
if dep == "":
continue
package_name = select_dependency(dep, deps[dep])
if package_name == "":
continue
writer.write("{}\n".format(package_name))
print(
bcolors.OKGREEN + "Generated requirements.txt at {}".format(dest) +
bcolors.ENDC
)
print(
bcolors.OKGREEN + 'All done, thanks for using this tool ..' +
bcolors.ENDC
)
def check_path_exist(path, check_abs=False):
path_exist = os.path.exists(path)
if path_exist and check_abs and not os.path.isabs(path):
return False
return path_exist
# putting it all together :
def gen_virt_env(virt_dir, app_dir, options):
create_env(virt_dir, options)
migrate(virt_dir, app_dir)
install_deps(virt_dir, app_dir)
print(
bcolors.OKGREEN + 'All done, thanks for using this tool ..' +
bcolors.ENDC
)
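# A minimal usage sketch (not part of the tool itself): the paths below are
# hypothetical, and the options dict simply forwards venv.EnvBuilder keyword
# arguments such as `clear` or `symlinks`.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    if check_path_exist('/tmp/myapp'):
        gen_virt_env('/tmp/myapp_env', '/tmp/myapp', {'clear': True})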
|
408450
|
import parser
import player
import random
opcodes = { # Opcodes are keys, number of ticks are values.
'harvest': 5,
'plant': 4,
'peek': 4,
'poke': 3,
'goto': 1,
'ifequal': 2,
'ifless': 2,
'ifmore': 2,
'add': 3,
'sub': 3,
'mult': 5,
'div': 8,
'mod': 7,
'random': 6
}
indirectTicks = 2 # number of ticks for each $
harvestError = 15 # number of ticks if harvest fails
valLimit = 2**16 # Memory values wrap into [0, 2**16); registers into (-2**16, 2**16)
class CPU(object):
def __init__(self, memory=None, fruit=None, players=None):
self.memory = memory
self.players = players
self.fruit = fruit
self.ticks = 0
self.next = 0
self.registers = {'rw': 0, 'rt': 0}
def execute(self):
nextPlayer = self.players[self.next]
if nextPlayer.delay == 0:
try:
self.run(nextPlayer)
except Exception as e:
print("Exception thrown by " + nextPlayer.displayName)
else:
nextPlayer.delay = nextPlayer.delay - 1
self.next = self.next + 1
if self.next == len(self.players):
self.next = 0
# Update fruits
for f in self.fruit:
self.memory[f] = self.memory[f] - 1
if self.memory[f] < -100:
self.memory[f] = -100
# Update global registers
self.registers['rt'] = self.ticks
self.registers['rw'] = max(p.registers['rs'] for p in self.players)
def getMemoryValue(self, player, addr):
if addr < 0 or addr >= len(self.memory): # Bounds check.
player.registers["rf"] = 5
return 0
return self.memory[addr]
def plantMemory(self, player, addr):
if addr < 0 or addr >= len(self.memory): # Bounds check.
player.registers["rf"] = 5
return
if self.getMemoryValue(player, addr) < 0:
self.fruit.remove(addr)
self.memory[addr] = -1
self.fruit.add(addr)
def setMemoryValue(self, player, addr, val):
if addr < 0 or addr >= len(self.memory): # Bounds check.
player.registers["rf"] = 5
return
# Update fruit.
if self.getMemoryValue(player, addr) < 0:
self.fruit.remove(addr)
# Set value and wrap if needed.
if val >= 0 and val < valLimit:
self.memory[addr] = val
elif val < 0:
self.memory[addr] = val % (valLimit - 1)
player.registers['rf'] = 3
        else:  # val >= valLimit wraps as well
self.memory[addr] = val % (valLimit - 1)
player.registers['rf'] = 4
def getRegister(self, player, reg):
val = 0
if reg in player.registers:
val = player.registers[reg]
elif reg in self.registers:
val = self.registers[reg]
return val
# Only used for player setting a register.
def setRegister(self, player, reg, val):
        if reg in ('r0', 'r1', 'r2', 'r3'):
            if -valLimit < val < valLimit:
                player.registers[reg] = val
            elif val <= -valLimit:
                player.registers[reg] = val % (valLimit - 1)
                player.registers['rf'] = 1
            else:  # val >= valLimit
                player.registers[reg] = val % (valLimit - 1)
                player.registers['rf'] = 2
else:
player.registers['rf'] = 10
def getAddress(self, player, op):
addr = -1
if op.type == "INT":
addr = int(op.token)
elif op.type == "REG":
addr = self.getRegister(player, op.token)
return addr
def getValue(self, player, op):
val = -1
if op.type == "INT":
val = int(op.token)
elif op.type == "REG":
val = self.getRegister(player, op.token)
# Handle $ in front of values.
if op.prefixed:
val = self.getMemoryValue(player, val)
return val
def gotoLabel(self, player, label):
if label in player.labels:
player.next = player.labels[label] - 1 # We will add 1 at the end.
else:
player.registers['rf'] = 7
print(player.displayName + " failed goto " + label)
def run(self, player):
# Check if player's program has ended.
if player.next >= len(player.instructions):
return
inst = player.instructions[player.next]
op = inst.token
operands = inst.operands
dTicks = opcodes[op] # Keep track of ticks this run. Start with the instruction's tick cost.
player.registers['rf'] = 0 # Reset error flag
# For debugging:
#if player.displayName == "azh2":
# print(player.displayName + " " + op + " " + str(player.next) + " of " + str(len(player.instructions)))
if op == "harvest":
#print(player.displayName + " is harvesting!")
addr = self.getAddress(player, operands[0])
if self.getMemoryValue(player, addr) == -100:
self.setMemoryValue(player, addr, 0)
player.registers['rs'] = player.registers['rs'] + 5
else:
dTicks = dTicks + harvestError
player.registers['rf'] = 9
elif op == "plant":
#print(player.displayName + " is planting!")
if player.registers['rs'] > 0:
addr = self.getAddress(player, operands[0])
#self.setMemoryValue(player, addr, -1)
self.plantMemory(player, addr)
player.registers['rs'] = player.registers['rs'] - 1
else:
player.registers['rf'] = 8
elif op == "peek":
addr = self.getAddress(player, operands[1])
self.setRegister(player, operands[0].token, addr)
elif op == "poke":
val = self.getValue(player, operands[1])
addr = self.getAddress(player, operands[0])
self.setMemoryValue(player, addr, val)
elif op == "goto":
self.gotoLabel(player, operands[0].token)
elif op == "ifequal":
val1 = self.getValue(player, operands[0])
val2 = self.getValue(player, operands[1])
if val1 == val2:
self.gotoLabel(player, operands[2].token)
elif op == "ifless":
val1 = self.getValue(player, operands[0])
val2 = self.getValue(player, operands[1])
if val1 < val2:
self.gotoLabel(player, operands[2].token)
elif op == "ifmore":
val1 = self.getValue(player, operands[0])
val2 = self.getValue(player, operands[1])
if val1 > val2:
self.gotoLabel(player, operands[2].token)
elif op == "add":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
self.setRegister(player, operands[0].token, val1+val2)
elif op == "sub":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
self.setRegister(player, operands[0].token, val1-val2)
elif op == "mult":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
self.setRegister(player, operands[0].token, val1*val2)
elif op == "div":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
if val2 == 0:
player.registers['rf'] = 10
else:
self.setRegister(player, operands[0].token, val1//val2)
elif op == "mod":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
self.setRegister(player, operands[0].token, val1%val2)
elif op == "random":
val1 = self.getValue(player, operands[1])
val2 = self.getValue(player, operands[2])
self.setRegister(player, operands[0].token, random.randint(val1, val2+1))
else: # Should never happen.
pass
# Add in time for using $ prefix
for o in operands:
if o.prefixed:
dTicks = dTicks + indirectTicks
self.ticks = self.ticks + dTicks
player.delay = dTicks # Set delay based on instruction ticks
player.next = player.next + 1 # Next instruction to be executed
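if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the game proper): it builds
    # stand-in objects carrying only the attributes CPU actually touches,
    # since the real classes live in the parser/player modules imported above.
    from types import SimpleNamespace

    def _op(kind, token, prefixed=False):
        return SimpleNamespace(type=kind, token=token, prefixed=prefixed)

    # A single instruction: add r0, 1, 2
    inst = SimpleNamespace(token='add', operands=[
        _op('REG', 'r0'), _op('INT', '1'), _op('INT', '2')])
    demo = SimpleNamespace(displayName='demo', delay=0, next=0,
                           instructions=[inst], labels={},
                           registers={'r0': 0, 'r1': 0, 'r2': 0, 'r3': 0,
                                      'rf': 0, 'rs': 0})
    cpu = CPU(memory=[0] * 16, fruit=set(), players=[demo])
    cpu.execute()
    print(demo.registers['r0'])  # expected: 3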
|
408473
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import climate, sensor, time
from esphome.components.remote_base import CONF_TRANSMITTER_ID
from esphome.const import CONF_ID, CONF_TIME_ID, CONF_MAC_ADDRESS, \
ESP_PLATFORM_ESP32, \
UNIT_PERCENT, ICON_PERCENT
ESP_PLATFORMS = [ESP_PLATFORM_ESP32]
CONFLICTS_WITH = ['eq3_v1', 'esp32_ble_tracker']
DEPENDENCIES = ['time']
AUTO_LOAD = ['sensor', 'esp32_ble_clients']
CONF_VALVE = 'valve'
CONF_PIN = 'pin'
CONF_TEMP = 'temperature_sensor'
EQ3Climate = cg.global_ns.class_('EQ3Climate', climate.Climate, cg.PollingComponent)
CONFIG_SCHEMA = cv.All(climate.CLIMATE_SCHEMA.extend({
cv.GenerateID(): cv.declare_id(EQ3Climate),
cv.GenerateID(CONF_TIME_ID): cv.use_id(time.RealTimeClock),
cv.Required(CONF_MAC_ADDRESS): cv.mac_address,
cv.Optional(CONF_VALVE): sensor.sensor_schema(UNIT_PERCENT, ICON_PERCENT, 0),
cv.Optional(CONF_PIN): cv.string,
cv.Optional(CONF_TEMP): cv.use_id(sensor.Sensor)
}).extend(cv.polling_component_schema('4h')))
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield climate.register_climate(var, config)
cg.add(var.set_address(config[CONF_MAC_ADDRESS].as_hex))
time_ = yield cg.get_variable(config[CONF_TIME_ID])
cg.add(var.set_time(time_))
if CONF_TEMP in config:
sens = yield cg.get_variable(config[CONF_TEMP])
cg.add(var.set_temperature_sensor(sens))
if CONF_VALVE in config:
sens = yield sensor.new_sensor(config[CONF_VALVE])
cg.add(var.set_valve(sens))
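# Example YAML (a sketch; the platform name below is a guess based on
# CONFLICTS_WITH above, and the MAC address is a placeholder):
#
# climate:
#   - platform: eq3_v2
#     mac_address: "00:1A:22:00:00:00"
#     valve:
#       name: "EQ3 Valve"
#     update_interval: 4h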
|
408475
|
from __future__ import division
import h5py
import pickle
from gwpy.table import EventTable
import numpy as np
from scipy import integrate, interpolate
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lal
import lalsimulation
from pylal import antenna, cosmography
import argparse
import time
from scipy.signal import filtfilt, butter
from scipy.stats import norm, chi
from scipy.optimize import brentq
import os
import sys
#import matplotlib.mlab as mlab
from matplotlib.ticker import AutoMinorLocator
def parser():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(prog='data_prep.py',
description='generates GW data for application of deep learning networks.')
# arguments for reading in a data file
parser.add_argument('-d', '--dataset', type=str, help='test set')
parser.add_argument('-c', '--cutoff_freq', default=20.0, type=float, help='cutoff frequency used to generate template bank')
parser.add_argument('-tb', '--temp-bank', type=str, help='template bank .xml file')
parser.add_argument('-f', '--fsample', type=int, default=8192, help='the sampling frequency (Hz)')
parser.add_argument('-T', '--Tobs', type=int, default=1, help='the observation duration (sec)')
parser.add_argument('-R', '--ROC', action='store_true', default=False,
help='plot ROC curve if false else save results')
parser.add_argument('-r', '--res', type=str, default=None, help='path to file with results from CNN')
parser.add_argument('-n', '--name', type=str, default=None, help='name for ROC plot or data')
parser.add_argument('-I', '--detectors', type=str, nargs='+', default=['H1'], help='the detectors to use')
parser.add_argument('-b', '--basename', type=str, default='test', help='output file path and basename.')
parser.add_argument('-z', '--seed', type=int, default=1, help='the random seed')
    parser.add_argument('-w', '--wave-bank', type=bool, default=False, help='waveforms already generated? (note: argparse bool() treats any non-empty string as True)')
parser.add_argument('-wb', '--w-basename', type=str, default='test', help='location of waveform .pkl files')
return parser.parse_args()
def tukey(M,alpha=0.5):
"""
Tukey window code copied from scipy
"""
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
return np.array(w[:M])
def inner_alt(a, b, T_obs, fs, psd):
"""
Computes the noise weighted inner product in the frequency domain
Follows Babak et al Eq. 2 where one product is whitened and
    the other is unwhitened.
"""
N = T_obs * fs
df = 1.0 / T_obs
dt = 1.0 / fs
win = tukey(N, alpha=1.0 / 8.0)
idx = np.argwhere(psd == 0.0)
psd[idx] = 1e300
af = np.fft.rfft(a * win) * dt
bf = np.fft.rfft(b * win) * dt
temp = 4.0 * np.real(np.sum((np.conj(af) * bf) / np.sqrt(psd))) * df
return temp
def inner_FD(a, b, T_obs, fs, psd):
"""
Computes the noise weighted inner product in the frequency domain
Follows Babak et al Eq. 2 assuming both products are unwhitened.
"""
N = T_obs * fs
df = 1.0 / T_obs
dt = 1.0 / fs
win = tukey(N, alpha=1.0 / 8.0)
idx = np.argwhere(psd == 0.0)
psd[idx] = 1e300
af = a * dt
bf = b * dt # originally multiplied dt by np.fft.rfft(b * win)
temp = 4.0 * np.real(np.sum((np.conj(af) * bf) / psd)) * df # was originally complex conjugate of af
return temp
def inner(a, b, T_obs, fs, psd):
"""
Computes the noise weighted inner product in the frequency domain
Follows Babak et al Eq. 2 assuming both products are unwhitened.
"""
N = T_obs * fs
df = 1.0 / T_obs
dt = 1.0 / fs
win = tukey(N, alpha=1.0 / 8.0)
idx = np.argwhere(psd == 0.0)
psd[idx] = 1e300
af = np.fft.rfft(a * win) * dt
bf = b * dt # originally multiplied dt by np.fft.rfft(b * win)
temp = 4.0 * np.real(np.sum((np.conj(af) * bf) / psd)) * df # was originally complex conjugate of af
return temp
def meas_snr(data, template_p, template_c, Tobs, fs, psd):
"""
Computes the measured SNR for a given template and dataset
Follows Babak et al Eq. 9
"""
a = inner(data, template_p, Tobs, fs, psd)
b = inner(data, template_c * 1.j, Tobs, fs, psd)
c = inner_FD(template_p, template_p, Tobs, fs, psd)
return np.sqrt((a * a + b * b) / c)
def whiten_data(data,duration,sample_rate,psd):
"""
Takes an input timeseries and whitens it according to a psd
"""
# FT the input timeseries
#win = tukey(duration*sample_rate,alpha=1.0/8.0)
xf = data.real #np.fft.rfft(data)
    # deal with undefined PSD bins and normalise
idx = np.argwhere(psd==0.0)
psd[idx] = 1e300
xf /= (np.sqrt(0.5*psd*sample_rate))
# Detrend the data: no DC component.
xf[0] = 0.0
# Return to time domain.
#x = np.fft.irfft(xf)
# Done.
return xf
def whiten_data_losc(data, psd, fs):
"""
Whitens the data
Based on the LOSC tutorial code
"""
Nt = len(data)
dt = 1.0/fs
idx = np.argwhere(psd==0.0)
psd[idx] = 1e300
# whitening: transform to freq domain, divide by asd, then transform back,
# taking care to get normalization right.
hf = np.fft.rfft(data)
white_hf = hf / (np.sqrt(psd /dt/2.))
white_ht = np.fft.irfft(white_hf, n=Nt)
return white_ht
def looper(sig_data,tmp_bank,T_obs,fs,dets,psds,wpsds,basename,w_basename,f_low=20.0,wave_bank=False):
# define input parameters
N = T_obs * fs # the total number of time samples
dt = 1 / fs # the sampling time (sec)
amplitude_order = 0
phase_order = 7
approximant = lalsimulation.IMRPhenomD # waveform
ndet = len(dets) # number of detectors
    dist = 1e6 * lal.PC_SI # put it as 1 Mpc
# make waveforms for template bank
if wave_bank == False:
# loop over template bank params
for idx,w in enumerate(tmp_bank):
if idx == 0:
hp,hc,fmin = make_waveforms(w,dt,dist,fs,approximant,N,ndet,dets,psds,T_obs,f_low)
hp_bank = {idx:hp}
hc_bank = {idx:hc}
fmin_bank = {idx:fmin}
#if idx == 10:
# break
else:
hp_new,hc_new,fmin_new = make_waveforms(w,dt,dist,fs,approximant,N,ndet,dets,psds,T_obs,f_low)
hp_bank.update({idx:hp_new})
hc_bank.update({idx:hc_new})
fmin_bank.update({idx:fmin_new})
# dump contents of hp and hc banks to pickle file
pickle_hp = open("%shp.pkl" % basename,"wb")
pickle.dump(hp_bank, pickle_hp)
pickle_hp.close()
pickle_hc = open("%shc.pkl" % basename,"wb")
pickle.dump(hc_bank, pickle_hc)
pickle_hc.close()
pickle_fmin = open("%sfmin.pkl" % basename,"wb")
pickle.dump(fmin_bank, pickle_fmin)
pickle_fmin.close()
hp = hp_bank
hc = hc_bank
# load waveforms if already made
else:
# load hplus and hcross pickle file
pickle_hp = open("%shp.pkl" % w_basename,"rb")
hp = pickle.load(pickle_hp)
pickle_hc = open("%shc.pkl" % w_basename,"rb")
hc = pickle.load(pickle_hc)
pickle_fmin = open("%sfmin.pkl" % w_basename,"rb")
fmin_bank = pickle.load(pickle_fmin)
# loop over test signals
# not setup to do multi detector network yet
# If you're reading this code, I'm sorry but ...
# welcome to the 7th circle of hell.
for det,psd,wpsd in zip(dets,psds,wpsds):
sig_match_rho = []
hp_hc_wvidx = []
chi_rho = []
noise = sig_data[0][sig_data[1]==0]
signal = sig_data[0][sig_data[1]==1]
chi_bool = False
if chi_bool == True:
#psd_wht = gen_psd(fs, 1, op='AdvDesign', det='H1')
count = 0
            for idx in range(sig_data[0].shape[0]):
if sig_data[1][idx] == 0:
# whitened first template
                    h_idx = random.choice(list(hp.keys()))
#hp_1_wht = chris_whiten_data(hp[h_idx], T_obs, fs, psd.data.data, flag='fd')
#hc_1_wht = chris_whiten_data(hc[h_idx], T_obs, fs, psd.data.data, flag='fd')
# calculate chi distribution. For testing purposes only!
#chi_rho.append(meas_snr(sig_data[0][idx][0], hp_1_wht, hc_1_wht, T_obs, fs, wpsd))
chi_rho.append(chris_snr_ts(sig_data[0][idx],hp[h_idx],hc[h_idx],T_obs,fs,wpsd,fmin_bank[h_idx],flag='fd')[0][int(N/2)])
                    count += 1
                    print('{}: Chi Rho for signal {} = {}'.format(time.asctime(), idx, chi_rho[-1]))
# save list of chi rho for test purposes only
pickle_out = open("%schirho_values.pickle" % basename, "wb")
pickle.dump(chi_rho, pickle_out)
pickle_out.close()
# this loop defines how many signals you are looping over
#psd_wht = gen_psd(fs, 5, op='AdvDesign', det='H1')
        for i in range(sig_data[0].shape[0]):
#for i in range(1000):
rho = -np.inf
if i == 2:
for j, M in enumerate(hp):
if j ==2487:
# compute the max(SNR) of this template
#hp_0_wht = chris_whiten_data(hp[j], T_obs, fs, psd.data.data, flag='fd')
#hc_1_wht = chris_whiten_data(hc[j], T_obs, fs, psd.data.data, flag='fd')
#max_rho = max(snr_ts(sig_data[0][i],hp_1_wht,hc_1_wht,T_obs,fs,wpsd)[0])
max_rho = max(chris_snr_ts(sig_data[0][i],hp[j],hc[j],T_obs,fs,wpsd,fmin_bank[j],flag='fd')[0]) #[int(fs*1.245):int(fs*1.455)]) #had [0] here
# check if max(SNR) greater than rho
if max_rho > rho:
rho = max_rho
hphcidx = j
#hphcidx = [hp_new,hc_new]
#if rho > 13:
# print fmin_bank[j]
# plt.plot(hp[j])
# plt.savefig('/home/hunter.gabbard/public_html/CBC/dl_match/test/hp.png')
# plt.close()
# sys.exit()
            print('{}: Max(rho) for signal {} type {} = {}'.format(time.asctime(), i, sig_data[1][i], rho))
            print('{}: Waveform idx for signal {} = {}'.format(time.asctime(), i, hphcidx))
# store max snr and index of hp/hc waveforms
sig_match_rho.append(rho)
#hp_hc_wvidx.append(hphcidx)
return np.array(sig_match_rho), np.array(chi_rho)
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
def get_snr(data,T_obs,fs,psd):
"""
computes the snr of a signal in unit variance time domain noise
"""
N = T_obs*fs
df = 1.0/T_obs
dt = 1.0/fs
idx = np.argwhere(psd==0.0)
psd[idx] = 1e300
xf = np.fft.rfft(data)*dt
SNRsq = 4.0*np.sum((np.abs(xf)**2)/psd)*df
return np.sqrt(SNRsq)
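# Example (a sketch, not from the original pipeline): for unit-variance
# whitened noise the effective one-sided PSD is 2/fs -- the same convention
# used for `wpsds` in main() -- so the SNR of a whitened signal would be:
# fs, Tobs = 1024, 1
# sig = np.sin(2.0 * np.pi * 50.0 * np.arange(Tobs * fs) / fs)
# rho = get_snr(sig, Tobs, fs, (2.0 / fs) * np.ones(Tobs * fs // 2 + 1))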
def chris_whiten_data(data,duration,sample_rate,psd,flag='td'):
"""
Takes an input timeseries and whitens it according to a psd
"""
if flag=='td':
# FT the input timeseries - window first
win = tukey(duration*sample_rate,alpha=1.0/8.0)
xf = np.fft.rfft(win*data)
else:
xf = data
    # deal with undefined PSD bins and normalise
    #idx = np.argwhere(psd>0.0)
    #invpsd = np.zeros(psd.size)
    #invpsd[idx] = 1.0/psd[idx]
    #xf *= np.sqrt(2.0*invpsd/sample_rate)
idx = np.argwhere(psd==0.0)
psd[idx] = 1e300
xf /= (np.sqrt(0.5*psd*sample_rate))
# Detrend the data: no DC component.
xf[0] = 0.0
if flag=='td':
# Return to time domain.
x = np.fft.irfft(xf)
return x
else:
return xf
def chris_snr_ts(data,template_p,template_c,Tobs,fs,psd,fmin,flag='td'):
"""
Computes the SNR timeseries given a timeseries and template
"""
N = Tobs*fs
df = 1.0/Tobs
dt = 1.0/fs
fidx = int(fmin/df)
win = tukey(N,alpha=1.0/8.0)
idx = np.argwhere(psd==0.0)
psd[idx] = 1e300
freqs = np.fft.fftfreq(N,dt)
oldfreqs = df*np.arange(N//2 + 1)
intpsd = np.interp(np.abs(freqs),oldfreqs,psd)
idx = np.argwhere(intpsd==0.0)
intpsd[idx] = 1e300
idx = np.argwhere(np.isnan(intpsd))
intpsd[idx] = 1e300
if flag=='td':
# make complex template
temp = template_p + template_c*1.j
ftemp = np.fft.fft(temp)*dt
# debug
plt.plot(ftemp)
plt.savefig('/home/hunter.gabbard/public_html/CBC/dl_match/test/template66_td_sig1.png')
plt.close()
sys.exit()
else:
# same as fft(temp_p) + i*fft(temp_c)
temp_p = np.hstack([template_p,np.conj((template_p[::-1])[1:-1])])
temp_c = np.hstack([template_c,np.conj((template_c[::-1])[1:-1])])
ftemp = temp_p + 1.j*temp_c
# fill negative frequencies - only set up to do N=even
#rev = temp[::-1]
#ftemp = np.hstack([temp,np.conj(rev[1:-1])])
ftemp[:fidx] = 0.0
ftemp[-fidx:] = 0.0
# FFT data
#print np.var(data*win)
#plt.plot((data*win)[0])
#plt.savefig('/home/hunter.gabbard/public_html/CBC/dl_match/test/template2487_ts_sig2.png')
#plt.close()
#sys.exit()
fdata = np.fft.fft(data*win)*dt
z = 4.0*np.fft.ifft(fdata*np.conj(ftemp)/intpsd)*df*N
s = 4.0*np.sum(np.abs(ftemp)**2/intpsd)*df
return np.abs(z)/np.sqrt(s)
def snr_ts(data, template_p, template_c, Tobs, fs, psd):
"""
Computes the SNR for each time step
Based on the LOSC tutorial code
"""
Nyq = fs / 2.
N = Tobs * fs
N_nyq = Tobs * Nyq
df = 1.0 / Tobs
dt = 1.0 / fs
dt_nyq = 1.0 / Nyq
temp = template_p + template_c * 1.j # didn't have dt before
dwindow = tukey(N, alpha=1.0 / 8.0)
# dwindow = np.ones(temp.size)
idx = np.argwhere(psd == 0.0)
psd[idx] = 1e300
# Take the Fourier Transform (FFT) of the data and the template (with dwindow)
data_fft = np.fft.fft(data * dwindow) * dt
#template_fft = np.fft.fft(temp * dwindow) * dt
# use nyquist for fs
freqs = np.fft.fftfreq(N, dt)
oldfreqs = df * np.arange(N // 2 + 1)
intpsd = np.interp(np.abs(freqs), oldfreqs, psd)
idx = np.argwhere(intpsd == 0.0)
intpsd[idx] = 1e300
idx = np.argwhere(np.isnan(intpsd))
intpsd[idx] = 1e300
# -- Calculate the matched filter output in the time domain:
# Multiply the Fourier Space template and data, and divide by the noise power in each frequency bin.
# Taking the Inverse Fourier Transform (IFFT) of the filter output puts it back in the time domain,
# so the result will be plotted as a function of time off-set between the template and the data:
optimal = data_fft * temp.conjugate() / intpsd # used to be template_fft.conj()
optimal_time = 2 * np.fft.ifft(optimal) * fs
# -- Normalize the matched filter output:
# Normalize the matched filter output so that we expect a value of 1 at times of just noise.
# Then, the peak of the matched filter output will tell us the signal-to-noise ratio (SNR) of the signal.
sigmasq = 1 * (temp * temp.conjugate() / intpsd).sum() * df # used to be template_fft.conj() and template_fft
sigma = np.sqrt(np.abs(sigmasq))
SNR_complex = optimal_time / sigma
return abs(SNR_complex)
def get_fmin(M,eta,dt):
"""
Compute the instantaneous frequency given a time till merger
"""
M_SI = M*lal.MSUN_SI
def dtchirp(f):
"""
The chirp time to 2nd PN order
"""
v = ((lal.G_SI/lal.C_SI**3)*M_SI*np.pi*f)**(1.0/3.0)
temp = (v**(-8.0) + ((743.0/252.0) + 11.0*eta/3.0)*v**(-6.0) -
(32*np.pi/5.0)*v**(-5.0) + ((3058673.0/508032.0) + 5429*eta/504.0 +
(617.0/72.0)*eta**2)*v**(-4.0))
return (5.0/(256.0*eta))*(lal.G_SI/lal.C_SI**3)*M_SI*temp - dt
# solve for the frequency between limits
fmin = brentq(dtchirp, 1.0, 2000.0, xtol=1e-6)
    print('{}: signal enters segment at {} Hz'.format(time.asctime(), fmin))
return fmin
def make_waveforms(template,dt,dist,fs,approximant,N,ndet,dets,psds,T_obs,f_low=12.0):
""" make waveform"""
# define variables
template = list(template)
m12 = [template[0],template[1]]
eta = template[2]
mc = template[3]
N = T_obs * fs # the total number of time samples
dt = 1 / fs # the sampling time (sec)
approximant = lalsimulation.IMRPhenomD
f_high = fs/2.0
df = 1.0/T_obs
f_low = df*int(get_fmin(mc,eta,1.0)/df)
f_ref = f_low
    dist = 1e6*lal.PC_SI # put it as 1 Mpc
# generate iota
iota = np.arccos(-1.0 + 2.0*np.random.rand())
    print('{}: selected bbh cos(inclination) = {}'.format(time.asctime(), np.cos(iota)))
# generate polarisation angle
psi = 2.0*np.pi*np.random.rand()
    print('{}: selected bbh polarisation = {}'.format(time.asctime(), psi))
# print parameters
    print('{}: selected bbh mass 1 = {}'.format(time.asctime(), m12[0]))
    print('{}: selected bbh mass 2 = {}'.format(time.asctime(), m12[1]))
    print('{}: selected bbh eta = {}'.format(time.asctime(), eta))
# make waveform
hp, hc = lalsimulation.SimInspiralChooseFDWaveform(
m12[0] * lal.MSUN_SI, m12[1] * lal.MSUN_SI,
0, 0, 0, 0, 0, 0,
dist,
iota,
0, 0, 0, 0,
df,
f_low,f_high,
f_ref,
lal.CreateDict(),
approximant)
hp = hp.data.data
hc = hc.data.data
for psd in psds:
hp_1_wht = chris_whiten_data(hp, T_obs, fs, psd.data.data, flag='fd')
hc_1_wht = chris_whiten_data(hc, T_obs, fs, psd.data.data, flag='fd')
return hp_1_wht,hc_1_wht,get_fmin(mc,eta,1)
def gen_psd(fs, T_obs, op='AdvDesign', det='H1'):
"""
generates noise for a variety of different detectors
"""
N = T_obs * fs # the total number of time samples
dt = 1 / fs # the sampling time (sec)
df = 1 / T_obs # the frequency resolution
psd = lal.CreateREAL8FrequencySeries(None, lal.LIGOTimeGPS(0), 0.0, df, lal.HertzUnit, N // 2 + 1)
if det == 'H1' or det == 'L1':
if op == 'AdvDesign':
lalsimulation.SimNoisePSDAdVDesignSensitivityP1200087(psd, 10.0)
elif op == 'AdvEarlyLow':
lalsimulation.SimNoisePSDAdVEarlyLowSensitivityP1200087(psd, 10.0)
elif op == 'AdvEarlyHigh':
lalsimulation.SimNoisePSDAdVEarlyHighSensitivityP1200087(psd, 10.0)
elif op == 'AdvMidLow':
lalsimulation.SimNoisePSDAdVMidLowSensitivityP1200087(psd, 10.0)
elif op == 'AdvMidHigh':
lalsimulation.SimNoisePSDAdVMidHighSensitivityP1200087(psd, 10.0)
elif op == 'AdvLateLow':
lalsimulation.SimNoisePSDAdVLateLowSensitivityP1200087(psd, 10.0)
elif op == 'AdvLateHigh':
lalsimulation.SimNoisePSDAdVLateHighSensitivityP1200087(psd, 10.0)
else:
            print('unknown noise option')
            sys.exit(1)
else:
        print('unknown detector - will add Virgo soon')
        sys.exit(1)
return psd
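# Example (sketch): build a 1-second advanced-design PSD sampled at 8192 Hz
# and read its samples as a numpy array (main() below makes the same call):
# psd = gen_psd(8192, 1, op='AdvDesign', det='H1')
# asd = np.sqrt(psd.data.data)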
def load_data(initial_dataset):
# get core name of dataset
#name1 = initial_dataset.split('_0')[0]
#name2 = initial_dataset.split('_0')[1]
print('Using data for: {0}'.format(initial_dataset))
#load in dataset 0
with open(initial_dataset, 'rb') as rfp:
base_test_set = pickle.load(rfp)
return base_test_set
def main():
# get the command line args
args = parser()
np.random.seed(args.seed)
# set path to file
cur_path = os.path.dirname(__file__)
new_path = os.path.relpath(args.dataset, cur_path)
# load dataset
data = load_data(new_path)
# redefine things for conciseness
Tobs = args.Tobs # observation time
fs = args.fsample # sampling frequency
dets = args.detectors # detectors
ndet = len(dets) # number of detectors
N = Tobs * fs # the total number of time samples
n = N // 2 + 1 # the number of frequency bins
tmp_bank = args.temp_bank # template bank file
f_low = args.cutoff_freq # cutoff frequency used in template generation
psds = [gen_psd(fs, Tobs, op='AdvDesign', det=d) for d in args.detectors]
    wpsds = (2.0 / fs) * np.ones((ndet, n)) # define effective PSD for whitened data
# load template bank
tmp_bank = np.array(EventTable.read(tmp_bank,
format='ligolw.sngl_inspiral', columns=['mass1','mass2','eta','mchirp']))
# loop over stuff
output,chi_test = looper(data,tmp_bank,Tobs,fs,dets,psds,wpsds,args.basename,args.w_basename,args.cutoff_freq,args.wave_bank)
chi_test = [chi_test,data[1]]
output = [output,data[1]]
# save list of rho for test signals and test noise
pickle_out = open("%srho_values.pickle" % args.basename, "wb")
pickle.dump(output, pickle_out)
pickle_out.close()
# save list of chi rho for test purposes only
pickle_out = open("%schirho_values.pickle" % args.basename, "wb")
pickle.dump(chi_test, pickle_out)
pickle_out.close()
if __name__ == "__main__":
main()
|
408487
|
import numpy as np
from . import logger
from duckietown_utils.expand_variables import expand_environment
import os
__all__ = [
'd8n_read_images_interval',
'd8n_read_all_images',
'd8n_get_all_images_topic',
]
def d8n_read_images_interval(filename, t0, t1):
"""
Reads all the RGB data from the bag,
in the interval [t0, t1], where t0 = 0 indicates
the first image.
"""
data = d8n_read_all_images(filename, t0, t1)
logger.info('Read %d images from %s.' % (len(data), filename))
timestamps = data['timestamp']
# normalize timestamps
first = data['timestamp'][0]
timestamps -= first
logger.info('Sequence has length %.2f seconds.' % timestamps[-1])
return data
def d8n_read_all_images(filename, t0=None, t1=None):
"""
    Raises a ValueError if no data could be read.
Returns a numpy array.
data = d8n_read_all_images(bag)
print data.shape # (928,)
print data.dtype # [('timestamp', '<f8'), ('rgb', 'u1', (480, 640, 3))]
"""
import rosbag # @UnresolvedImport
filename = expand_environment(filename)
if not os.path.exists(filename):
msg = 'File does not exist: %r' % filename
raise ValueError(msg)
    bag = rosbag.Bag(filename)
    that_topic = get_image_topic(bag)
    bag.close()
data = []
first_timestamp = None
with rosbag.Bag(filename, 'r') as bag:
for j, (topic, msg, t) in enumerate(bag.read_messages()):
if topic == that_topic:
float_time = t.to_sec()
if first_timestamp is None:
first_timestamp = float_time
rel_time = float_time - first_timestamp
if t0 is not None:
if rel_time < t0:
continue
if t1 is not None:
if rel_time > t1:
continue
rgb = numpy_from_ros_compressed(msg)
data.append({'timestamp': float_time, 'rgb': rgb})
if j % 10 == 0:
print('Read %d images from topic %s' % (j, topic))
print('Returned %d images' % len(data))
if not data:
raise ValueError('no data found')
H, W, _ = rgb.shape # (480, 640, 3)
print('Detected image shape: %s x %s' % (W, H))
n = len(data)
dtype = [
('timestamp', 'float'),
('rgb', 'uint8', (H, W, 3)),
]
x = np.zeros((n,), dtype=dtype)
for i, v in enumerate(data):
x[i]['timestamp'] = v['timestamp']
x[i]['rgb'][:] = v['rgb']
return x
def d8n_get_all_images_topic(bag_filename):
""" Returns the (name, type) of all topics that look like images """
import rosbag # @UnresolvedImport
bag = rosbag.Bag(bag_filename)
tat = bag.get_type_and_topic_info()
    # tat is a TypesAndTopicsTuple: tat.msg_types maps message type names to
    # their md5sums, and tat.topics maps each topic name to a TopicTuple of
    # (msg_type, message_count, connections, frequency).
consider_images = [
'sensor_msgs/Image',
'sensor_msgs/CompressedImage',
]
all_types = set()
found = []
topics = tat.topics
for t,v in topics.items():
msg_type = v.msg_type
all_types.add(msg_type)
message_count = v.message_count
if msg_type in consider_images:
# quick fix: ignore image_raw if we have image_compressed version
if 'raw' in t:
other = t.replace('raw', 'compressed')
if other in topics:
continue
found.append((t,msg_type))
print('all_types: %s' % all_types)
print('found: %s' % found)
return found
def get_image_topic(bag):
""" Returns the name of the topic for the main camera """
topics = bag.get_type_and_topic_info()[1].keys()
for t in topics:
if 'camera_node/image/compressed' in t:
return t
msg = 'Cannot find the topic: %s' % topics
raise ValueError(msg)
def numpy_from_ros_compressed(msg):
if 'CompressedImage' in msg.__class__.__name__:
return rgb_from_pil(pil_from_CompressedImage(msg))
    raise ValueError('Unexpected message type: %s' % msg.__class__.__name__)
def pil_from_CompressedImage(msg):
from PIL import ImageFile # @UnresolvedImport
parser = ImageFile.Parser()
parser.feed(msg.data)
res = parser.close()
return res
def rgb_from_pil(im):
return np.asarray(im).astype(np.uint8)
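if __name__ == '__main__':
    # Usage sketch with a hypothetical bag path; this needs a ROS install
    # providing the rosbag package.
    found_topics = d8n_get_all_images_topic('/tmp/example.bag')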
|
408497
|
import numpy as np
import torch
import torch.jit as jit
import torch.nn as nn
import torch.nn.functional as F
from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder
def _unflatten_helper(x, t: int, n: int):
sz = list(x.size())
new_sz = [t, n] + sz[1:]
return x.view(new_sz)
class RCMStateEncoder(RNNStateEncoder):
r"""A cross-modal state encoder akin to the reinforced cross-modal
matching state encoder (https://arxiv.org/abs/1811.10092).
"""
def __init__(
self,
rgb_input_channels: int,
depth_input_channels: int,
hidden_size: int,
action_embedding_size: int,
num_layers: int = 1,
rnn_type: str = "GRU",
):
nn.Module.__init__(self)
self._num_recurrent_layers = num_layers
self._rnn_type = rnn_type
self.rgb_kv = nn.Conv1d(rgb_input_channels, hidden_size, kernel_size=1)
self.depth_kv = nn.Conv1d(depth_input_channels, hidden_size, kernel_size=1)
self.q_net = nn.Linear(hidden_size, hidden_size // 2)
self.register_buffer("_scale", torch.tensor(1.0 / ((hidden_size // 2) ** 0.5)))
self.rnn = getattr(nn, rnn_type)(
input_size=hidden_size + action_embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
)
self.layer_init()
@property
def num_recurrent_layers(self):
return super().num_recurrent_layers + 1
def layer_init(self):
for param in self.parameters():
if param.dim() > 1:
nn.init.orthogonal_(param)
else:
nn.init.zeros_(param)
def _attn(self, q, k, v):
logits = torch.einsum("nc, nci -> ni", q, k)
attn = F.softmax(logits * self._scale, dim=1)
return torch.einsum("ni, nci -> nc", attn, v)
def forward(
self, rgb_embedding, depth_embedding, prev_actions, hidden_states, masks
):
# x is a (T, N, -1) tensor flattened to (T * N, -1)
n = hidden_states.size(1)
t = int(rgb_embedding.size(0) / n)
hidden_states, last_output = hidden_states[0:-1], hidden_states[-1]
hidden_states = self._unpack_hidden(hidden_states)
rgb_embedding = self.rgb_kv(rgb_embedding)
depth_embedding = self.depth_kv(depth_embedding)
# unflatten
rgb_embedding = _unflatten_helper(rgb_embedding, t, n)
depth_embedding = _unflatten_helper(depth_embedding, t, n)
masks = masks.view(t, n)
prev_actions = prev_actions.view(t, n, -1)
outputs = []
for it in range(t):
rgb = rgb_embedding[it]
depth = depth_embedding[it]
rgb_k, rgb_v = torch.chunk(rgb, chunks=2, dim=1)
depth_k, depth_v = torch.chunk(depth, chunks=2, dim=1)
last_output = last_output * masks[it].view(n, 1)
q = self.q_net(last_output)
rgb_attn = self._attn(q, rgb_k, rgb_v)
depth_attn = self._attn(q, depth_k, depth_v)
rnn_input = torch.cat([rgb_attn, depth_attn, prev_actions[it]], dim=1).view(
1, n, -1
)
last_output, hidden_states = self.rnn(
rnn_input, self._mask_hidden(hidden_states, masks[it].view(1, n, 1))
)
last_output = last_output.view(n, -1)
outputs.append(last_output)
hidden_states = self._pack_hidden(hidden_states)
hidden_states = torch.cat([hidden_states, last_output.unsqueeze(0)], dim=0)
return torch.stack(outputs, dim=0).view(t * n, -1), hidden_states
if __name__ == "__main__":
rcm = RCMStateEncoder(2048, 1024, 256, 32)
rgb_input = torch.randn(2 * 4, 2048, 7 * 7)
depth_input = torch.randn(2 * 4, 1024, 4 * 4)
prev_actions = torch.randn(2 * 4, 32)
    masks = torch.randint(2, size=(2 * 4,)).float()  # random 0/1 masks
hidden_states = torch.randn(rcm.num_recurrent_layers, 4, 256)
rcm(rgb_input, depth_input, prev_actions, hidden_states, masks)
|
408508
|
from __future__ import division
import numpy as np
import nibabel as nib
import copy
import time
import configparser
from skimage.transform import resize
from scipy.ndimage import measurements
import tensorflow as tf
from glob import glob
import re
import os
import SimpleITK as sitk
import random
from keras_preprocessing.image import *
import cv2 as cv
import colorsys
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from collections import Counter
# construct an iterator for batch generation
class BatchGenerator(Iterator):
'''
    get an iterator for generating (batch_x, batch_y)
'''
def __init__(
self,
batch_size,
shuffle,
seed,
volume_path,
modalities,
resize_r,
rename_map,
patch_dim,
augmentation):
self.batch_size = batch_size
self.volume_path = volume_path
self.modalities = modalities
self.resize_ratio = resize_r
self.rename_map = rename_map
self.file_list = self._get_img_info()
self.total_num = len(self.file_list)
self.patch_dim = patch_dim
# self.rot_flag = rot_flag
self.augmentation = augmentation
self.image_shape = (patch_dim, patch_dim, patch_dim) + (modalities,)
self.label_shape = (patch_dim, patch_dim, patch_dim)
super(
BatchGenerator,
self).__init__(
n=self.total_num,
batch_size=batch_size,
shuffle=shuffle,
seed=seed)
def _get_img_info(self):
'''
        this function reads all files of a specific directory and gets the path list
        :return: path list of all the volume files
'''
file_list = []
categories = os.listdir(self.volume_path)
for category in categories:
category_path = os.path.join(self.volume_path, category)
dir_list = os.listdir(category_path)
for dire in dir_list:
dire_lower = dire.lower()
if not dire_lower.startswith('brats'):
raise Exception("volume file exception!")
file_abs_path = os.path.join(category_path, dire)
single_file = {"path": file_abs_path, "category": category}
file_list.append(single_file)
return file_list
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),
) + self.image_shape,
dtype='float32')
batch_x2 = np.zeros(
(len(index_array),
) + self.label_shape+(1,),
dtype='float32')
batch_y = np.zeros(
(len(index_array),
) + self.label_shape,
dtype='int32')
batch_y_stage2 = np.zeros(
(len(index_array),
) + self.label_shape,
dtype='int32')
batch_y_stage3 = np.zeros(
(len(index_array),
) + self.label_shape,
dtype='int32')
for i, j in enumerate(index_array):
# data directory of a patient
single_dir_path = self.file_list[j]["path"]
img_data, img_data2, stage1_label_data, stage2_label, \
stage3_label, _ = self.load_volumes_label(single_dir_path, True)
rand_num = np.random.randint(self.total_num - 1, size=self.total_num)
matching_index = rand_num[0] if rand_num[0] != j else rand_num[-1]
# ready for histogram matching
img_data_matching, img_data_matching2, _, _, _, _ = self.load_volumes_label(
self.file_list[matching_index]["path"], True)
img_data_matching_cast = img_data_matching.astype("float32")
img_data_matching_cast2 = img_data_matching2.astype("float32")
# data augmentation
volume_list = [img_data[...,0], img_data[...,1], np.squeeze(img_data2, axis=-1),
stage1_label_data, stage2_label, stage3_label]
img_data_0, img_data_1, img_data2, stage1_label_data, \
stage2_label, stage3_label = self.data_augment_volume(*volume_list,
augmentation=self.augmentation)
img_data = np.stack((img_data_0,img_data_1), axis=-1)
img_data2 = np.expand_dims(img_data2, axis=-1)
# reduce background region
regions = get_brain_region(np.squeeze(img_data2, axis=-1))
img_data = img_data[regions[0]:regions[1], regions[2]:regions[3], regions[4]:regions[5], :]
img_data2 = img_data2[regions[0]:regions[1], regions[2]:regions[3], regions[4]:regions[5], :]
stage1_label_data = stage1_label_data[regions[0]:regions[1], regions[2]:regions[3], regions[4]:regions[5]]
stage2_label= stage2_label[regions[0]:regions[1], regions[2]:regions[3],
regions[4]:regions[5]]
stage3_label = stage3_label[regions[0]:regions[1], regions[2]:regions[3],
regions[4]:regions[5]]
            # whether to use the histogram-matching data augmentation method (deprecated)
augment = False
if augment:
# histogram matching data augmentation
img_hist_match = Preprocessing.hist_match(
img_data.astype("float32"), img_data_matching_cast)
img_hist_match2 = Preprocessing.hist_match(img_data2.astype("float32"), img_data_matching_cast2)
                # using B-spline interpolation for deformation (just like V-Net did)
numcontrolpoints = 2
sigma = 15
else:
img_hist_match = img_data
img_hist_match2 = img_data2
# resize
resize_dim = (np.array(stage1_label_data.shape) * self.resize_ratio).astype('int')
img_data_resize = resize(img_hist_match.astype("float32"), resize_dim, order=1, preserve_range=True)
img_data2_resize = resize(img_hist_match2.astype("float32"), resize_dim, order=1, preserve_range=True)
stage1_label_resize = resize(stage1_label_data, resize_dim, order=0, preserve_range=True)
stage2_label_resize = resize(stage2_label, resize_dim, order=0, preserve_range=True)
stage3_label_resize = resize(stage3_label, resize_dim, order=0, preserve_range=True)
img_data_cast = img_data_resize.astype("float32")
img_data_cast2 = img_data2_resize.astype("float32")
label_data_cast = stage1_label_resize.astype('int32')
stage2_label_cast = stage2_label_resize.astype("int32")
stage3_label_cast = stage3_label_resize.astype("int32")
# normalization
img_norm = Preprocessing.Normalization(img_data_cast, axis=(0, 1, 2))
img_norm2 = Preprocessing.Normalization(img_data_cast2)
# randomly select a box anchor
l, w, h = label_data_cast.shape
l_rand = np.arange(l - self.patch_dim) # get a start point
w_rand = np.arange(w - self.patch_dim)
h_rand = np.arange(h - self.patch_dim)
np.random.shuffle(l_rand) # shuffle the start point series
np.random.shuffle(w_rand)
np.random.shuffle(h_rand)
pos = np.array([l_rand[0], w_rand[0], h_rand[0]]) # get the start point
# crop the volume to get the same size for the network
img_temp = copy.deepcopy(img_norm[pos[0]:pos[0] +
self.patch_dim, pos[1]:pos[1] +
self.patch_dim, pos[2]:pos[2] +
self.patch_dim, :])
img_temp2 = copy.deepcopy(img_norm2[pos[0]:pos[0] +
self.patch_dim, pos[1]:pos[1] +
self.patch_dim, pos[2]:pos[2] +
self.patch_dim, :])
# crop the label just like the volume data
label_temp = copy.deepcopy(
label_data_cast[pos[0]:pos[0] + self.patch_dim, pos[1]:pos[1] + self.patch_dim, pos[2]:pos[2] + self.patch_dim])
stage2_label_temp = copy.deepcopy(stage2_label_cast[pos[0]:pos[0] + self.patch_dim, pos[1]:pos[1] + self.patch_dim, pos[2]:pos[2] + self.patch_dim])
stage3_label_temp = copy.deepcopy(stage3_label_cast[pos[0]:pos[0] + self.patch_dim, pos[1]:pos[1] + self.patch_dim, pos[2]:pos[2] + self.patch_dim])
# get the batch data
batch_x[i, :, :, :, :] = img_temp
batch_x2[i, :, :, :, :] = img_temp2
batch_y[i, :, :, :] = label_temp
batch_y_stage2[i,:,:,:] = stage2_label_temp
batch_y_stage3[i,:,:,:] = stage3_label_temp
return batch_x, batch_x2, batch_y, batch_y_stage2, batch_y_stage3
# load volumes and the GT
def load_volumes_label(self, src_path, rename_map_flag):
'''
        this function gets the volume data and GT from the given path
        :param src_path: directory path of a patient
        :return: GT and the volume data (width, height, slice, modality)
'''
# rename_map = [0, 1, 2, 4]
volume_list, seg_dict = self.data_dict_construct(src_path)
# assert len(volume_list) == 4
# assert seg_dict["mod"] == "seg"
if seg_dict["mod"] == "seg":
label_nib_data = nib.load(seg_dict["path"])
label = label_nib_data.get_data().copy()
# label = nib.load(seg_dict["path"]).get_data().copy()
            # to avoid artifacts when resizing labels, binarize first and resize afterwards
stage1_label_data = np.zeros(label.shape, dtype='int32')
stage2_label_data = np.zeros(label.shape, dtype='int32')
stage3_label_data = np.zeros(label.shape, dtype='int32')
if rename_map_flag:
for i in range(len(self.rename_map)):
if i > 0:
stage1_label_data[label == self.rename_map[i]] = 1
else:
continue
# Cascaded structure,stage2,stage3 label prepare
stage2_label_data[label == 1] = 1
stage2_label_data[label == 4] = 1
stage3_label_data[label == 1] = 1
else:
stage1_label_data = copy.deepcopy(label).astype('int16')
stage2_label_data = copy.deepcopy(label).astype('int16')
stage3_label_data = copy.deepcopy(label).astype('int16')
else:
stage1_label_data = []
stage2_label_data = []
stage3_label_data = []
label_nib_data = []
img_all_modality = []
# order of the sequences [flair, T1, T1ce, T2]
for i in range(len(volume_list)):
volume = nib.load(volume_list[i]["path"])
img = volume.get_data().copy()
# resized_img = resize(img, resize_dim, order=1, preserve_range=True)
img_all_modality.append(img)
# choose different modalities for the network
if self.modalities == 4:
# all the modalities
img_data = img_all_modality
elif self.modalities == 3:
# select T1ce T1 Flair modalities
img_data = [img_all_modality[0], img_all_modality[2], img_all_modality[3]]
elif self.modalities == 2:
# two modalities
# choose T2 and Flair
img_data = [img_all_modality[0], img_all_modality[3]]
else:
# one modality
img_data = img_all_modality[0]
img_data = np.expand_dims(img_data, axis=0)
# input volume data
img_data2 = np.expand_dims(img_all_modality[2], axis=0)
img_array2 = np.array(img_data2, "float32").transpose((1,2,3,0))
# list to ndarray
img_array = np.array(img_data, "float32").transpose((1, 2, 3, 0))
return img_array, img_array2, stage1_label_data, stage2_label_data, stage3_label_data, volume
# construct data dict
def data_dict_construct(self, path):
'''
        this function gets the list of data dictionaries for a patient
        :param path: path of the patient data
        :return: list of dictionaries including the path and the modality
'''
# list the image volumes and GT
files = os.listdir(path)
nii_list = sorted(glob('{}/*.nii.gz'.format(path)))
re_style = r'[\-\_\.]+'
volumn_list = []
seg_dict = {"mod": "None"}
for count, nii in enumerate(nii_list):
# modality mapping [seg, flair, T1, T1ce, T2]
mapping = [0, 1, 2, 3, 4]
file = os.path.basename(nii)
split_text = re.split(re_style, file)
modality = split_text[-3]
assert modality in ["flair", "seg", "t1", "t2", "t1ce"]
if modality == "seg":
data_dict = {"mod": modality, "path": nii, "count": mapping[0]}
elif modality == "flair":
data_dict = {"mod": modality, "path": nii, "count": mapping[1]}
elif modality == "t1":
data_dict = {"mod": modality, "path": nii, "count": mapping[2]}
elif modality == "t1ce":
data_dict = {"mod": modality, "path": nii, "count": mapping[3]}
else:
data_dict = {"mod": modality, "path": nii, "count": mapping[4]}
if data_dict["mod"] != "seg":
volumn_list.append(data_dict)
else:
seg_dict = {"mod": modality, "path": nii, "count": mapping[0]}
# sort the modalites in the list
volumn_list.sort(key=lambda x: x["count"])
return volumn_list, seg_dict
    def data_augment_volume(self, *datalist, augmentation):
# first get the volume data from the data list
image1, image2, image3, mask1, mask2, mask3 = datalist
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image1_shape = image1.shape
mask1_shape = mask1.shape
image2_shape = image2.shape
mask2_shape = mask2.shape
image3_shape = image3.shape
mask3_shape = mask3.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
# image should be uint8!!
image1 = det.augment_image(image1)
image2 = det.augment_image(image2)
image3 = det.augment_image(image3)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask1 = det.augment_image(mask1.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
mask2 = det.augment_image(mask2.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
mask3 = det.augment_image(mask3.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image1.shape == image1_shape, "Augmentation shouldn't change image size"
assert mask1.shape == mask1_shape, "Augmentation shouldn't change mask size"
assert image2.shape == image2_shape, "Augmentation shouldn't change image size"
assert mask2.shape == mask2_shape, "Augmentation shouldn't change mask size"
assert image3.shape == image3_shape, "Augmentation shouldn't change image size"
assert mask3.shape == mask3_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
# masks = masks.astype(np.bool)
return image1,image2, image3, mask1, mask2, mask3
def data_augment(self, image, mask, augmentation):
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
# image should be uint8!!
images = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
masks = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
            # Verify that shapes didn't change
            assert images.shape == image_shape, "Augmentation shouldn't change image size"
            assert masks.shape == mask_shape, "Augmentation shouldn't change mask size"
            # Change mask back to bool
            # masks = masks.astype(np.bool)
            return images, masks
        return image, mask
def get_brain_region(volume_data):
# volume = nib.load(volume_path)
# volume_data = volume.get_data()
# get the brain region
indice_list = np.where(volume_data > 0)
# calculate the min and max of the indice, here volume have 3 channels
channel_0_min = min(indice_list[0])
channel_0_max = max(indice_list[0])
channel_1_min = min(indice_list[1])
channel_1_max = max(indice_list[1])
channel_2_min = min(indice_list[2])
channel_2_max = max(indice_list[2])
brain_volume = volume_data[channel_0_min:channel_0_max, channel_1_min:channel_1_max,channel_2_min:channel_2_max]
return (channel_0_min, channel_0_max, channel_1_min, channel_1_max, channel_2_min, channel_2_max)
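# Example (sketch): crop a 3D volume to its nonzero bounding box, mirroring
# how the generator above uses the returned indices:
# r = get_brain_region(vol)  # vol: 3D ndarray
# cropped = vol[r[0]:r[1], r[2]:r[3], r[4]:r[5]]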
class Preprocessing(object):
def __init__(self):
pass
# N4 Bias Field Correction by simpleITK
@staticmethod
def N4BiasFieldCorrection(src_path, dst_path):
'''
This function carry out BiasFieldCorrection for the files in a specific directory
:param src_path: path of the source file
:param dst_path: path of the target file
:return:
'''
print("N4 bias correction runs.")
inputImage = sitk.ReadImage(src_path)
maskImage = sitk.OtsuThreshold(inputImage, 0, 1, 200)
sitk.WriteImage(maskImage, dst_path)
inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
corrector = sitk.N4BiasFieldCorrectionImageFilter()
# corrector.SetMaximumNumberOfIterations(10)
output = corrector.Execute(inputImage, maskImage)
sitk.WriteImage(output, dst_path)
print("Finished N4 Bias Field Correction.....")
# normalize the data(zero mean and unit variance)
@staticmethod
def Normalization(volume, axis=None):
mean = np.mean(volume, axis=axis)
std = np.std(volume, axis=axis)
norm_volume = (volume - mean) / std
return norm_volume
# data augmentation by histogram matching
@staticmethod
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source(randomly choose from the training dataset)
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
# interp_t_values = np.zeros_like(source,dtype=float)
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
# data augmentation by deforming
@staticmethod
def produceRandomlyDeformedImage(image, label, numcontrolpoints, stdDef, seed=1):
        '''
        This function comes from V-Net: deform an image by B-spline interpolation.
        :param image: image, numpy array
        :param label: label, numpy array
        :param numcontrolpoints: number of control points, a B-spline interpolation parameter, 2 by default
        :param stdDef: deviation, a B-spline interpolation parameter, 15 by default
        :return: deformed image and ground truth as numpy arrays
        '''
sitkImage = sitk.GetImageFromArray(image, isVector=False)
sitklabel = sitk.GetImageFromArray(label, isVector=False)
transfromDomainMeshSize = [numcontrolpoints] * sitkImage.GetDimension()
tx = sitk.BSplineTransformInitializer(
sitkImage, transfromDomainMeshSize)
params = tx.GetParameters()
paramsNp = np.asarray(params, dtype=float)
        # set the seed so that, for multi-channel data, both channels are deformed identically
np.random.seed(seed)
paramsNp = paramsNp + np.random.randn(paramsNp.shape[0]) * stdDef
# remove z deformations! The resolution in z is too bad
paramsNp[0:int(len(params) / 3)] = 0
params = tuple(paramsNp)
tx.SetParameters(params)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(sitkImage)
resampler.SetInterpolator(sitk.sitkLinear)
        resampler.SetDefaultPixelValue(0)
        resampler.SetTransform(tx)
outimgsitk = resampler.Execute(sitkImage)
outlabsitk = resampler.Execute(sitklabel)
outimg = sitk.GetArrayFromImage(outimgsitk)
outimg = outimg.astype(dtype=np.float32)
outlbl = sitk.GetArrayFromImage(outlabsitk)
# outlbl = (outlbl > 0.5).astype(dtype=np.float32)
return outimg, outlbl
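def _example_preprocess(source_volume, template_volume):
    # Hypothetical pipeline sketch (inputs are assumed numpy volumes):
    # histogram-match a volume to a template chosen from the training set,
    # then apply the zero-mean/unit-variance normalization defined above.
    matched = Preprocessing.hist_match(source_volume, template_volume)
    return Preprocessing.Normalization(matched)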
class Evaluation(object):
def __init__(self):
pass
# save 3d volume as slices
def save_slice_img(self, volume_path, output_path):
file_name = os.path.basename(volume_path)
output_dir = os.path.join(output_path, file_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
pass
input_volume = nib.load(volume_path).get_data()
# mapping to 0-1
vol_max = np.max(input_volume)
vol_min = np.min(input_volume)
input_unit = (input_volume-vol_min)/(vol_max - vol_min)
        width, height, depth = input_unit.shape
        for i in range(0, depth):
            slice_path = os.path.join(output_dir, str(i) + '.png')
            img_i = input_unit[:, :, i]
            # normalize to 0-255
            img_i = (img_i * 255).astype('uint8')
            cv.imwrite(slice_path, img_i)
return input_unit
def save_slice_img_label(self, img_volume, pre_volume, gt_volume,
output_path, file_name, show_mask=False, show_gt = False):
assert img_volume.shape == pre_volume.shape
if show_gt:
assert img_volume.shape == gt_volume.shape
width, height, depth = img_volume.shape
# gray value mapping from MRI value to pixel value(0-255)
volume_max = np.max(img_volume)
volume_min = np.min(img_volume)
volum_mapped = (img_volume-volume_min)/(volume_max-volume_min)
volum_mapped = (255*volum_mapped).astype('uint8')
# construct a directory for each volume to save slices
dir_volume = os.path.join(output_path, file_name)
if not os.path.exists(dir_volume):
os.makedirs(dir_volume)
else:
pass
for i in range(depth):
img_slice = volum_mapped[:, :, i]
pre_slice = pre_volume[:, :, i]
if show_gt:
gt_slice = gt_volume[:, :, i]
else:
gt_slice = []
self.save_contour_label(img=img_slice, pre=pre_slice, gt=gt_slice,
save_path=dir_volume, file_name=i,show_mask=show_mask,show_gt=show_gt)
def apply_mask(self, image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(image.shape[-1]):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def random_colors(self, N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def save_contour_label(self, img, pre, gt=None, save_path='', file_name=None, show_mask=False, show_gt = False):
# single channel to multi-channel
img = np.expand_dims(img, axis=-1)
img = np.tile(img, (1, 1, 3))
height, width = img.shape[:2]
_, ax = plt.subplots(1, figsize=(height, width))
# Generate random colors
# colors = self.random_colors(4)
# Prediction result is illustrated as red and the groundtruth is illustrated as blue
colors = [[1.0, 0, 0], [0, 0, 1.0]]
# Show area outside image boundaries.
# ax.set_ylim(height + 10, -10)
# ax.set_xlim(-10, width + 10)
ax.set_ylim(height + 0, 0)
ax.set_xlim(0, width + 0)
ax.axis('off')
# ax.set_title("volume mask")
masked_image = img.astype(np.uint32).copy()
if show_mask:
masked_image = self.apply_mask(masked_image, pre, colors[0])
if show_gt:
masked_image = self.apply_mask(masked_image, gt, colors[1])
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask_pre = np.zeros(
(pre.shape[0] + 2, pre.shape[1] + 2), dtype=np.uint8)
padded_mask_pre[1:-1, 1:-1] = pre
contours = find_contours(padded_mask_pre, 0.5)
for verts in contours:
# reduce padding and flipping from (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=colors[0], linewidth=1)
ax.add_patch(p)
if show_gt:
padded_mask_gt = np.zeros((gt.shape[0] + 2, gt.shape[1] + 2), dtype=np.uint8)
padded_mask_gt[1:-1, 1:-1] = gt
contours_gt = find_contours(padded_mask_gt, 0.5)
for contour in contours_gt:
contour = np.fliplr(contour) -1
p_gt = Polygon(contour, facecolor="none", edgecolor=colors[1], linewidth=1)
ax.add_patch(p_gt)
# reduce the blank part generated by plt and keep the original resolution
fig = plt.gcf()
fig.set_size_inches(height/37.5, width/37.5)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
ax.imshow(masked_image.astype(np.uint8))
# plt.show()
fig.savefig('{}/{}.png'.format(save_path, file_name))
# clear the image after saving
plt.cla()
plt.close(fig)
def save_slice_volume(volume, save_path):
    '''
    Save volume data as slices in the specified directory.
    :param volume: input volume data
    :param save_path:
    :return:
    '''
shape = volume.shape
# translate intensity to 0-255
v_max = np.max(volume)
v_min = np.min(volume)
volume_norm = (volume - v_min) / (v_max - v_min)
volume_norm = (volume_norm * 255).astype("int")
if not os.path.exists(save_path):
os.makedirs(save_path)
for i in range(shape[-1]):
abs_path = os.path.join(save_path, str(i)+".png")
cv.imwrite(abs_path, volume_norm[..., i])
# calculate the cube information
def fit_cube_param(vol_dim, cube_size, ita):
dim = np.asarray(vol_dim)
fold = dim / cube_size + ita
    ovlap = np.ceil(
        np.true_divide(
            (fold * cube_size - dim),
            (fold - 1)))  # overlap required so that `fold` cubes of size cube_size cover each axis
ovlap = ovlap.astype('int')
# print( "ovlap:", str( ovlap ) )#[62 62 86]
fold = np.ceil(np.true_divide((dim + (fold - 1) * ovlap), cube_size))
fold = fold.astype('int')
# print( "fold:", str( fold) ) fold: [8 8 6]
return fold, ovlap
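def _example_cube_params():
    # Illustrative call (the shapes here are made up): compute how many cubes
    # per axis and how much overlap are needed to tile a volume with 64-voxel
    # cubes, given the extra-fold factor ita.
    fold, ovlap = fit_cube_param((307, 307, 143), cube_size=64, ita=2)
    return fold, ovlap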
# decompose volume into list of cubes
def decompose_vol2cube_brain(vol_data, cube_size, n_chn, ita):
cube_list = []
fold, ovlap = fit_cube_param(vol_data.shape[0:3], cube_size, ita)
dim = np.asarray(vol_data.shape[0:3]) # [307, 307, 143]
# decompose
for R in range(0, fold[0]):
r_s = R * cube_size - R * ovlap[0]
r_e = r_s + cube_size
        if r_e >= dim[0]:  # check whether the cube exceeds the boundary
r_s = dim[0] - cube_size
r_e = r_s + cube_size
for C in range(0, fold[1]):
c_s = C * cube_size - C * ovlap[1]
c_e = c_s + cube_size
if c_e >= dim[1]:
c_s = dim[1] - cube_size
c_e = c_s + cube_size
for H in range(0, fold[2]):
h_s = H * cube_size - H * ovlap[2]
h_e = h_s + cube_size
if h_e >= dim[2]:
h_s = dim[2] - cube_size
h_e = h_s + cube_size
# partition multiple channels
cube_temp = vol_data[r_s:r_e, c_s:c_e, h_s:h_e, :]
# By default batch_size = 1
cube_batch = np.zeros(
[1, cube_size, cube_size, cube_size, n_chn]).astype('float32')
cube_batch[0, :, :, :, :] = copy.deepcopy(cube_temp)
# save
cube_list.append(cube_batch)
return cube_list
# compose list of label cubes into a label volume
def compose_label_cube2vol(cube_list, vol_dim, cube_size, ita, class_n):
# get parameters for compose
fold, ovlap = fit_cube_param(vol_dim, cube_size, ita)
# create label volume for all classes
label_classes_mat = (
np.zeros([vol_dim[0], vol_dim[1], vol_dim[2], class_n])).astype('int32')
idx_classes_mat = (
np.zeros([cube_size, cube_size, cube_size, class_n])).astype('int32')
p_count = 0
for R in range(0, fold[0]):
r_s = R * cube_size - R * ovlap[0]
r_e = r_s + cube_size
if r_e >= vol_dim[0]:
r_s = vol_dim[0] - cube_size
r_e = r_s + cube_size
for C in range(0, fold[1]):
c_s = C * cube_size - C * ovlap[1]
c_e = c_s + cube_size
if c_e >= vol_dim[1]:
c_s = vol_dim[1] - cube_size
c_e = c_s + cube_size
for H in range(0, fold[2]):
h_s = H * cube_size - H * ovlap[2]
h_e = h_s + cube_size
if h_e >= vol_dim[2]:
h_s = vol_dim[2] - cube_size
h_e = h_s + cube_size
# histogram for voting (one-hot)
for k in range(class_n):
idx_classes_mat[:, :, :, k] = (cube_list[p_count] == k)
# accumulation
label_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] = label_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] + idx_classes_mat
p_count += 1
# print 'label mat unique:'
# print np.unique(label_mat)
compose_vol = np.argmax(label_classes_mat, axis=3)
# print np.unique(label_mat)
return compose_vol
# compose list of probability cubes into a probability volumes
def compose_prob_cube2vol(cube_list, vol_dim, cube_size, ita, class_n):
# get parameters for compose
fold, ovlap = fit_cube_param(vol_dim, cube_size, ita)
# create label volume for all classes
map_classes_mat = (
np.zeros([vol_dim[0], vol_dim[1], vol_dim[2], class_n])).astype('float32')
cnt_classes_mat = (
np.zeros([vol_dim[0], vol_dim[1], vol_dim[2], class_n])).astype('float32')
p_count = 0
for R in range(0, fold[0]):
r_s = R * cube_size - R * ovlap[0]
r_e = r_s + cube_size
if r_e >= vol_dim[0]:
r_s = vol_dim[0] - cube_size
r_e = r_s + cube_size
for C in range(0, fold[1]):
c_s = C * cube_size - C * ovlap[1]
c_e = c_s + cube_size
if c_e >= vol_dim[1]:
c_s = vol_dim[1] - cube_size
c_e = c_s + cube_size
for H in range(0, fold[2]):
h_s = H * cube_size - H * ovlap[2]
h_e = h_s + cube_size
if h_e >= vol_dim[2]:
h_s = vol_dim[2] - cube_size
h_e = h_s + cube_size
# accumulation
map_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] = map_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] + cube_list[p_count]
cnt_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] = cnt_classes_mat[r_s:r_e,
c_s:c_e,
h_s:h_e,
:] + 1.0
p_count += 1
    # eliminate NaN (avoid division by zero where no cube covered a voxel)
nan_idx = (cnt_classes_mat == 0)
cnt_classes_mat[nan_idx] = 1.0
# average
compose_vol = map_classes_mat / cnt_classes_mat
return compose_vol
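def _example_compose_round_trip(vol_dim=(96, 96, 96), cube_size=32, ita=1, class_n=3):
    # Sketch of the expected cube-list layout (values are random, not real
    # predictions): fabricate one label cube per tiling position, in the same
    # traversal order used by decompose_vol2cube_brain, then fuse them back
    # into a full volume by voxel-wise voting.
    fold, _ = fit_cube_param(vol_dim, cube_size, ita)
    n_cubes = int(fold[0] * fold[1] * fold[2])
    cube_list = [np.random.randint(0, class_n, (cube_size,) * 3)
                 for _ in range(n_cubes)]
    return compose_label_cube2vol(cube_list, vol_dim, cube_size, ita, class_n)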
# Remove small connected components
def remove_minor_cc(vol_data, rej_ratio, rename_map):
"""Remove small connected components refer to rejection ratio"""
"""Usage
# rename_map = [0, 205, 420, 500, 550, 600, 820, 850]
# nii_path = '/home/xinyang/project_xy/mmwhs2017/dataset/ct_output/test/test_4.nii'
# vol_file = nib.load(nii_path)
# vol_data = vol_file.get_data().copy()
# ref_affine = vol_file.affine
# rem_vol = remove_minor_cc(vol_data, rej_ratio=0.2, class_n=8, rename_map=rename_map)
# # save
# rem_path = 'rem_cc.nii'
# rem_vol_file = nib.Nifti1Image(rem_vol, ref_affine)
# nib.save(rem_vol_file, rem_path)
#===# possible be parallel in future
"""
rem_vol = copy.deepcopy(vol_data)
class_n = len(rename_map)
# retrieve all classes
for c in range(1, class_n):
print('processing class %d...' % c)
class_idx = (vol_data == rename_map[c]) * 1
class_vol = np.sum(class_idx)
labeled_cc, num_cc = measurements.label(class_idx)
# retrieve all connected components in this class
for cc in range(1, num_cc + 1):
single_cc = ((labeled_cc == cc) * 1)
single_vol = np.sum(single_cc)
# remove if too small
if single_vol / (class_vol * 1.0) < rej_ratio:
rem_vol[labeled_cc == cc] = 0
return rem_vol
def background_num_to_save(input_gt, fg_ratio, bg_ratio):
background_num = tf.reduce_sum(input_gt[:, :, :, :, 0])
total_num = tf.reduce_sum(input_gt)
foreground_num = total_num - background_num
# save_back_ground_num = tf.reduce_max(
# [2 * foreground_num, background_num / 32]) # set the number of background samples to reserve
save_back_ground_num = tf.reduce_max(
[fg_ratio * foreground_num, background_num / bg_ratio]) # set the number of background samples to reserve
save_back_ground_num = tf.clip_by_value(
save_back_ground_num, 0, background_num)
return save_back_ground_num
def no_background(input_gt):
return input_gt
def exist_background(input_gt, pred, save_back_ground_num):
batch, in_depth, in_height, in_width, in_channels = [
int(d) for d in input_gt.get_shape()]
pred_data = pred[:, :, :, :, 0]
gt_backgound_data = 1 - input_gt[:, :, :, :, 0]
pred_back_ground_data = tf.reshape(
pred_data, (batch, in_depth * in_height * in_width))
gt_back_ground_data = tf.reshape(
gt_backgound_data,
(batch,
in_depth *
in_height *
in_width))
new_pred_data = pred_back_ground_data + gt_back_ground_data
mask = []
for i in range(batch):
gti = -1 * new_pred_data[i, :]
max_k_number, index = tf.nn.top_k(
gti, save_back_ground_num)
max_k = tf.reduce_min(max_k_number)
one = tf.ones_like(gti) # all 1 mask
zero = tf.zeros_like(gti) # all 0 mask
mask_slice = tf.where(gti < max_k, x=zero, y=one)
mask_slice = tf.reshape(mask_slice, [in_depth, in_height, in_width])
mask.append(mask_slice)
mask = tf.expand_dims(mask, -1)
other_mask = tf.ones([batch,
in_depth,
in_height,
in_width,
in_channels - 1],
tf.float32)
full_mask = tf.concat([mask, other_mask], 4)
input_gt = full_mask * input_gt
return input_gt
# Get a background mask for the groundtruth so that we can
# discard the unnecessary background information
def produce_mask_background(input_gt, pred, fg_ratio, bg_ratio):
save_back_ground_num = background_num_to_save(
input_gt, fg_ratio, bg_ratio) # Get the background numbers to reserve from groundtruth
save_back_ground_num = tf.cast(
save_back_ground_num,
dtype=tf.int32)
product = tf.cond(
save_back_ground_num < 5,
lambda: no_background(input_gt),
lambda: exist_background(
input_gt,
pred,
save_back_ground_num))
return product
def fillhole(input_image):
    '''
    Fill the holes of a binary grayscale image using the flood-fill method.
    Note: only holes enclosed by connected regions will be filled.
    :param input_image:
    :return:
    '''
im_flood_fill = input_image.copy()
h, w = input_image.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
im_flood_fill = im_flood_fill.astype("uint8")
cv.floodFill(im_flood_fill, mask, (0, 0), 255)
im_flood_fill_inv = cv.bitwise_not(im_flood_fill)
img_out = input_image | im_flood_fill_inv
return img_out
def postprocessing(input_volume):
_,_, slices = input_volume.shape
volume_out = np.zeros(input_volume.shape, dtype="int16")
input_volume = input_volume*255
for i in range(slices):
temp = fillhole(input_volume[..., i])
volume_out[:, :, i] = temp
volume_out = (volume_out/255).astype("int16")
return volume_out
def majority_voting(array):
    '''
    This function implements the majority voting algorithm.
    :param array: input array to be processed
    :return: the majority element and its count, as returned by Counter.most_common(1)
    '''
count = Counter(array)
majo = count.most_common(1)
return majo
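# e.g. majority_voting(np.array([1, 2, 1, 2, 2])) -> [(2, 3)], i.e. the value
# 2 occurs three times; this matches the scratch test in the __main__ block below.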
def multi_majority_voting(ndaray):
shape = ndaray.shape
out = np.zeros(shape[0:3])
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
array_vote = [ndaray[i,j,k,0],ndaray[i,j,k,1],ndaray[i,j,k,2],ndaray[i,j,k,3],ndaray[i,j,k,4] ]
out[i,j,k] = majority_voting(array_vote)[0][0]
return out
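def multi_majority_voting_fast(ndaray):
    # A hypothetical vectorized alternative to the triple Python loop above,
    # assuming scipy is available. Ties may be broken differently than with
    # Counter.most_common, and scipy's keepdims default changed around 1.9,
    # so the reshape below normalizes the output shape either way.
    from scipy import stats
    mode = stats.mode(ndaray, axis=-1).mode
    return np.reshape(mode, ndaray.shape[:3])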
def five_fold_validation(dataset, outpath):
    # note: the fold paths are hard-coded; fuse the per-fold predictions of
    # each case by majority voting, using the helpers defined above
    path1 = '/home/server/home/5foldtest/fold1'
    path2 = '/home/server/home/5foldtest/fold2'
    path3 = '/home/server/home/5foldtest/fold3'
    path4 = '/home/server/home/5foldtest/fold4'
    path5 = '/home/server/home/5foldtest/fold5'
    file_list = os.listdir(path1)
    datalist = []
    for file in file_list:
        # load the prediction of every fold for this case
        fold_data = []
        for fold_path in (path1, path2, path3, path4, path5):
            volume = nib.load(os.path.join(fold_path, file))
            fold_data.append(volume.get_data())
        # stack the folds along a new last axis and vote voxel-wise
        stacked = np.stack(fold_data, axis=-1)
        datalist.append(multi_majority_voting(stacked))
    return datalist
def load_train_ini(ini_file):
# initialize
cf = configparser.ConfigParser()
cf.read(ini_file, encoding="utf-8-sig")
# dictionary list
param_sections = []
s = cf.sections()
for d in range(len(s)):
# create dictionary
level_dict = dict(phase=cf.get(s[d], "phase"),
batch_size=cf.getint(s[d], "batch_size"),
inputI_size=cf.getint(s[d], "inputI_size"),
inputI_chn=cf.getint(s[d], "inputI_chn"),
outputI_size=cf.getint(s[d], "outputI_size"),
output_chn=cf.getint(s[d], "output_chn"),
rename_map=cf.get(s[d], "rename_map"),
resize_r=cf.getfloat(s[d], "resize_r"),
traindata_dir=cf.get(s[d], "traindata_dir"),
chkpoint_dir=cf.get(s[d], "chkpoint_dir"),
learning_rate=cf.getfloat(s[d], "learning_rate"),
beta1=cf.getfloat(s[d], "beta1"),
epoch=cf.getint(s[d], "epoch"),
model_name=cf.get(s[d], "model_name"),
save_intval=cf.getint(s[d], "save_intval"),
testdata_dir=cf.get(s[d], "testdata_dir"),
labeling_dir=cf.get(s[d], "labeling_dir"),
ovlp_ita=cf.getint(s[d], "ovlp_ita"),
step=cf.getint(s[d], "step"),
Stages=cf.getint(s[d], "Stages"),
Blocks=cf.getint(s[d], "Blocks"),
Columns=cf.getint(s[d], "Columns"),
fg_ratio=cf.getfloat(s[d], "fg_ratio"),
bg_ratio=cf.getfloat(s[d], "bg_ratio"),
focal_loss_flag=cf.getboolean(s[d], "focal_loss_flag"))
# add to list
param_sections.append(level_dict)
return param_sections
if __name__ == '__main__':
path = '/home/server/home/5foldtest/fold1/validation/BraTS19_UAB_3498_1.nii.gz'
path2 = '/home/server/home/5foldtest/'
    dfdfd = five_fold_validation(path2, "")
arrrr = np.array(dfdfd)
vol = nib.load(path)
img = vol.get_data()
shape = img.shape
a = [1,2,1,2,3]
aa = [1,2,1,2,2]
aaa = np.array(aa)
tim1 = time.time()
# ndar = np.random.randint(0,4,size=(240,240,155,5))
ndar = np.random.randint(0, 4, size=(240, 240, 155, 5))
out = multi_majority_voting(ndar)
tim2 = time.time()
elaps = tim2 - tim1
b = majority_voting(aaa)
|
408516
|
b = False
e = "Hello world"
while b:
print(b)
d = True
while d:
print(d and False)
print(d and True)
print("hello")
|
408560
|
from pydantic import validator, BaseModel
from app.models.schema.base import PityModel
class DatabaseForm(BaseModel):
id: int = None
name: str
host: str
port: int = None
username: str
password: str
database: str
sql_type: int
env: int
@validator("name", "host", "port", "username", "password", "database", "sql_type", "env")
def data_not_empty(cls, v):
return PityModel.not_empty(v)
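# A hypothetical usage sketch (field values are made up; validation is
# delegated to PityModel.not_empty, so empty fields would be rejected):
#
#   form = DatabaseForm(name="demo", host="127.0.0.1", port=3306,
#                       username="root", password="secret",
#                       database="pity", sql_type=0, env=1)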
|
408601
|
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from data import CenterAffine
def gather_feature(fmap, index, mask=None, use_transform=False):
if use_transform:
        # change a (N, C, H, W) tensor to (N, HxW, C) shape
batch, channel = fmap.shape[:2]
fmap = fmap.view(batch, channel, -1).permute((0, 2, 1)).contiguous()
dim = fmap.size(-1)
index = index.unsqueeze(len(index.shape)).expand(*index.shape, dim)
fmap = fmap.gather(dim=1, index=index)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(fmap)
fmap = fmap[mask]
fmap = fmap.reshape(-1, dim)
return fmap
class CenterNetDecoder(object):
@staticmethod
def decode(fmap, wh, reg=None, cat_spec_wh=False, K=100):
r"""
decode output feature map to detection results
Args:
fmap(Tensor): output feature map
wh(Tensor): tensor that represents predicted width-height
            reg(Tensor): tensor that represents regression of center points
cat_spec_wh(bool): whether apply gather on tensor `wh` or not
K(int): topk value
"""
batch, channel, height, width = fmap.shape
fmap = CenterNetDecoder.pseudo_nms(fmap)
scores, index, clses, ys, xs = CenterNetDecoder.topk_score(fmap, K=K)
if reg is not None:
reg = gather_feature(reg, index, use_transform=True)
reg = reg.reshape(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
wh = gather_feature(wh, index, use_transform=True)
if cat_spec_wh:
wh = wh.view(batch, K, channel, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).reshape(batch, K, 2)
else:
wh = wh.reshape(batch, K, 2)
clses = clses.reshape(batch, K, 1).float()
scores = scores.reshape(batch, K, 1)
half_w, half_h = wh[..., 0:1] / 2, wh[..., 1:2] / 2
bboxes = torch.cat([xs - half_w, ys - half_h, xs + half_w, ys + half_h], dim=2)
detections = (bboxes, scores, clses)
return detections
@staticmethod
def transform_boxes(boxes, img_info, scale=1):
r"""
transform predicted boxes to target boxes
Args:
boxes(Tensor): torch Tensor with (Batch, N, 4) shape
img_info(dict): dict contains all information of original image
scale(float): used for multiscale testing
"""
boxes = boxes.cpu().numpy().reshape(-1, 4)
center = img_info["center"]
size = img_info["size"]
output_size = (img_info["width"], img_info["height"])
src, dst = CenterAffine.generate_src_and_dst(center, size, output_size)
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
coords = boxes.reshape(-1, 2)
aug_coords = np.column_stack((coords, np.ones(coords.shape[0])))
target_boxes = np.dot(aug_coords, trans.T).reshape(-1, 4)
return target_boxes
@staticmethod
def pseudo_nms(fmap, pool_size=3):
r"""
apply max pooling to get the same effect of nms
Args:
fmap(Tensor): output tensor of previous step
pool_size(int): size of max-pooling
"""
pad = (pool_size - 1) // 2
fmap_max = F.max_pool2d(fmap, pool_size, stride=1, padding=pad)
keep = (fmap_max == fmap).float()
return fmap * keep
@staticmethod
def topk_score(scores, K=40):
"""
get top K point in score map
"""
batch, channel, height, width = scores.shape
# get topk score and its index in every H x W(channel dim) feature map
topk_scores, topk_inds = torch.topk(scores.reshape(batch, channel, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
        # get all topk entries in a batch
topk_score, index = torch.topk(topk_scores.reshape(batch, -1), K)
# div by K because index is grouped by K(C x K shape)
topk_clses = (index / K).int()
topk_inds = gather_feature(topk_inds.view(batch, -1, 1), index).reshape(batch, K)
topk_ys = gather_feature(topk_ys.reshape(batch, -1, 1), index).reshape(batch, K)
topk_xs = gather_feature(topk_xs.reshape(batch, -1, 1), index).reshape(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
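def _example_topk_score():
    # Minimal smoke-test sketch (shapes are illustrative, not from the
    # source): run pseudo-NMS on a random class heatmap, then pick the
    # top-10 peaks per image across all classes.
    fmap = torch.rand(2, 80, 128, 128)
    fmap = CenterNetDecoder.pseudo_nms(fmap)
    scores, inds, clses, ys, xs = CenterNetDecoder.topk_score(fmap, K=10)
    return scores.shape  # torch.Size([2, 10])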
|
408631
|
import tensorflow as tf
import numpy as np
import tqdm
__all__ = ('pad_ragged_2d', 'shuffle_ragged_2d',
'inputs_to_labels', 'get_pos_encoding',
'get_quant_time', 'softmax_with_temp',
'generate_midis')
def pad_ragged_2d(ragged_tensor, pad_idx):
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensor.row_lengths(axis=-1)
maxlen = tf.math.reduce_max(lens)
mask = tf.sequence_mask(lens, maxlen, tf.bool)
zero_padded = ragged_tensor.to_tensor()
# zero_padded -> (batch_size, maxlen)
padding = tf.constant(pad_idx, dtype=zero_padded.dtype)
padded_tensor = tf.where(mask, zero_padded, padding)
# padded_tensor -> (batch_size, maxlen)
return padded_tensor
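def _example_pad_ragged():
    # Sketch: pad a ragged batch with pad_idx instead of the default zeros
    # that RaggedTensor.to_tensor() would produce.
    ragged = tf.ragged.constant([[1, 2, 3], [4]], dtype=tf.int64)
    return pad_ragged_2d(ragged, pad_idx=-1)  # [[1, 2, 3], [4, -1, -1]]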
def shuffle_ragged_2d(ragged_tensors, pad_idx, lowest_idx=5):
if not isinstance(ragged_tensors, (list, tuple)):
ragged_tensors = [ragged_tensors]
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensors[0].row_lengths(axis=-1)
kth_lowest = -tf.nn.top_k(-lens, lowest_idx).values[-1]
shuffled_tensors = [[] for _ in ragged_tensors]
for len_, *rows in zip(lens, *ragged_tensors):
assert all(row.shape[0] == len_ for row in rows)
if len_ <= kth_lowest:
new_rows = [tf.pad(row, paddings=[[0, kth_lowest - len_]],
constant_values=pad_idx) for row in rows]
else:
start_idx = tf.random.uniform(
(), minval=0, maxval=len_ - kth_lowest + 1, dtype=tf.int64)
new_rows = [row[start_idx: start_idx + kth_lowest]
for row in rows]
for tensor, row in zip(shuffled_tensors, new_rows):
tensor.append(row[tf.newaxis, :])
shuffled_tensors = [tf.concat(shuffled_tensor, axis=0)
for shuffled_tensor in shuffled_tensors]
return shuffled_tensors
def inputs_to_labels(inputs, pad_idx):
# inputs -> (batch_size, seq_len)
inputs_padded = tf.pad(inputs[:, 1:], paddings=[
[0, 0], [0, 1]], constant_values=pad_idx)
return inputs_padded
def get_pos_encoding(seq_len, d_model):
numerator = np.arange(seq_len, dtype=np.float32)
numerator = numerator[:, np.newaxis]
denominator = np.arange(0, d_model, 2, dtype=np.float32)
denominator = denominator / d_model
denominator = np.power(np.array(10000, dtype=np.float32), denominator)
denominator = 1 / denominator
denominator = np.repeat(denominator, 2)
denominator = denominator[np.newaxis, :]
encoding = np.matmul(numerator, denominator)
encoding[:, ::2] = np.sin(encoding[:, ::2])
encoding[:, 1::2] = np.cos(encoding[:, 1::2])
#encoding = encoding[np.newaxis, ...]
encoding = tf.cast(encoding, dtype=tf.float32)
return encoding
def get_quant_time():
step = 0.001
coef = 1.16
delta = 0
total_reps = 64
local_reps = 2
quant_time = []
for _ in range(total_reps // local_reps):
for _ in range(local_reps):
delta += step
quant_time.append(delta)
step *= coef
quant_time = np.sort(quant_time + [5.0, 0.0])
return quant_time
def softmax_with_temp(x, temp=1.0):
assert isinstance(temp, float)
assert temp > 0
assert all(map(lambda a: a > 0, x))
x = x / np.sum(x) / temp
x = tf.nn.softmax(x).numpy()
return x
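# e.g. softmax_with_temp(np.array([0.2, 0.5, 0.3]), temp=0.5) sharpens the
# distribution toward the largest entry, while temp > 1.0 flattens it.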
def generate_midis(model, seq_len, mem_len, max_len, parser, filenames, pad_idx, top_k=1, temp=1.0):
assert isinstance(seq_len, int)
assert seq_len > 0
assert isinstance(mem_len, int)
assert mem_len >= 0
assert isinstance(max_len, int)
assert max_len > 1
batch_size = len(filenames)
sounds, deltas = zip(*[parser.load_features(filename)
for filename in filenames])
min_len = min([len(s) for s in sounds])
orig_len = np.random.randint(1, min(2 * mem_len, min_len))
assert orig_len >= 1
sounds = np.array([sound[:orig_len] for sound in sounds])
deltas = np.array([delta[:orig_len] for delta in deltas])
# sounds -> (batch_size, orig_len)
full_len = mem_len + seq_len - 1
inputs_sound = tf.constant(sounds[:, -seq_len:])
inputs_delta = tf.constant(deltas[:, -seq_len:])
outputs_sound, outputs_delta, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=(inputs_sound, inputs_delta),
mem_list=None,
next_mem_len=mem_len,
training=False
)
for _ in tqdm.tqdm(range(max_len)):
outputs_sound = outputs_sound[:, -1, :]
probs_sound = tf.nn.softmax(outputs_sound, axis=-1).numpy()
probs_sound[:, pad_idx] = 0
# probs_sound -> (batch_size, n_sounds)
outputs_delta = outputs_delta[:, -1, :]
probs_delta = tf.nn.softmax(outputs_delta, axis=-1).numpy()
probs_delta[:, pad_idx] = 0
# probs_delta -> (batch_size, n_deltas)
new_sounds = []
for batch_probs in probs_sound:
best_idxs = batch_probs.argsort()[-top_k:][::-1]
best_probs = softmax_with_temp(batch_probs[best_idxs], temp)
new_sound = np.random.choice(best_idxs, p=best_probs)
new_sounds.append(new_sound)
new_sounds = np.array(new_sounds)[:, np.newaxis]
# new_sounds -> (batch_size, 1)
sounds = np.concatenate((sounds, new_sounds), axis=-1)
new_deltas = []
for batch_probs in probs_delta:
best_idxs = batch_probs.argsort()[-top_k:][::-1]
best_probs = softmax_with_temp(batch_probs[best_idxs], temp)
new_delta = np.random.choice(best_idxs, p=best_probs)
new_deltas.append(new_delta)
new_deltas = np.array(new_deltas)[:, np.newaxis]
# new_deltas -> (batch_size, 1)
deltas = np.concatenate((deltas, new_deltas), axis=-1)
inputs_sound = tf.constant(new_sounds)
inputs_delta = tf.constant(new_deltas)
outputs_sound, outputs_delta, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=(inputs_sound, inputs_delta),
mem_list=next_mem_list,
next_mem_len=mem_len,
training=False
)
sounds = sounds[:, orig_len:]
deltas = deltas[:, orig_len:]
midi_list = [parser.features_to_midi(
sound, delta) for sound, delta in zip(sounds, deltas)]
return midi_list, next_mem_list, attention_weight_list, attention_loss_list
def generate_text(model, seq_len, mem_len, max_len, tokenizer, start_idx, end_idx, blocked_idxs,
batch_size, beginning=None, top_k=3, temp=0.4):
if isinstance(beginning, str):
words = tokenizer.texts_to_sequences([beginning])
words = np.repeat(words, batch_size, axis=0)
start_idxs = np.full((batch_size, 1), start_idx,
dtype=words.dtype)
words = np.concatenate((start_idxs, words), axis=-1)
elif isinstance(beginning, list):
assert len(beginning) == batch_size
for string in beginning:
assert isinstance(string, str)
words = tokenizer.texts_to_sequences(beginning)
min_len = min([len(x) for x in words])
words = np.array([x[:min_len] for x in words])
start_idxs = np.full((batch_size, 1), start_idx,
dtype=words.dtype)
words = np.concatenate((start_idxs, words), axis=-1)
else:
words = np.full((batch_size, 1), start_idx)
end_flags = [False] * batch_size
end_cnt = 0
orig_len = words.shape[1]
assert orig_len >= 1
# words -> (batch_size, orig_len)
# ================================
inputs = tf.constant(words[:, -seq_len:])
outputs, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=inputs,
mem_list=None,
next_mem_len=mem_len,
training=False
)
for _ in tqdm.tqdm(range(max_len)):
outputs = outputs[:, -1, :]
probs = tf.nn.softmax(outputs, axis=-1).numpy()
probs[:, blocked_idxs] = 0
# probs -> (batch_size, n_words)
new_words = []
for batch_idx, batch_probs in enumerate(probs):
best_idxs = batch_probs.argsort()[-top_k:][::-1]
best_probs = softmax_with_temp(batch_probs[best_idxs], temp)
new_word = np.random.choice(best_idxs, p=best_probs)
new_words.append(new_word)
if new_word == end_idx and not end_flags[batch_idx]:
end_flags[batch_idx] = True
end_cnt += 1
new_words = np.array(new_words)[:, np.newaxis]
# new_words -> (batch_size, 1)
words = np.concatenate((words, new_words), axis=-1)
if end_cnt >= batch_size:
break
inputs = tf.constant(new_words)
outputs, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=inputs,
mem_list=next_mem_list,
next_mem_len=mem_len,
training=False
)
return words, end_flags
|
408646
|
import pyqrcode
import png
from pyqrcode import QRCode
# Text which is to be converted to QR code
print("Enter text to convert")
s=input(": ")
# Name of QR code png file
print("Enter image name to save")
n=input(": ")
# Adding extension as .png
d = n + ".png"
# Creating QR code
url=pyqrcode.create(s)
# Saving QR code as a png file
url.show()
url.png(d, scale=6)
|
408714
|
class Room:
def __init__(self):
pass
def intro_text(self):
raise NotImplementedError()
def adjacent_moves(self):
pass
def available_actions(self):
pass
class StartingRoom(Room):
def __init__(self, ):
super().__init__()
def intro_text(self):
return """You find yourself in a cave with a flickering torch on the wall.
You can make out four paths, each equally foreboding.
"""
class EmptyCavePath(Room):
def intro_text(self):
return """
Another unremarkable part of the cave. You must forge onwards.
"""
|
408715
|
import asyncio
import aiodocker
from async_timeout import timeout
from fastapi import FastAPI
from simcore_service_director_v2.models.schemas.constants import (
DYNAMIC_SIDECAR_SERVICE_PREFIX,
)
from simcore_service_director_v2.modules.dynamic_sidecar.scheduler import (
DynamicSidecarsScheduler,
)
SERVICE_WAS_CREATED_BY_DIRECTOR_V2 = 20
async def ensure_network_cleanup(
docker_client: aiodocker.Docker, project_id: str
) -> None:
network_names = {x["Name"] for x in await docker_client.networks.list()}
for network_name in network_names:
if project_id in network_name:
network = await docker_client.networks.get(network_name)
try:
                # if there is an error this cleans up the environment,
                # which is useful during development and avoids leaving
                # too many hanging networks behind
delete_result = await network.delete()
assert delete_result is True
except aiodocker.exceptions.DockerError as e:
                # if the test succeeds the network will not exist
str_error = str(e)
assert "network" in str_error
assert "not found" in str_error
async def patch_dynamic_service_url(app: FastAPI, node_uuid: str) -> None:
"""
    Normally director-v2 talks via docker networks with the dynamic-sidecar.
Since the director-v2 was started outside docker and is not
running in a container, the service port needs to be exposed and the
url needs to be changed to 172.17.0.1 (docker localhost)
returns: the local endpoint
"""
service_name = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}"
port = None
async with aiodocker.Docker() as docker_client:
async with timeout(SERVICE_WAS_CREATED_BY_DIRECTOR_V2):
# it takes a bit of time for the port to be auto generated
# keep trying until it is there
while port is None:
services = await docker_client.services.list()
for service in services:
if service["Spec"]["Name"] == service_name:
ports = service["Endpoint"].get("Ports", [])
if len(ports) == 1:
port = ports[0]["PublishedPort"]
break
await asyncio.sleep(1)
    # patch the endpoint inside the scheduler
scheduler: DynamicSidecarsScheduler = app.state.dynamic_sidecar_scheduler
async with scheduler._lock: # pylint: disable=protected-access
for entry in scheduler._to_observe.values(): # pylint: disable=protected-access
if entry.scheduler_data.service_name == service_name:
entry.scheduler_data.dynamic_sidecar.hostname = "172.17.0.1"
entry.scheduler_data.dynamic_sidecar.port = port
endpoint = entry.scheduler_data.dynamic_sidecar.endpoint
assert endpoint == f"http://172.17.0.1:{port}"
break
|
408717
|
from yoyo import step
step(
"CREATE TABLE IF NOT EXISTS evolve_log (pokemon text, iv real, cp real, dated datetime DEFAULT CURRENT_TIMESTAMP)"
)
|
408752
|
from rest_framework import serializers
from mayan.apps.rest_api.relations import MultiKwargHyperlinkedIdentityField
from ..models.document_type_models import DocumentType, DocumentTypeFilename
class DocumentTypeQuickLabelSerializer(serializers.ModelSerializer):
document_type_url = serializers.HyperlinkedIdentityField(
lookup_url_kwarg='document_type_id',
view_name='rest_api:documenttype-detail'
)
url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_type_id',
'lookup_url_kwarg': 'document_type_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_type_quick_label_id',
},
),
view_name='rest_api:documenttype-quicklabel-detail'
)
class Meta:
fields = ('document_type_url', 'enabled', 'filename', 'id', 'url')
model = DocumentTypeFilename
read_only_fields = ('document_type_url', 'id', 'url')
class DocumentTypeSerializer(serializers.HyperlinkedModelSerializer):
quick_label_list_url = serializers.HyperlinkedIdentityField(
lookup_url_kwarg='document_type_id',
view_name='rest_api:documenttype-quicklabel-list'
)
class Meta:
extra_kwargs = {
'url': {
'lookup_url_kwarg': 'document_type_id',
'view_name': 'rest_api:documenttype-detail'
},
}
fields = (
'delete_time_period', 'delete_time_unit',
'filename_generator_backend',
'filename_generator_backend_arguments', 'id', 'label',
'quick_label_list_url', 'trash_time_period', 'trash_time_unit',
'url'
)
model = DocumentType
read_only_fields = ('id', 'quick_label_list_url', 'url')
|
408769
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get(isdsAppliance, check_mode=False, force=False):
"""
Get all snmp objects
"""
return isdsAppliance.invoke_get("Get all alert objects",
"/system_alerts")
def enable(isdsAppliance, uuid, objType, check_mode=False, force=False):
"""
Enable a system alert
"""
if force is True or _check(isdsAppliance, uuid) is False:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post(
"Enable a system alert",
"/system_alerts",
{
'uuid': uuid,
'objType': objType
})
return isdsAppliance.create_return_object()
def disable(isdsAppliance, uuid, check_mode=False, force=False):
"""
Delete a system alert
"""
if force is True or _check(isdsAppliance, uuid) is True:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_delete(
"Delete a system alert",
"/system_alerts/{0}".format(uuid))
return isdsAppliance.create_return_object()
def _check(isdsAppliance, uuid):
"""
Check if the system alert exists or not
"""
ret_obj = get(isdsAppliance)
for obj in ret_obj['data']['responses']:
if obj['uuid'] == uuid:
return True
return False
def compare(isdsAppliance1, isdsAppliance2):
"""
Compare system alert objects between two appliances
"""
ret_obj1 = get(isdsAppliance1)
ret_obj2 = get(isdsAppliance2)
for obj in ret_obj1['data']['responses']:
del obj['uuid']
for obj in ret_obj2['data']['responses']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
|
408800
|
import FWCore.ParameterSet.Config as cms
RawDataConverter = cms.EDAnalyzer(
"RawDataConverter",
# list of digi producers
DigiProducersList = cms.VPSet(
cms.PSet(
DigiLabel = cms.string( 'ZeroSuppressed' ),
DigiProducer = cms.string( 'laserAlignmentT0Producer' ), #simSiStripDigis
DigiType = cms.string( 'Processed' )
),
cms.PSet(
DigiLabel = cms.string('ZeroSuppressed'),
DigiType = cms.string('Processed'),
DigiProducer = cms.string('siStripDigis')
),
cms.PSet(
DigiLabel = cms.string('VirginRaw'),
DigiType = cms.string('Raw'),
DigiProducer = cms.string('siStripDigis')
),
cms.PSet(
DigiLabel = cms.string('ProcessedRaw'),
DigiType = cms.string('Raw'),
DigiProducer = cms.string('siStripDigis')
),
cms.PSet(
DigiLabel = cms.string('ScopeMode'),
DigiType = cms.string('Raw'),
DigiProducer = cms.string('siStripDigis')
)
),
DigiModuleLabels = cms.vstring(
'laserAlignmentT0Producer',
'siStripDigis'
),
ProductInstanceLabels = cms.vstring(
'ZeroSuppressed',
'VirginRaw'
)
)
|
408834
|
from build.pymod import test_runtime_error
try:
test_runtime_error()
except RuntimeError as e:
print(e)
print('successfully caught error')
|
408841
|
import functools
import tensorflow as tf
import math
import sys
def doublewrap(function):
"""
A decorator decorator, allowing to use the decorator to be used without
parentheses if not arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
def _auc_pr(true, prob, threshold):
pred = tf.where(prob > threshold, tf.ones_like(prob), tf.zeros_like(prob))
tp = tf.logical_and(tf.cast(pred, tf.bool), tf.cast(true, tf.bool))
fp = tf.logical_and(tf.cast(pred, tf.bool), tf.logical_not(tf.cast(true, tf.bool)))
fn = tf.logical_and(tf.logical_not(tf.cast(pred, tf.bool)), tf.cast(true, tf.bool))
tn = tf.logical_and(tf.logical_not(tf.cast(pred, tf.bool)), tf.logical_not(tf.cast(true, tf.bool)))
FPR = tf.truediv(tf.reduce_sum(tf.cast(fp, tf.int32)),
tf.reduce_sum(tf.cast(tf.logical_or(tn, fp), tf.int32)))
TPR = tf.truediv(tf.reduce_sum(tf.cast(tp, tf.int32)),
tf.reduce_sum(tf.cast(tf.logical_or(tp, fn), tf.int32)))
PPV = tf.truediv(tf.reduce_sum(tf.cast(tp, tf.int32)),
tf.reduce_sum(tf.cast(tf.logical_or(tp, fp), tf.int32)))
return FPR, TPR, PPV
class Model(object):
# parameter lists
initial_variation=0.001 #standard deviation of initial variables in the convolution filters
#mini batch size
dimension1=320 #the number of the convolution filters in the 1st layer
dimension2=480 #the number of the convolution filters in the 2nd layer
dimension21=960
dimension4=925 #the number of the neurons in each layer of the fully-connected neural network
conv1_filter=8
#conv1_filter2=49
conv2_filter=8
conv21_filter=8
train_speed=0.0001
def __init__(self, *args, **kwargs):
self.data_length=kwargs["data_length"]
self.image = kwargs["image"]
self.label = kwargs["label"]
self.phase=kwargs["phase"]
self.keep_prob=kwargs["keep_prob"]
self.keep_prob2=kwargs["keep_prob2"]
self.keep_prob3=kwargs["keep_prob3"]
self.start_at=kwargs["start_at"]
self.output_dir=kwargs["output_dir"]
self.max_to_keep=kwargs["max_to_keep"]
self.GPUID=kwargs["GPUID"]
self.fc1_param=int(math.ceil((math.ceil((math.ceil((
self.data_length-self.conv1_filter+1)/4.0)
-self.conv2_filter+1)/4.0)
-self.conv21_filter+1)/1.0))
self.prediction
self.optimize
self.error
self.saver
self.cost
        print('Running deepsea model')
if self.output_dir is not None:
flog=open(str(self.output_dir)+'.log', 'w')
            flog.write(str(sys.argv[0])+"\n"
                +"the filter number of conv1:"+ str(self.dimension1)+"\n"
                +"the filter size of conv1:"+ str(self.conv1_filter)+"\n"
                +"the filter number of conv2:"+ str(self.dimension2)+"\n"
                +"the filter size of conv2:"+ str(self.conv2_filter)+"\n"
                +"the filter number of conv21:"+ str(self.dimension21)+"\n"
                +"the filter size of conv21:"+ str(self.conv21_filter)+"\n"
                +"the number of neurons in the fully-connected layer:"+ str(self.dimension4)+"\n"
                +"the standard deviation of initial variables:"+ str(self.initial_variation)+"\n"
                +"train speed:"+ str(self.train_speed)+"\n"
                +"data length:" + str(self.data_length)+"\n")
flog.close()
@define_scope
def prediction(self):
with tf.device('/device:GPU:'+self.GPUID):
x_image = self.image
def weight_variable(shape, variable_name):
initial = tf.truncated_normal(shape, mean=0, stddev=self.initial_variation)
return tf.Variable(initial, name=variable_name)
def bias_variable(shape, variable_name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=variable_name)
def bias_variable_high(shape, variable_name, carry_bias=-0.1):
initial = tf.constant(carry_bias, shape=shape)
return tf.Variable(initial, name=variable_name)
def conv2d_1(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 2, 1, 1], padding='VALID')
def conv2d_depth(x, W):
return tf.nn.depthwise_conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
def max_pool_4x1(x):
return tf.nn.max_pool(x, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
def max_pool_8x1(x):
return tf.nn.max_pool(x, ksize=[1, 17, 1, 1], strides=[1, 17, 1, 1], padding='SAME')
#fc1_param=int(math.ceil((math.ceil((data_length-conv1_filter+1)/4.0)-conv2_filter+1)/2.0))
#fc1_param=int(math.ceil((math.ceil((math.ceil((math.ceil((data_length-conv1_filter+1)/1.0)-conv2_filter+1)/2.0)-conv22_filter+1)/2.0)-conv3_filter+1)/2.0))
l2norm_list=[]
W_conv1 = weight_variable([self.conv1_filter, 4, 1, self.dimension1], 'W_conv1')
cond=tf.constant(0.9)
wconv1_l2=tf.reduce_sum(tf.square(W_conv1))
l2norm_list.append(wconv1_l2)
W_conv1.assign(tf.cond(wconv1_l2>cond, lambda: tf.multiply(W_conv1, cond/wconv1_l2),lambda: W_conv1 ))
h_conv1 = tf.nn.relu(conv2d_1(x_image, W_conv1))
h_pool1 = tf.nn.dropout(max_pool_4x1(h_conv1), self.keep_prob)
W_conv2 = weight_variable([self.conv2_filter, 1, self.dimension1, self.dimension2], 'W_conv2')
wconv2_l2=tf.reduce_sum(tf.square(W_conv2))
l2norm_list.append(wconv2_l2)
W_conv2.assign(tf.cond(wconv2_l2>cond, lambda: tf.multiply(W_conv2, cond/wconv2_l2),lambda: W_conv2 ))
#h_conv2 = tf.nn.dropout(tf.nn.relu(tf.nn.batch_normalization(conv2d(h_conv1, W_conv2), mean=0.0, variance=1, offset=0, scale=1, variance_epsilon=0.001)), keep_prob2)
h_conv2 = tf.nn.relu(conv2d_1(h_pool1, W_conv2))
h_pool2 = tf.nn.dropout(max_pool_4x1(h_conv2), self.keep_prob)
W_conv21 = weight_variable([self.conv21_filter, 1, self.dimension2, self.dimension21], 'W_conv21')
wconv21_l2=tf.reduce_sum(tf.square(W_conv21))
l2norm_list.append(wconv21_l2)
W_conv21.assign(tf.cond(wconv21_l2>cond, lambda: tf.multiply(W_conv21, cond/wconv21_l2),lambda: W_conv21 ))
#h_conv22 = tf.nn.relu(tf.nn.batch_normalization(conv2d(h_conv2, W_conv22), mean=0.0, variance=1, offset=0, scale=1, variance_epsilon=0.001))
h_conv21 = tf.nn.dropout(tf.nn.relu(conv2d_1(h_pool2, W_conv21)), self.keep_prob2)
#h_pool21 = max_pool_4x1(h_conv21)
W_fc1 = weight_variable([1 * self.fc1_param * self.dimension21, self.dimension4], 'W_fc1')
wfc1_l2=tf.reduce_sum(tf.square(W_fc1))
l2norm_list.append(wfc1_l2)
W_fc1.assign(tf.cond(wfc1_l2>cond, lambda: tf.multiply(W_fc1, cond/wfc1_l2),lambda: W_fc1 ))
b_fc1 = bias_variable([self.dimension4], 'b_fc1')
bfc1_l2=tf.reduce_sum(tf.square(b_fc1))
l2norm_list.append(bfc1_l2)
b_fc1.assign(tf.cond(bfc1_l2>cond, lambda: tf.multiply(b_fc1, cond/bfc1_l2),lambda: b_fc1 ))
h_pool3_flat = tf.reshape(h_conv21, [-1, 1*self.fc1_param*self.dimension21])
h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_pool3_flat, W_fc1), b_fc1))
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob3)
label_shape=self.label.shape[1]
W_fc4 = weight_variable([self.dimension4, tf.cast(label_shape, tf.int32)], 'W_fc4')
wfc4_l2=tf.reduce_sum(tf.square(W_fc4))
l2norm_list.append(wfc4_l2)
W_fc4.assign(tf.cond(wfc4_l2>cond, lambda: tf.multiply(W_fc4, cond/wfc4_l2),lambda: W_fc4 ))
b_fc4 = bias_variable([label_shape], 'b_fc4')
bfc4_l2=tf.reduce_sum(tf.square(b_fc4))
l2norm_list.append(bfc4_l2)
b_fc4.assign(tf.cond(bfc4_l2>cond, lambda: tf.multiply(b_fc4, cond/bfc4_l2),lambda: b_fc4 ))
y_conv=tf.add(tf.matmul(h_fc1_drop, W_fc4), b_fc4)
variable_dict={"W_conv1": W_conv1, "W_conv2": W_conv2,"W_conv21": W_conv21,
"W_fc1": W_fc1,"W_fc4": W_fc4,
"b_fc1": b_fc1, "b_fc4": b_fc4}
neurons_dict={"h_conv21":h_conv21, "h_conv2":h_conv2, "h_conv1":h_conv1,"h_fc1_drop": h_fc1_drop}
return y_conv,tf.nn.sigmoid(y_conv), variable_dict, neurons_dict, l2norm_list
@define_scope
def saver(self):
return tf.train.Saver(max_to_keep=self.max_to_keep)
@define_scope
def cost(self):
with tf.device('/device:GPU:'+self.GPUID):
"""nll=tf.reduce_mean(-tf.reduce_sum(
tf.log(
tf.add(
tf.clip_by_value(tf.multiply(self.label, self.prediction[1]),1e-10,1.0),
tf.clip_by_value(tf.multiply(tf.subtract(1.00,self.label), tf.subtract(1.00,self.prediction[1])),1e-10,1.0))
),
reduction_indices=[1]))"""
nll=tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.label, logits=self.prediction[0],pos_weight=1.0))
l2_norm=tf.reduce_sum(self.prediction[4])
l1_norm=tf.reduce_sum(tf.abs(self.prediction[1]))
return tf.add_n([nll,tf.multiply((5*10**-7), l2_norm),tf.multiply((1*10**-8),l1_norm)])
#return tf.reduce_mean(-tf.reduce_sum(self.label * tf.log(tf.clip_by_value(self.prediction[0],1e-10,1.0))+(1-self.label)*tf.log(tf.clip_by_value(1-self.prediction[0],1e-10,1.0)), reduction_indices=[1]))
@define_scope
def optimize(self):
with tf.device('/device:GPU:'+self.GPUID):
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
#cost = tf.reduce_mean(tf.reduce_sum(tf.square(y_conv*tf.log(tf.clip_by_value(2*y_conv,1e-10,1.0)/(tf.clip_by_value(y_conv,1e-10,1.0)+tf.clip_by_value(y_,1e-10,1.0)))+y_*tf.log(2*tf.clip_by_value(y_,1e-10,1.0)/(tf.clip_by_value(y_conv,1e-10,1.0)+tf.clip_by_value(y_,1e-10,1.0)))), reduction_indices=[1]))
#cost = tf.reduce_sum(tf.square(y_conv*tf.log(tf.clip_by_value(2*y_conv,1e-10,1.0)/(tf.clip_by_value(y_conv,1e-10,1.0)+tf.clip_by_value(y_,1e-10,1.0)))+y_*tf.log(2*tf.clip_by_value(y_,1e-10,1.0)/(tf.clip_by_value(y_conv,1e-10,1.0)+tf.clip_by_value(y_,1e-10,1.0)))))
optimizer = tf.train.AdamOptimizer(self.train_speed)
return optimizer.minimize(self.cost)
@define_scope
def error(self):
with tf.device('/device:GPU:'+self.GPUID):
class_n=self.label.shape[1]
FPR_list=[]
TPR_list=[]
PPV_list=[]
for i in range(class_n):
true=self.label[:,i]
prob=self.prediction[1][:,i]
FPR, TPR, PPV=_auc_pr(true,prob,0.5)
FPR_list.append(FPR)
TPR_list.append(TPR)
PPV_list.append(PPV)
return FPR_list, TPR_list, PPV_list
|
408909
|
from importlib import import_module
from ..config import *
import fython.traceback as tbk
from .lexem import Lexem
class Unit:
def __init__(s, t, module=None):
s.lex_type = t.type
s.value = t.value
s.lineno = t.lineno
s.url_value = t.value
s.is_interpolation_safe = 1
s.lexem = [Lexem(type=t.type, lineno=t.lineno, unit=s)]
s.raw = []
t.value = s
if module:
s.module = module
else:
s.module = t.lexer.module
# helper
s.is_module = 0
s.is_classpec = 0
s.is_varpec = 0
s.is_routpec = 0
s.next_linecod = None
s.previous_linecod = None
s.guid_overwrite = 0
s.is_klass_pset = 0
s.is_iruc_target = 1
def __format__(s, format):
return str(s)
def __str__(s):
return s.value
@property
def newline(s):
r = ' ! {:d}\n'.format(s.lineno)
return r
# tbk
@property
def tbk_frame(s):
if s.module.debug:
if s.module.tbk_frame_rout[s.level]:
n = s.module.tbk_frame_rout[s.level]
s.module.tbk_frame_rout[s.level] = 0
return tbk.init_frame(s.module.value, n)
return ''
@property
def tbk_eframe(s):
if s.debug:
if s.module.tbk_frame_rout[s.level] == 0:
return tbk.del_frame()
return ''
def tbk_mark_enable(s):
s.module.tbk_mark_on = 1
def tbk_mark_disable(s):
s.module.tbk_mark_on = 0
@property
def tbk_mark(s):
if s.release or s.module.tbk_mark_off:
return ''
elif s.module.tbk_frame_rout[s.level]:
n = s.module.tbk_frame_rout[s.level]
s.module.tbk_frame_rout[s.level] = 0
return tbk.init_frame(s.module.value, n) + tbk.open_line(s.lineno)
else:
return tbk.open_line(s.lineno)
@property
def tbk_emark(s):
if s.release or s.module.tbk_mark_off:
return ''
else:
return tbk.close_line()
def throw(s, error, **kwargs):
return s.module.throw(
error = error,
line = s.lineno,
**kwargs
)
def Buffer(s):
return Buffer(newline = s.newline)
# unit querying
@property
def nb_linecod(s):
return len(s.linecod)
@property
def nb_modifier(s):
return len(s.modifier)
@property
def is_bdotx(s):
return s.unit == l.bdotx
@property
def is_commax(s):
return s.unit == l.commax
@property
def is_childcod(s):
return s.unit == l.childcod
@property
def is_dotbol(s):
return s.unit == l.dotbol
@property
def is_dedentx(s):
return s.unit == l.dedentx
@property
def is_eofx(s):
return s.unit == l.eofx
@property
def is_eopx(s):
return s.unit == l.eopx
@property
def is_opx(s):
return s.unit == l.opx
@property
def is_enumbol(s):
return s.unit == l.enumbol
@property
def is_funbol(s):
return s.unit == l.funbol
@property
def is_ibol(s):
return s.unit == l.ibol
@property
def is_interpolationx(s):
return s.unit == l.interpolationx
@property
def is_ketbol(s):
return s.unit == l.ketbol
@property
def is_lkcax(s):
return s.unit == l.lkcax
@property
def is_lpcax(s):
return s.unit == l.lpcax
@property
def is_linefeedx(s):
return s.unit == l.linefeedx
@property
def is_lpackagex(s):
return s.unit == l.lpackagex
@property
def is_opbol(s):
return s.unit == l.opbol
@property
def is_namex(s):
return s.unit == l.namex
@property
def is_numberx(s):
return s.unit == l.numberx
@property
def is_newlinex(s):
return s.unit == l.newlinex
@property
def is_rpackagex(s):
return s.unit == l.rpackagex
@property
def is_rparx(s):
return s.unit == l.rparx
@property
def is_rketx(s):
return s.unit == l.rketx
@property
def is_semibol(s):
return s.unit == l.semibol
@property
def is_semix(s):
return s.unit == l.semix
@property
def is_slicebol(s):
return s.unit == l.slicebol
@property
def is_stringx(s):
return s.unit == l.stringx
@property
def is_terminal(s):
return s.unit in l.terminal
@property
def module_dir(s):
return s.module.url.module_dir
@property
def modifier_only(s):
t = s.modifier[-1]
if t.is_childcod:
r = s.modifier[1:-1]
elif t.is_enumbol:
r = s.modifier[1:-1]
elif t.is_ibol:
r = s.modifier[1:-1]
elif t.is_newlinex:
r = s.modifier[1:-2]
else:
s.throw(err.cannot_resolve_modifier)
return r
@property
def modifier_and_atomic_target(s):
r = s.modifier_only
r.extend(s.atomic_target)
return r
@property
def atomic_target(s):
t = s.modifier[-1]
r = []
if t.is_childcod:
for c in t.linecod:
if c.has_ibol:
r.append(c.modifier[0])
else:
r.extend(c.modifier[:-1])
elif t.is_enumbol:
r.extend(t.modifier)
elif t.is_ibol:
r.append(t)
elif t.is_newlinex:
r.append(s.modifier[-2])
else:
s.throw(err.cannot_resolve_modifier)
return r
@property
def atomic_target_name_only(s):
r = s.atomic_target
for i in range(len(r)):
t = r[i]
if t.is_ibol:
r[i] = t.target
elif t.is_opbol:
r[i] = t.modifier[0]
return r
# url
def url(
s,
url = None,
cwd = None,
ext = exts.importable,
path_only = 0,
skip_if_not_found = 0,
packagebol = 0,
release = None,
pickle_hash = '',
):
if url is None:
url = s.url_value
if cwd is None:
cwd = s.module_dir
if release is None:
release = s.release
r = Url(
url = url,
cwd = cwd,
ext = ext,
path_only = path_only,
skip_if_not_found = skip_if_not_found,
packagebol = packagebol,
release = release,
pickle_hash = pickle_hash,
)
return r
# frame management
def add_frame(s):
s.module.add_frame()
def pop_frame(s):
ast = s.module.pop_frame()
return ast
def add_ast(s, name, ast):
s.module.add_ast(name, ast)
def add_name(s, name):
s.module.add_name(name)
@property
def frame_ast(s):
return s.module.ast[s.level]
@property
def frame_name(s):
return s.module.name[s.level]
@property
def module_ast(s):
return s.module.ast[0]
@property
def module_name(s):
return s.module.name[0]
# for lexical interpolation purpose
def clone(s, module):
token = Data()
token.type = s.lex_type
token.lineno = s.lineno
token.value = s.value
U = s.get_unit()
c = U(token, module)
return c
@property
def release(s):
return s.module.release
@property
def debug(s):
return s.module.debug
@property
def klass(s):
return s.module.get_klass()
def set_klass(s, klass):
s.module.klass[s.level] = klass
# classpec specific
def get_method_name(s, name):
r = '{class_name:s}_{routname:s}'.format(
class_name = s.name,
routname = name,
)
# the class name will be guided above
return r
@property
def ast_target(s):
return s.module.get_ast(s.value)
def get_ast(s, alias):
ast = s.module.get_ast(alias)
if ast:
return ast
else:
s.throw(err.cannot_find_targetted_ast, alias=alias)
@property
def childcod_target(s):
return s.modifier[-1].linecod
@property
def cname(s):
return s.__class__.__name__
@property
def nfo(s):
r = repr(s) + '\n'
for key, item in s.__dict__.items():
r += '\t{:s} {:s}\n'.format(key, repr(item))
return r
@property
def rep(s):
return s.__repr__()
# ^: add lexem ; add modifier raw
def __xor__(s, unit):
s.lexem.extend(unit.lexem)
s.raw.append(unit)
return s
# print modifiers
@property
def repmod(s):
r = ''
for m in s.modifier:
r += m.rep + ' '
return r
# print args
@property
def repargs(s):
for m in s.args:
print(m.rep, end=' ')
print('')
# instruction buffer
@property
def b(s):
return s.module.b
@property
def i(s):
if s.level == 0:
return s.module.main
else:
return s.module.b
@property
def level(s):
return s.module.level
@property
def contains(s):
if s.module.contains_used[s.level]:
return ''
else:
s.module.contains_used[s.level] = 1
return '\ncontains\n'
@property
def indent(s):
s.module.b.indent
s.module.main.indent
@property
def dedent(s):
s.module.b.dedent
s.module.main.dedent
@property
def redirect(s):
pass
@redirect.setter
def redirect(s, receiver):
s.module.receiver = receiver
@property
def contains_disable(s):
s.module.contains_used[s.level] = 1
def __repr__(s):
if len(s.lexem) == 1:
r = '{:s}({:d}, {:s})'.format(
s.cname,
s.lineno,
str(s.lexem[0].value.value),
)
else:
a = ''
for x in s.lexem:
if x.value.value != '':
if not x.value.is_linefeedx:
a = str(x.value.value)
break
b = ''
for x in s.lexem[::-1]:
if x.value.value != '':
b = str(x.value.value)
break
r = '{:s}({:d}, {:s}>{:s})'.format(
s.cname,
s.lineno,
a,
b,
)
return r
@property
def modifier_only_direct_production(s):
b = Buffer(newline='')
for m in s.modifier_only:
b != m
return b
@property
def modifier_only_for_rout(s):
return s.modifier_only[:-1]
@property
def childcod(s):
return s.modifier[-1]
def get_unit(s):
c = s.cname
x = c.lower()
if c.endswith('X'):
n = 'fython.lexem.' + x
elif c.endswith('Bol'):
n = 'fython.symbol.' + x
else:
n = 'fython.code.' + x
m = import_module(n)
u = getattr(m, c)
return u
# >: add import
def __gt__(s, other):
b = s.b
b.rstrip(implicit_none)
b != other
b != ';'
b != implicit_none
return s
@property
def method_buffer_enable(s):
s.module.method_buffer_enable = 1
@property
def method_buffer_disable(s):
s.module.method_buffer_enable = 0
|